Thank you for sharing your work.
I'm trying to adapt your repository to our dataset.
score_heat_list = []
import glob
def make_dir(name):
    """Create the directory *name* if it does not already exist.

    Prints a (Korean) status message in either case.
    """
    if os.path.isdir(name):
        print("해당 폴더가 이미 존재합니다.")
    else:
        os.makedirs(name)
        print(name, "폴더가 생성되었습니다.")
# Ensure the output folder for the figures saved at the end of the loop exists.
make_dir('save_imageheatmap_model_figure_folder')
def json_extract_feature(json_data):
    """Extract per-view cancer-annotation contours from a parsed JSON dict.

    Expected layout (per the original author's note):
        'case_id'      -> patient identifier (e.g. 'xxx_<number>')
        'contour_list' -> {'cancer': {view: {key: [{'x': ..., 'y': ...}, ...]}}}
    where ``view`` is one of 'lcc', 'lmlo', 'rcc', 'rmlo'.

    Parameters
    ----------
    json_data : dict
        Parsed annotation JSON.

    Returns
    -------
    tuple of 8 lists
        (lcc_views, lmlo_views, rcc_views, rmlo_views,
         lcc_contours, lmlo_contours, rcc_contours, rmlo_contours),
        where each ``*_views`` list holds the matched view names and each
        ``*_contours`` list holds ``[y, x]`` point pairs.
    """
    # Replaces four parallel lists + an inconsistent if/if/if/elif chain with
    # two dicts keyed by view name; the returned 8-tuple is unchanged.
    views = {'lcc': [], 'lmlo': [], 'rcc': [], 'rmlo': []}
    contours = {'lcc': [], 'lmlo': [], 'rcc': [], 'rmlo': []}
    cancer = json_data['contour_list']['cancer']
    for image_type in cancer:
        if image_type in views:
            views[image_type].append(image_type)
        for key in cancer[image_type]:
            for contour in cancer[image_type][key]:
                # Stored as [y, x] (row, col) to match mask indexing downstream.
                point = [contour.get('y'), contour.get('x')]
                if image_type in contours:
                    contours[image_type].append(point)
    return (views['lcc'], views['lmlo'], views['rcc'], views['rmlo'],
            contours['lcc'], contours['lmlo'], contours['rcc'], contours['rmlo'])
from skimage import draw
def polygon2mask(image_shape, polygon):
    """Compute a mask from polygon.
    Parameters
    ----------
    image_shape : tuple of size 2.
        The shape of the mask.
    polygon : array_like.
        The polygon coordinates of shape (N, 2) where N is
        the number of points.
    Returns
    -------
    mask : 2-D ndarray of type 'bool'.
        The mask that corresponds to the input polygon.
    Notes
    -----
    This function does not do any border checking, so that all
    the vertices need to be within the given shape.
    Examples
    --------
    >>> image_shape = (128, 128)
    >>> polygon = np.array([[60, 100], [100, 40], [40, 40]])
    >>> mask = polygon2mask(image_shape, polygon)
    >>> mask.shape
    (128, 128)
    """
    polygon = np.asarray(polygon)
    vertex_row_coords, vertex_col_coords = polygon.T
    fill_row_coords, fill_col_coords = draw.polygon(
        vertex_row_coords, vertex_col_coords, image_shape)
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # bool is the documented replacement and gives an identical dtype.
    mask = np.zeros(image_shape, dtype=bool)
    mask[fill_row_coords, fill_col_coords] = True
    return mask
##############################################################################################################################
from tqdm import tqdm
from src.heatmaps.run_producer_single import produce_heatmaps
import json
from PIL import Image
# Directory holding the per-case 'Cancer_<id>.json' annotation files read below.
annotation_folder = r'/home/ncc/Desktop/2020_deep_learning_breastcancer/annotation_SN/'
import pickle
# Main pipeline: for each mammogram PNG, crop it, load its annotation contours,
# rasterize them into a mask, produce patch-classifier heatmaps, run the
# image+heatmaps model, plot everything, and collect the per-image scores.
for png in tqdm(png_list[0:8]):
    print(PATH + png)
    # Step 1: crop the raw mammogram; also writes a .pkl with crop metadata
    # (including 'window_location', used below to crop the annotation mask).
    crop_single_mammogram(PATH + png, horizontal_flip='NO',
                          view=png.split('_')[1].split('.')[0],
                          cropped_mammogram_path=PATH + 'cropped_image/' + png,
                          metadata_path=PATH + png.split('.')[0] + '.pkl',
                          num_iterations=100, buffer_size=50)
    print(PATH + 'cropped_image/' + png)
    get_optimal_center_single(PATH + 'cropped_image/' + png,
                              PATH + png.split('.')[0] + '.pkl')
    model_input = load_inputs(
        image_path=PATH + 'cropped_image/' + png,
        metadata_path=PATH + png.split('.')[0] + '.pkl',
        use_heatmaps=False,
    )
    # Step 2: parameters for the patch classifier that produces the heatmaps.
    parameters = dict(
        device_type='gpu',
        gpu_number='0',
        patch_size=256,
        stride_fixed=20,
        more_patches=5,
        minibatch_size=10,
        seed=np.random.RandomState(shared_parameters["seed"]),
        initial_parameters="/home/ncc/Desktop/breastcancer/nccpatient/breast_cancer_classifier/models/sample_patch_model.p",
        input_channels=3,
        number_of_classes=4,
        cropped_mammogram_path=PATH + 'cropped_image/' + png,
        metadata_path=PATH + png.split('.')[0] + '.pkl',
        heatmap_path_malignant=PATH + png.split('.')[0] + '_malignant_heatmap.hdf5',
        heatmap_path_benign=PATH + png.split('.')[0] + '_benign_heatmap.hdf5',
        heatmap_type=[0, 1],  # 0: malignant, 1: benign
        use_hdf5="store_true"
    )
    # Step 3: read the annotation, e.g. for SN00000016_L-CC.png.
    # NOTE(review): the same JSON file is re-read once per view of a case;
    # worth deduplicating during cleanup.
    # NOTE(review): annotations are in ORIGINAL-image coordinates while the
    # display uses the CROPPED image — hence the window_location slicing below.
    with open(PATH + png.split('.')[0] + '.pkl', 'rb') as f:
        location_data = pickle.load(f)
    print(location_data)
    # window_location appears to be (row_start, row_end, col_start, col_end),
    # e.g. (103, 2294, 0, 1041) — TODO confirm against the cropping code.
    start_point1 = list(location_data['window_location'])[0]
    endpoint1 = list(location_data['window_location'])[1]
    start_point2 = list(location_data['window_location'])[2]
    endpoint2 = list(location_data['window_location'])[3]
    print(start_point1, start_point2)
    with open(annotation_folder + 'Cancer_' + png.split('_')[0] + '.json') as json_file:
        json_data = json.load(json_file)
    (temp_image_type, temp_image_type1, temp_image_type2, temp_image_type3,
     temp_contour, temp_contour1, temp_contour2,
     temp_contour3) = json_extract_feature(json_data)
    # Pick the contour list matching this file's view.
    view_name = png.split('_')[1].split('.')[0]
    new_contour_list = []  # fallback so an unknown view name cannot crash below
    if view_name == 'L-CC':
        new_contour_list = temp_contour
    if view_name == 'L-MLO':
        new_contour_list = temp_contour1
    if view_name == 'R-CC':
        new_contour_list = temp_contour2
    if view_name == 'R-MLO':
        new_contour_list = temp_contour3
    im = Image.open(PATH + png)
    im_cropped = Image.open(PATH + 'cropped_image/' + png)
    print('원본 이미지:', im.size, 'cropped image:', im_cropped.size)
    # Shift each [y, x] point by half the original image size (reversed so the
    # height offset is added to y and the width offset to x). Presumably the
    # contours are stored relative to the image centre — TODO confirm.
    new_contour = []
    for image_list in new_contour_list:
        shifted = map(operator.add, image_list, reversed(list(np.array(im.size) / 2)))
        new_contour.append(list(shifted))
    try:
        # PIL .size is (width, height); polygon2mask expects (rows, cols).
        img = polygon2mask(im.size[::-1], np.array(new_contour))
        img_cropped = img[start_point1:endpoint1, start_point2:endpoint2]
        im = cv2.imread(PATH + png)
        im_cropped = cv2.imread(PATH + 'cropped_image/' + png)
    except ValueError:
        # No usable polygon for this view: fall back to an empty mask so the
        # plotting below still works. (BUG FIX: the original left img_cropped
        # undefined here and built the zeros mask with (W, H) instead of (H, W).)
        img = np.zeros(im.size[::-1])
        img_cropped = img[start_point1:endpoint1, start_point2:endpoint2]
        im = cv2.imread(PATH + png)
        im_cropped = cv2.imread(PATH + 'cropped_image/' + png)
    # Step 4: produce the heatmaps and run the image+heatmaps classifier.
    random_number_generator = np.random.RandomState(shared_parameters["seed"])
    produce_heatmaps(parameters)
    image_heatmaps_parameters = shared_parameters.copy()
    image_heatmaps_parameters["view"] = view_name
    image_heatmaps_parameters["use_heatmaps"] = True
    image_heatmaps_parameters["model_path"] = "/home/ncc/Desktop/breastcancer/nccpatient/breast_cancer_classifier/models/ImageHeatmaps__ModeImage_weights.p"
    model, device = load_model(image_heatmaps_parameters)
    # BUG FIX: the benign/malignant heatmap paths were swapped in the original
    # (benign_heatmap_path pointed at the *_malignant_heatmap.hdf5 file and
    # vice versa), which flips the two heatmap channels fed to the model.
    model_input = load_inputs(
        image_path=PATH + 'cropped_image/' + png,
        metadata_path=PATH + png.split('.')[0] + '.pkl',
        use_heatmaps=True,
        benign_heatmap_path=PATH + png.split('.')[0] + '_benign_heatmap.hdf5',
        malignant_heatmap_path=PATH + png.split('.')[0] + '_malignant_heatmap.hdf5')
    batch = [
        process_augment_inputs(
            model_input=model_input,
            random_number_generator=random_number_generator,
            parameters=image_heatmaps_parameters,
        ),
    ]
    tensor_batch = batch_to_tensor(batch, device)
    y_hat = model(tensor_batch)
    # Step 5: visualise original image, cropped image + mask, model input, and
    # both heatmap channels.
    fig, axes = plt.subplots(1, 5, figsize=(16, 4))
    x = tensor_batch[0].cpu().numpy()
    axes[0].imshow(im, cmap="gray")
    axes[0].imshow(img, cmap='autumn', alpha=0.4)
    axes[0].set_title("OG_Image")
    axes[1].imshow(im_cropped, cmap="gray")
    axes[1].imshow(img_cropped, cmap='autumn', alpha=0.4)
    axes[1].set_title("Image")
    axes[2].imshow(x[0], cmap="gray")
    axes[2].imshow(img_cropped, cmap='autumn', alpha=0.4)
    axes[2].set_title("Image")
    axes[3].imshow(x[1], cmap=LinearSegmentedColormap.from_list("benign", [(0, 0, 0), (0, 1, 0)]))
    axes[3].set_title("Benign Heatmap")
    axes[4].imshow(x[2], cmap=LinearSegmentedColormap.from_list("malignant", [(0, 0, 0), (1, 0, 0)]))
    axes[4].set_title("Malignant Heatmap")
    plt.savefig('save_imageheatmap_model_figure_folder' + '/' + png.split('.')[0] + '.png')
    # Step 6: record benign/malignant probabilities for this image.
    predictions = np.exp(y_hat.cpu().detach().numpy())[:, :2, 1]
    predictions_dict = {
        "image": png,
        "benign": float(predictions[0][0]),
        "malignant": float(predictions[0][1]),
    }
    print(predictions_dict)
    score_heat_list.append(predictions_dict)
The attached file is a cropped mammogram produced by this code.
The issue is that some mammograms don't crop well. Am I doing something wrong?