Example #1
def save_output_images(images, logits, image_names, contour_type):
    save_dir = r'D:\cardiac_data\Sunnybrook\Sunnybrook_online_submission'  # raw string avoids invalid escape sequences
    overlay_full_path = os.path.join(save_dir, 'Overlay')
    img_shape = images.shape
    for idx in range(img_shape[0]):
        image = images[idx, ...]
        image_name = image_names[idx]
        logit = logits[idx, ..., 1]
        logit = logit[..., np.newaxis]
        logit = reshape(logit,
                        to_shape=(img_shape[1], img_shape[2], img_shape[3]))
        logit = np.where(logit > 0.5, 255, 0).astype('uint8')
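        # Note: unpacking three values is the OpenCV 3.x findContours API;
        # OpenCV 4.x returns only (contours, hierarchy).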
        tmp2, coords, hierarchy = cv2.findContours(logit.copy(), cv2.RETR_LIST,
                                                   cv2.CHAIN_APPROX_NONE)
        if not coords:
            print('\nNo detection in image: {:s}'.format(image_name))
            coords = np.ones((1, 1, 1, 2), dtype='int')
        if len(coords) > 1:
            print('\nMultiple detections in image: {:s}'.format(image_name))
            # cv2.imwrite(data_path + '\\multiple_dets\\'+contour_type+'{:04d}.png'.format(idx), tmp)
            lengths = []
            for coord in coords:
                lengths.append(len(coord))
            coords = [coords[np.argmax(lengths)]]

        coords = np.squeeze(coords)
        draw_contour(image, image_name, overlay_full_path, contour_type,
                     coords)
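
The threshold-then-keep-largest-contour step above recurs in most of the examples below. A minimal standalone sketch of that step, assuming OpenCV and a single-channel probability map (the helper name is hypothetical, not part of the original code):

import cv2
import numpy as np

def largest_contour(prob_map, threshold=0.5):
    # Binarize the probability map into an 8-bit mask.
    mask = np.where(prob_map > threshold, 255, 0).astype('uint8')
    # Index [-2] picks the contour list under both the OpenCV 3.x and 4.x return signatures.
    contours = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[-2]
    if not contours:
        # Fall back to a single dummy point, as the examples do when nothing is detected.
        return np.ones((1, 1, 2), dtype='int')
    # Keep the contour with the most points, as the examples do via np.argmax(lengths).
    return max(contours, key=len)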
Example #2
def create_submission(dcm_list, data_path):
    crop_size = 200
    input_shape = (crop_size, crop_size, 1)
    num_classes = 2

    oweights = 'weights/lvsc_o.h5'
    iweights = 'weights/lvsc_i.h5'
    omodel = fcn_model(input_shape, num_classes, weights=oweights)
    imodel = fcn_model(input_shape, num_classes, weights=iweights)

    images = np.zeros((len(dcm_list), crop_size, crop_size, 1))
    for idx, dcm_path in enumerate(dcm_list):
        img = read_dicom(dcm_path)
        img = center_crop(img, crop_size=crop_size)
        images[idx] = img
    opred_masks = omodel.predict(images, batch_size=32, verbose=1)
    ipred_masks = imodel.predict(images, batch_size=32, verbose=1)

    save_dir = data_path + '_auto_contours'
    prefix = 'MYFCN_'  # change prefix to your unique initials
    for idx, dcm_path in enumerate(dcm_list):
        img = read_dicom(dcm_path)
        h, w, d = img.shape
        otmp = reshape(opred_masks[idx], to_shape=(h, w, d))
        otmp = np.where(otmp > 0.5, 255, 0).astype('uint8')
        itmp = reshape(ipred_masks[idx], to_shape=(h, w, d))
        itmp = np.where(itmp > 0.5, 255, 0).astype('uint8')
        assert img.shape == otmp.shape, 'Prediction does not match shape'
        assert img.shape == itmp.shape, 'Prediction does not match shape'
        tmp = otmp - itmp
        tmp = np.squeeze(tmp, axis=(2, ))
        sub_dir = dcm_path[dcm_path.find('CAP_'):dcm_path.rfind('DET')]
        filename = prefix + dcm_path[dcm_path.rfind('DET'):].replace(
            '.dcm', '.png')
        full_path = os.path.join(save_dir, sub_dir)
        if not os.path.exists(full_path):
            os.makedirs(full_path)
        cv2.imwrite(os.path.join(full_path, filename), tmp)
        in_ = cv2.imread(os.path.join(full_path, filename),
                         cv2.IMREAD_GRAYSCALE)
        if not np.allclose(in_, tmp):
            raise AssertionError('File read error: {:s}'.format(
                os.path.join(full_path, filename)))
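
A hypothetical driver for the function above: walk a validation folder, collect every DICOM path, and run both models. The directory path and layout are assumptions, not part of the original code.

import os

data_path = r'D:\cardiac_data\LVSC\Validation'  # placeholder path
dcm_list = []
for dirpath, _, filenames in os.walk(data_path):
    for name in filenames:
        if name.lower().endswith('.dcm'):
            dcm_list.append(os.path.join(dirpath, name))

create_submission(dcm_list, data_path)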
Example #3
def create_submission(contours, data_path):
    if contour_type == 'i':
        weights = 'weights/rvsc_i.h5'
    elif contour_type == 'o':
        weights = 'weights/rvsc_o.h5'
    else:
        sys.exit('\ncontour type "%s" not recognized\n' % contour_type)

    crop_size = 200
    images = np.zeros((len(contours), crop_size, crop_size, 1))
    for idx, contour in enumerate(contours):
        img, _ = read_contour(contour, data_path, return_mask=False)
        img = center_crop(img, crop_size=crop_size)
        images[idx] = img

    input_shape = (crop_size, crop_size, 1)
    num_classes = 2
    model = fcn_model(input_shape, num_classes, weights=weights)
    pred_masks = model.predict(images, batch_size=32, verbose=1)
    
    save_dir = data_path + '_auto_contours'
    num = 0
    for idx, ctr in enumerate(contours):
        img, _ = read_contour(ctr, data_path, return_mask=False)
        h, w, d = img.shape
        tmp = reshape(pred_masks[idx], to_shape=(h, w, d))
        assert img.shape == tmp.shape, 'Shape of prediction does not match'
        tmp = np.where(tmp > 0.5, 255, 0).astype('uint8')
        tmp2, coords, hierarchy = cv2.findContours(tmp.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        if not coords:
            print('No detection: %s' % ctr.ctr_path)
            coords = np.ones((1, 1, 1, 2), dtype='int')
        if len(coords) > 1:
            print('Multiple detections: %s' % ctr.ctr_path)

            #cv2.imwrite('multiple_dets/'+contour_type+'{:04d}.png'.format(idx), tmp)
            
            lengths = []
            for coord in coords:
                lengths.append(len(coord))
            coords = [coords[np.argmax(lengths)]]
            num += 1
        filename = 'P{:s}-{:s}-'.format(ctr.patient_no, ctr.img_no)+contour_type+'contour-auto.txt'
        full_path = os.path.join(save_dir, 'P{:s}'.format(ctr.patient_no)+'contours-auto')
        if not os.path.exists(full_path):
            os.makedirs(full_path)
        with open(os.path.join(full_path, filename), 'w') as f:
            for coord in coords:
                coord = np.squeeze(coord, axis=(1,))
                coord = np.append(coord, coord[:1], axis=0)
                np.savetxt(f, coord, fmt='%i', delimiter=' ')
    
    print('Num of files with multiple detections: {:d}'.format(num))
Example #4
def create_submission(cases, data_path, output_path, contour_type='i'):

    weight_t = 'model_logs/sunnybrook_a_unetres_inv_time.h5'
    weight_s = 'model_logs/sunnybrook_i_unetres_inv.h5'
    crop_size = 128
    num_phases = 3
    num_classes = 2

    input_shape = (num_phases, crop_size, crop_size, 1)
    input_shape_s = (crop_size, crop_size, 1)
    model_s = unet_res_model_Inv(input_shape_s,  # 2D shape for the single-frame model; input_shape (temporal) is for model_t
                                 num_classes,
                                 nb_filters=16,
                                 transfer=True,
                                 contour_type=contour_type,
                                 weights=weight_s)
    model_t = unet_res_model_time(input_shape,
                                  num_classes,
                                  nb_filters=16,
                                  n_phases=num_phases,
                                  transfer=True,
                                  contour_type=contour_type,
                                  weights=weight_t)
    for idx, case in enumerate(cases):
        print('\nPredict image sequence {:d}'.format(idx))
        images, _, file_names = read_all_contour(case, data_path, num_classes)
        images_crop = center_crop_3d(images, crop_size=crop_size)
        pred_masks = model_s.predict(images_crop, batch_size=32, verbose=1)
        p, h, w, d = images.shape

        for frame_idx in range(p):  # avoid shadowing the outer loop's idx
            image = images[frame_idx, ...]
            tmp = pred_masks[frame_idx, :]
            out_file = file_names[frame_idx]
            tmp = reshape(tmp, to_shape=(h, w, d))
            tmp = np.where(tmp > 0.5, 255, 0).astype('uint8')
            tmp2, coords, hierarchy = cv2.findContours(tmp.copy(),
                                                       cv2.RETR_LIST,
                                                       cv2.CHAIN_APPROX_NONE)
            if not coords:
                print('\nNo detection in case: {:s}; image: {:s}'.format(
                    case, out_file))
                coords = np.ones((1, 1, 1, 2), dtype='int')

            output_full_path = os.path.join(output_path, case)

            out_file = re.sub('dcm', 'jpg', out_file)
            draw_image_overlay(image, out_file, output_full_path, contour_type,
                               coords)
Example #5
def create_submission(contours, data_path, output_path, contour_type='i'):
    if contour_type == 'i':
        weights = 'model_logs/sunnybrook_i_unet_inv.h5'
    else:
        sys.exit('\ncontour type "%s" not recognized\n' % contour_type)

    crop_size = 128
    input_shape = (crop_size, crop_size, 1)
    num_classes = 2
    images, masks = export_all_contours(contours,
                                        data_path,
                                        output_path,
                                        crop_size,
                                        num_classes=num_classes)
    model = unet_model_inv(input_shape,
                           num_classes,
                           num_filters=8,
                           transfer=True,
                           contour_type=contour_type,
                           weights=weights)

    pred_masks = model.predict(images, batch_size=32, verbose=1)
    print('\nEvaluating dev set ...')
    result = model.evaluate(images, masks, batch_size=32)
    result = np.round(result, decimals=10)
    print('\nDev set result {:s}:\n{:s}'.format(str(model.metrics_names),
                                                str(result)))
    num = 0
    for idx, ctr in enumerate(contours):
        img, mask = read_contour(ctr, data_path, num_classes)
        h, w, d = img.shape
        tmp = pred_masks[idx, :]
        tmp = reshape(tmp, to_shape=(h, w, d))
        tmp = np.where(tmp > 0.5, 255, 0).astype('uint8')
        tmp2, coords, hierarchy = cv2.findContours(tmp.copy(), cv2.RETR_LIST,
                                                   cv2.CHAIN_APPROX_NONE)
        if not coords:
            print('\nNo detection in case: {:s}; image: {:d}'.format(
                ctr.case, ctr.img_no))
            coords = np.ones((1, 1, 1, 2), dtype='int')

        if contour_type == 'i':
            man_filename = ctr.ctr_endo_path[ctr.ctr_endo_path.rfind('\\') +
                                             1:]
        elif contour_type == 'm':
            man_filename = ctr.ctr_epi_path[ctr.ctr_epi_path.rfind('\\') + 1:]

        auto_filename = man_filename.replace('manual', 'auto')
        img_filename = re.sub(r'-[io]contour-manual.txt', '.dcm', man_filename)
        man_full_path = os.path.join(save_dir, ctr.case, 'contours-manual',
                                     'IRCCI-expert')
        auto_full_path = os.path.join(save_dir, ctr.case, 'contours-auto',
                                      'FCN')
        img_full_path = os.path.join(save_dir, ctr.case, 'DICOM')
        dcm = 'IM-0001-%04d.dcm' % (ctr.img_no)
        dcm_path = os.path.join(data_path, ctr.case, 'DICOM', dcm)
        overlay_full_path = os.path.join(save_dir, ctr.case, 'Overlay')
        for dirpath in [
                man_full_path, auto_full_path, img_full_path, overlay_full_path
        ]:
            if not os.path.exists(dirpath):
                os.makedirs(dirpath)
            if 'DICOM' in dirpath:
                src = dcm_path
                dst = os.path.join(dirpath, img_filename)
                shutil.copyfile(src, dst)
            elif 'Overlay' in dirpath:
                draw_result(ctr, data_path, overlay_full_path, contour_type,
                            coords)
            else:
                dst = os.path.join(auto_full_path, auto_filename)
                if not os.path.exists(auto_full_path):
                    os.makedirs(auto_full_path)
                with open(dst, 'wb') as f:
                    for cd in coords:
                        cd = np.squeeze(cd)
                        if cd.ndim == 1:
                            np.savetxt(f, cd, fmt='%d', delimiter=' ')
                        else:
                            for coord in cd:
                                np.savetxt(f, coord, fmt='%d', delimiter=' ')

    print('\nNumber of multiple detections: {:d}'.format(num))
    dst_eval = os.path.join(save_dir,
                            'evaluation_{:s}.txt'.format(contour_type))
    with open(dst_eval, 'wb') as f:
        f.write(
            ('Dev set result {:s}:\n{:s}'.format(str(model.metrics_names),
                                                 str(result))).encode('utf-8'))

    # Detailed evaluation:
    masks = np.squeeze(masks)
    pred_masks = np.squeeze(pred_masks)
    detail_eval = os.path.join(
        save_dir, 'evaluation_detail_{:s}.csv'.format(contour_type))
    evalArr = dice_coef_each(masks, pred_masks)
    caseArr = [ctr.case for ctr in contours]
    imgArr = [ctr.img_no for ctr in contours]
    resArr = [caseArr, imgArr]
    resArr.append(list(evalArr))
    resArr = np.transpose(resArr)
    np.savetxt(detail_eval, resArr, fmt='%s', delimiter=',')
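
dice_coef_each is a project helper that is not shown in these snippets. A plain NumPy sketch of what a per-sample soft Dice with that interface might compute (an assumption, not the project's actual implementation):

import numpy as np

def dice_coef_each(y_true, y_pred, smooth=1.0):
    # y_true and y_pred are (N, H, W) arrays; returns one Dice value per sample.
    y_true = y_true.reshape(len(y_true), -1).astype('float32')
    y_pred = y_pred.reshape(len(y_pred), -1).astype('float32')
    intersection = (y_true * y_pred).sum(axis=1)
    return (2.0 * intersection + smooth) / (y_true.sum(axis=1) + y_pred.sum(axis=1) + smooth)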
Example #6
    traj[20, i] = 1
    model.set_hidden(Variable(torch.zeros(
        1, 1, HIDDEN_SIZE)))  # doing this every time now
    out = model.all_hiddens(traj).data.numpy()
    np.savetxt("rnndata/" + str(i) + "perturb.csv", out, delimiter=",")

# get RNN weight matrix
m = model.rnn.state_dict()['weight_hh_l0'].numpy()
np.save('rnndata/weight_hh_l0.npy', m)

# test a random trajectory
criterion = nn.CrossEntropyLoss()
model = ThreeBitRNN(hidden_size=HIDDEN_SIZE)
model.load_state_dict(torch.load('rnndata/model.pkl'))
for _ in range(10):
    inn, out = reshape(genxy(102, 0.25))
    inn = Variable(torch.Tensor(inn), requires_grad=False)
    out = Variable(torch.LongTensor(out))
    model.set_hidden(Variable(torch.zeros(1, 1, HIDDEN_SIZE)))
    outt = model(inn)
    _, preds = torch.max(outt, 1)
    print(criterion(outt, out).data[0])
    print(sum(preds == out).data[0])

# create special world tour trajectory and extract hidden states (condition-averaged)
model = ThreeBitRNN(hidden_size=HIDDEN_SIZE)
model.load_state_dict(torch.load('rnndata/model.pkl'))
n = 101
hids = np.zeros((320, HIDDEN_SIZE))
traj = np.zeros((320, 3))
traj[20, 0] = 1
Example #7
Not using utils.DataLoader
'''

model = ThreeBitRNN(hidden_size=args.hidden_size)
params = list(filter(lambda x: x.requires_grad, model.parameters()))
# optimizer = optim.Adadelta(params, lr=args.learning_rate, rho=args.rho, weight_decay=args.weight_decay)
optimizer = optim.SGD(params,
                      lr=args.learning_rate,
                      weight_decay=args.weight_decay)
criterion = nn.CrossEntropyLoss()
best_loss = 10000

start = time.time()
for epoch in range(args.num_epochs):
    # new training data every time
    train_in, train_out = reshape(genxy(sl * 400, 0.25))

    val_in, val_out = reshape(genxy(sl * 100, 0.25))

    model.train()
    model.set_hidden(Variable(torch.zeros(1, 1, args.hidden_size)))
    ctr = 0
    for i in range(0, len(train_out) // sl):
        inp = Variable(torch.Tensor(train_in[i * sl:(i + 1) * sl, :]))
        trg = Variable(torch.LongTensor(train_out[i * sl:(i + 1) * sl]))
        optimizer.zero_grad()
        outputs = model(inp)
        loss = criterion(outputs, trg)
        loss.backward()
        optimizer.step()
        ctr += 1
Example #8
def create_submission(data_path):
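    # contours, contour_type, save_dir and SAX_SERIES are used but not defined here;
    # this snippet assumes they exist as module-level globals.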
    print(len(list(contours)))
    if contour_type == 'i':
        weights = 'weights/sunnybrook_i.h5'
    elif contour_type == 'o':
        weights = 'weights/sunnybrook_o.h5'
    else:
        sys.exit('\ncontour type "%s" not recognized\n' % contour_type)
    #pdb.set_trace()
    crop_size = 256
    images, masks = export_all_contours(contours, data_path, crop_size)
    
    input_shape = (crop_size, crop_size, 1)
    num_classes = 2
    model = fcn_model(input_shape, num_classes, weights=weights)

    pred_masks = model.predict(images, batch_size=32, verbose=1)
    
    num = 0
    print(list(contours))
    for idx, ctr in enumerate(contours):
        img, mask = read_contour(ctr, data_path)
        h, w, d = img.shape
        tmp = reshape(pred_masks[idx], to_shape=(h, w, d))
        assert img.shape == tmp.shape, 'Shape of prediction does not match'
        tmp = np.where(tmp > 0.5, 255, 0).astype('uint8')
        tmp2, coords, hierarchy = cv2.findContours(tmp.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        if not coords:
            print('\nNo detection: %s' % ctr.ctr_path)
            coords = np.ones((1, 1, 1, 2), dtype='int')
        if len(coords) > 1:
            print('\nMultiple detections: %s' % ctr.ctr_path)
            
            #cv2.imwrite('multiple_dets/'+contour_type+'{:04d}.png'.format(idx), tmp)
            
            lengths = []
            for coord in coords:
                lengths.append(len(coord))
            coords = [coords[np.argmax(lengths)]]
            num += 1
        
        man_filename = ctr.ctr_path[ctr.ctr_path.rfind('/')+1:]
        auto_filename = man_filename.replace('manual', 'auto')
        img_filename = re.sub(r'-[io]contour-manual.txt', '.dcm', man_filename)
        man_full_path = os.path.join(save_dir, ctr.case, 'contours-manual', 'IRCCI-expert')
        auto_full_path = os.path.join(save_dir, ctr.case, 'contours-auto', 'FCN')
        img_full_path = os.path.join(save_dir, ctr.case, 'DICOM')
        dcm = 'IM-%s-%04d.dcm' % (SAX_SERIES[ctr.case], ctr.img_no)
        dcm_path = os.path.join(data_path, ctr.case, dcm)
        for dirpath in [man_full_path, auto_full_path, img_full_path]:
            if not os.path.exists(dirpath):
                os.makedirs(dirpath)
            if 'manual' in dirpath:
                src = ctr.ctr_path
                dst = os.path.join(dirpath, man_filename)
                shutil.copyfile(src, dst)
            elif 'DICOM' in dirpath:
                src = dcm_path
                dst = os.path.join(dirpath, img_filename)
                shutil.copyfile(src, dst)
            else:
                dst = os.path.join(auto_full_path, auto_filename)
                with open(dst, 'w') as f:
                    for coord in coords:
                        coord = np.squeeze(coord, axis=(1,))
                        coord = np.append(coord, coord[:1], axis=0)
                        np.savetxt(f, coord, fmt='%i', delimiter=' ')
    
    print('\nNumber of multiple detections: {:d}'.format(num))
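
The auto-contour files written above are plain text, one "x y" point per line, with the first point repeated at the end to close the polygon. A quick sanity check that reads one back and draws it (the paths and canvas size are placeholders):

import cv2
import numpy as np

ctr_file = 'submission/SC-HF-I-1/contours-auto/FCN/IM-0001-0048-icontour-auto.txt'  # placeholder
points = np.loadtxt(ctr_file, dtype=np.int32)      # (N, 2) array of x y coordinates
canvas = np.zeros((256, 256, 3), dtype='uint8')    # or the matching DICOM frame
cv2.polylines(canvas, [points.reshape(-1, 1, 2)], isClosed=True,
              color=(0, 255, 0), thickness=1)
cv2.imwrite('overlay_check.png', canvas)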
Example #9
def create_submission(contours, data_path, output_path):
    if contour_type == 'i':
        weights = 'model_logs/sunnybrook_i_unetresnet_epoch100_aug4.h5'
    elif contour_type == 'm':
        weights = 'model_logs/sunnybrook_m.h5'
    else:
        sys.exit('\ncontour type "%s" not recognized\n' % contour_type)

    crop_size = 128
    input_shape = (crop_size, crop_size, 1)
    num_classes = 4
    images, masks = export_all_contours(contours,
                                        data_path,
                                        output_path,
                                        crop_size,
                                        num_classes=num_classes)
    model = unet_res_model(input_shape,
                           num_classes,
                           weights=weights,
                           transfer=True)

    pred_masks = model.predict(images, batch_size=32, verbose=1)
    print('\nEvaluating dev set ...')
    result = model.evaluate(images, masks, batch_size=32)
    result = np.round(result, decimals=10)
    print('\nDev set result {:s}:\n{:s}'.format(str(model.metrics_names),
                                                str(result)))
    num = 0
    for idx, ctr in enumerate(contours):
        img, mask = read_contour(ctr, data_path, num_classes)
        h, w, d = img.shape
        if contour_type == 'i':
            tmp = pred_masks[idx, ..., 3]
        elif contour_type == 'm':
            tmp = pred_masks[idx, ..., 2]
        elif contour_type == 'r':
            tmp = pred_masks[idx, ..., 1]
        #cv2.imwrite(data_path + '\\tmp\\' + 'i' + '{:04d}.png'.format(idx), pred_masks[idx,...,3])
        #cv2.imwrite(data_path + '\\tmp\\' + 'o' + '{:04d}.png'.format(idx), pred_masks[idx,...,2])
        #cv2.imwrite(data_path + '\\tmp\\' + 'r' + '{:04d}.png'.format(idx), pred_masks[idx, ..., 1])
        tmp = tmp[..., np.newaxis]
        tmp = reshape(tmp, to_shape=(h, w, d))
        tmp = np.where(tmp > 0.5, 255, 0).astype('uint8')
        tmp2, coords, hierarchy = cv2.findContours(tmp.copy(), cv2.RETR_LIST,
                                                   cv2.CHAIN_APPROX_NONE)
        if not coords:
            print('\nNo detection in case: {:s}; image: {:d}'.format(
                ctr.case, ctr.img_no))
            coords = np.ones((1, 1, 1, 2), dtype='int')
        if len(coords) > 1:
            print('\nMultiple detections in case: {:s}; image: {:d}'.format(
                ctr.case, ctr.img_no))
            lengths = []
            for coord in coords:
                lengths.append(len(coord))
            coords = [coords[np.argmax(lengths)]]
            num += 1
        coords = np.squeeze(coords)

        if contour_type == 'i':
            man_filename = ctr.ctr_endo_path[ctr.ctr_endo_path.rfind('\\') +
                                             1:]
        elif contour_type == 'm':  # 'm' matches the contour types accepted above; 'o' would never be reached
            man_filename = ctr.ctr_epi_path[ctr.ctr_epi_path.rfind('\\') + 1:]

        auto_filename = man_filename.replace('manual', 'auto')
        img_filename = re.sub(r'-[io]contour-manual.txt', '.dcm', man_filename)
        man_full_path = os.path.join(save_dir, ctr.case, 'contours-manual',
                                     'IRCCI-expert')
        auto_full_path = os.path.join(save_dir, ctr.case, 'contours-auto',
                                      'FCN')
        img_full_path = os.path.join(save_dir, ctr.case, 'DICOM')
        dcm = 'IM-0001-%04d.dcm' % (ctr.img_no)
        #dcm = 'IM-%s-%04d.dcm' % (SAX_SERIES[ctr.case], ctr.img_no)
        dcm_path = os.path.join(data_path, ctr.case, 'DICOM', dcm)
        overlay_full_path = os.path.join(save_dir, ctr.case, 'Overlay')
        for dirpath in [
                man_full_path, auto_full_path, img_full_path, overlay_full_path
        ]:
            if not os.path.exists(dirpath):
                os.makedirs(dirpath)
            if 'manual' in dirpath:
                if contour_type == 'i':
                    src = ctr.ctr_endo_path
                elif contour_type == 'm':  # 'm', to match the accepted contour types
                    src = ctr.ctr_epi_path
                dst = os.path.join(dirpath, man_filename)
                shutil.copyfile(src, dst)
            elif 'DICOM' in dirpath:
                src = dcm_path
                dst = os.path.join(dirpath, img_filename)
                shutil.copyfile(src, dst)
            elif 'Overlay' in dirpath:
                draw_result(ctr, data_path, overlay_full_path, contour_type,
                            coords)
            else:
                dst = os.path.join(auto_full_path, auto_filename)
                with open(dst, 'wb') as f:
                    if coords.ndim == 1:
                        np.savetxt(f, coords, fmt='%d', delimiter=' ')
                    else:
                        for coord in coords:
                            #coord = np.squeeze(coord, axis=(1,))
                            #coord = np.append(coord, coord[:1], axis=0)
                            np.savetxt(f, coord, fmt='%d', delimiter=' ')

    print('\nNumber of multiple detections: {:d}'.format(num))
    dst_eval = os.path.join(save_dir,
                            'evaluation_{:s}.txt'.format(contour_type))
    with open(dst_eval, 'wb') as f:
        f.write(
            ('Dev set result {:s}:\n{:s}'.format(str(model.metrics_names),
                                                 str(result))).encode('utf-8'))

    # Detailed evaluation:
    detail_eval = os.path.join(
        save_dir, 'evaluation_detail_{:s}.csv'.format(contour_type))
    evalArr = dice_coef_endo_each(masks, pred_masks)
    caseArr = [ctr.case for ctr in contours]
    imgArr = [ctr.img_no for ctr in contours]
    resArr = np.transpose([caseArr, imgArr, evalArr])
    np.savetxt(detail_eval, resArr, fmt='%s', delimiter=',')
Example #10
model = ThreeBitRNN(hidden_size=args.hidden_size)
params = list(filter(lambda x: x.requires_grad, model.parameters()))
# optimizer = optim.Adadelta(params, lr=args.learning_rate, rho=args.rho, weight_decay=args.weight_decay)
optimizer = optim.SGD(params,
                      lr=args.learning_rate,
                      weight_decay=args.weight_decay)
criterion = nn.CrossEntropyLoss()
best_loss = 10000

start = time.time()
for epoch in range(args.num_epochs):
    model.train()
    ctr = 0
    for i in range(0, 400):
        model.set_hidden(Variable(torch.zeros(1, 1, args.hidden_size)))
        train_in, train_out = reshape(genxy(sl, 0.25))
        inp = Variable(torch.Tensor(train_in))
        trg = Variable(torch.LongTensor(train_out))
        optimizer.zero_grad()
        outputs = model(inp)
        loss = criterion(outputs, trg)
        loss.backward()
        optimizer.step()
        ctr += 1
        if ctr % 399 == 0:
            timenow = timeSince(start)
            print(
                'Epoch [%d/%d], Iter [%d/%d], Time: %s, Loss: %4f' %
                (epoch + 1, args.num_epochs, ctr, 400, timenow, loss.data[0]))
    #
    model.eval()
Example #11
def create_submission(contours, volume_map, data_path, output_path, num_slices, num_phase_in_cycle, contour_type='a', debug=False):
    if contour_type == 'a':
        weights = 'model_logs/sunnybrook_a_unet_3d.h5'
    else:
        sys.exit('\ncontour type "%s" not recognized\n' % contour_type)

    crop_size = 128
    input_shape = (crop_size, crop_size, num_slices, 1)
    num_classes = 3
    volumes, vol_masks, cases, img_nos = export_all_volumes(contours,
                                                            volume_map,
                                                            data_path,
                                                            output_path,
                                                            crop_size,
                                                            num_classes=num_classes,
                                                            num_slices=num_slices,
                                                            num_phase_in_cycle=num_phase_in_cycle,
                                                            is_all_valid_slice=True)

    model = unet_model_3d_Inv(input_shape,
                              pool_size=(2, 2, 1),
                              kernel=(7, 7, 5),
                              n_labels=3,
                              initial_learning_rate=0.00001,
                              deconvolution=False,
                              depth=4,
                              n_base_filters=4,
                              include_label_wise_dice_coefficients=True,
                              batch_normalization=True,
                              weights=weights)

    if debug:
        kwargs = dict(
            rotation_range=90,
            zoom_range=0.2,
            width_shift_range=0.2,
            height_shift_range=0.2,
            horizontal_flip=True,
            vertical_flip=True,
            data_format="channels_last",
            fill_mode='constant',
        )
        seed = 1234
        np.random.seed(seed)
        image_datagen = CardiacVolumeDataGenerator(**kwargs)
        mask_datagen = CardiacVolumeDataGenerator(**kwargs)
        volumes = image_datagen.fit(volumes, augment=True, seed=seed, rounds=8, toDir=None)
        vol_masks = mask_datagen.fit(vol_masks, augment=True, seed=seed, rounds=8, toDir=None)
        result = model.evaluate(volumes, vol_masks, batch_size=8)
        result = np.round(result, decimals=10)
        print('\nResult {:s}:\n{:s}'.format(str(model.metrics_names), str(result)))
    else:
        pred_masks = model.predict(volumes, batch_size=8, verbose=1)
        print('\nEvaluating ...')
        result = model.evaluate(volumes, vol_masks, batch_size=8)
        result = np.round(result, decimals=10)
        print('\nResult {:s}:\n{:s}'.format(str(model.metrics_names), str(result)))
        num = 0

        for c_type in ['i', 'm']:
            for idx in range(len(volumes)):
                volume = volumes[idx]

                h, w, s, d = volume.shape
                for s_i in range(s):
                    img = volume[...,s_i, 0]
                    if c_type == 'i':
                        tmp = pred_masks[idx, ..., s_i, 2]
                    elif c_type == 'm':
                        tmp = pred_masks[idx, ..., s_i, 1]

                    tmp = tmp[..., np.newaxis]
                    tmp = reshape(tmp, to_shape=(h, w, d))
                    tmp = np.where(tmp > 0.5, 255, 0).astype('uint8')
                    tmp2, coords, hierarchy = cv2.findContours(tmp.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
                    if not coords:
                        print('\nNo detection in case: {:s}; image: {:d}'.format(cases[idx], img_nos[idx]))
                        coords = np.ones((1, 1, 1, 2), dtype='int')

                    overlay_full_path = os.path.join(save_dir, cases[idx], 'Overlay')
                    if not os.path.exists(overlay_full_path):
                        os.makedirs(overlay_full_path)
                    if 'Overlay' in overlay_full_path:
                        out_file = 'IM-0001-%s-%04d-%01d.png' % (c_type, img_nos[idx], s_i)
                        draw_image_overlay(img, out_file, overlay_full_path, c_type, coords)


            print('\nNumber of multiple detections: {:d}'.format(num))
            dst_eval = os.path.join(save_dir, 'evaluation_{:s}.txt'.format(c_type))
            with open(dst_eval, 'wb') as f:
                f.write(('Dev set result {:s}:\n{:s}'.format(str(model.metrics_names), str(result))).encode('utf-8'))

            # Detailed evaluation:
            detail_eval = os.path.join(save_dir, 'evaluation_detail_{:s}.csv'.format(c_type))
            evalEndoArr = []
            evalMyoArr = []
            resArr = [cases, img_nos]
            for s_i in range(s):
                resArr.append(list(dice_coef_endo_each(vol_masks[...,s_i,:], pred_masks[...,s_i,:])))
            for s_i in range(s):
                resArr.append(list(dice_coef_myo_each(vol_masks[..., s_i, :], pred_masks[..., s_i, :])))



            resArr = np.transpose(resArr)
            np.savetxt(detail_eval, resArr, fmt='%s', delimiter=',')
Example #12
def create_submission(contours, data_path, output_path, contour_type='a'):
    if contour_type == 'a':
        weights = 'model_logs/temp_weights.hdf5'
    else:
        sys.exit('\ncontour type "%s" not recognized\n' % contour_type)
    num_phases = 9
    crop_size = 128
    input_shape = (num_phases, crop_size, crop_size, 1)
    num_classes = 3
    images, masks = export_all_contours(contours,
                                        data_path,
                                        output_path,
                                        crop_size,
                                        num_classes=num_classes,
                                        num_phases=num_phases)
    model = unet_model_time(input_shape,
                            downsize_filters_factor=2,
                            pool_size=(1, 2, 2),
                            n_labels=3,
                            initial_learning_rate=0.00001,
                            deconvolution=False,
                            weights=weights)

    pred_masks = model.predict(images, batch_size=8, verbose=1)
    print('\nEvaluating dev set ...')
    result = model.evaluate(images, masks, batch_size=8)
    result = np.round(result, decimals=10)
    print('\nDev set result {:s}:\n{:s}'.format(str(model.metrics_names),
                                                str(result)))
    num = 0

    for c_type in ['i', 'm']:
        for idx, ctr in enumerate(contours):
            img, mask = read_contour(ctr,
                                     data_path,
                                     num_classes,
                                     num_phases=1,
                                     num_phases_in_cycle=20,
                                     phase_dilation=1)
            p, h, w, d = img.shape
            if c_type == 'i':
                tmp = pred_masks[idx, 0, ..., 2]
            elif c_type == 'm':
                tmp = pred_masks[idx, 0, ..., 1]

            tmp = tmp[..., np.newaxis]
            tmp = reshape(tmp, to_shape=(h, w, d))
            tmp = np.where(tmp > 0.5, 255, 0).astype('uint8')
            tmp2, coords, hierarchy = cv2.findContours(tmp.copy(),
                                                       cv2.RETR_LIST,
                                                       cv2.CHAIN_APPROX_NONE)
            if not coords:
                print('\nNo detection in case: {:s}; image: {:d}'.format(
                    ctr.case, ctr.img_no))
                coords = np.ones((1, 1, 1, 2), dtype='int')

            if c_type == 'i':
                man_filename = ctr.ctr_endo_path[ctr.ctr_endo_path.
                                                 rfind('\\') + 1:]
            elif c_type == 'm':
                man_filename = ctr.ctr_epi_path[ctr.ctr_epi_path.rfind('\\') +
                                                1:]

            auto_filename = man_filename.replace('manual', 'auto')
            img_filename = re.sub(r'-[io]contour-manual.txt', '.dcm',
                                  man_filename)
            man_full_path = os.path.join(save_dir, ctr.case, 'contours-manual',
                                         'IRCCI-expert')
            auto_full_path = os.path.join(save_dir, ctr.case, 'contours-auto',
                                          'FCN')
            img_full_path = os.path.join(save_dir, ctr.case, 'DICOM')
            dcm = 'IM-0001-%04d.dcm' % (ctr.img_no)
            # dcm = 'IM-%s-%04d.dcm' % (SAX_SERIES[ctr.case], ctr.img_no)
            dcm_path = os.path.join(data_path, ctr.case, 'DICOM', dcm)
            overlay_full_path = os.path.join(save_dir, ctr.case, 'Overlay')
            for dirpath in [
                    man_full_path, auto_full_path, img_full_path,
                    overlay_full_path
            ]:
                if not os.path.exists(dirpath):
                    os.makedirs(dirpath)
                if 'DICOM' in dirpath:
                    src = dcm_path
                    dst = os.path.join(dirpath, img_filename)
                    shutil.copyfile(src, dst)
                elif 'Overlay' in dirpath:
                    draw_result(ctr, data_path, overlay_full_path, c_type,
                                coords)
                else:
                    dst = os.path.join(auto_full_path, auto_filename)
                    if not os.path.exists(auto_full_path):
                        os.makedirs(auto_full_path)
                    with open(dst, 'wb') as f:
                        for cd in coords:
                            cd = np.squeeze(cd)
                            if cd.ndim == 1:
                                np.savetxt(f, cd, fmt='%d', delimiter=' ')
                            else:
                                for coord in cd:
                                    np.savetxt(f,
                                               coord,
                                               fmt='%d',
                                               delimiter=' ')

        print('\nNumber of multiple detections: {:d}'.format(num))
        dst_eval = os.path.join(save_dir, 'evaluation_{:s}.txt'.format(c_type))
        with open(dst_eval, 'wb') as f:
            f.write(('Dev set result {:s}:\n{:s}'.format(
                str(model.metrics_names), str(result))).encode('utf-8'))

        # Detailed evaluation:
        detail_eval = os.path.join(save_dir,
                                   'evaluation_detail_{:s}.csv'.format(c_type))
        evalEndoArr = dice_coef_endo_each(masks, pred_masks)
        evalMyoArr = dice_coef_myo_each(masks, pred_masks)
        caseArr = [ctr.case for ctr in contours]
        imgArr = [ctr.img_no for ctr in contours]
        resArr = np.transpose([caseArr, imgArr, evalEndoArr, evalMyoArr])
        np.savetxt(detail_eval, resArr, fmt='%s', delimiter=',')
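
The per-image CSV written above has one row per contour: case, image number, endocardial Dice, myocardial Dice. A hypothetical post-processing step that averages it per case (the file name is an assumption):

import csv
from collections import defaultdict

per_case = defaultdict(list)
with open('evaluation_detail_i.csv') as f:  # or evaluation_detail_m.csv
    for case, _, endo, myo in csv.reader(f):
        per_case[case].append((float(endo), float(myo)))

for case, vals in sorted(per_case.items()):
    endo_mean = sum(v[0] for v in vals) / len(vals)
    myo_mean = sum(v[1] for v in vals) / len(vals)
    print('{:s}: endo Dice {:.3f}, myo Dice {:.3f}'.format(case, endo_mean, myo_mean))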