Example #1
def save_result(complnet_path, dataset_path):
    #-------------------------------------------------------------
    compl_model = load_compl_model(complnet_path, (None,None,1))
    global cnet_path, index
    cnet_path = complnet_path
    index = 0
    #-------------------------------------------------------------
    paths = list(utils.file_paths(dataset_path))
    mask_paths = list(filter(lambda s: 'mask' in s, paths))
    #answer_paths = list(filter(lambda s: 'clean' in s, paths))
    answer_paths = list(filter(lambda s: not('mask' in s), paths))
    #print(len(answer_paths),answer_paths,flush=True)

    test_infos, similarities, errors, masked_ssims, full_ssims = [],[],[],[],[]
    for path_tup in path_tuples(answer_paths, mask_paths):
        similarity, error, masked_ssim, full_ssim\
            = scores(compl_model, *path_tup2img_tup(*path_tup))
        test_infos.append(path_tup)
        similarities.append(float(similarity))  # np.asscalar was removed in NumPy 1.23
        errors.append(float(error))
        masked_ssims.append(float(masked_ssim))
        full_ssims.append(float(full_ssim))
        #-------------------------------------------------------------
    result = {'name' : (complnet_path.replace(os.sep,'_') 
                        + '+' + 
                        dataset_path.replace(os.sep,'_')),
              'cnet_path' : complnet_path,
              'dataset_path' : dataset_path,

              'mse ratio similarity mean' : sum(similarities) / len(similarities),
              'mse ratio error mean' : sum(errors) / len(errors),
              'masked ssim mean' : sum(masked_ssims) / len(masked_ssims),
              'full ssim mean' : sum(full_ssims) / len(full_ssims),

              'origin,answer,mask paths' : test_infos,
              'similarities' : similarities,
              'errors' : errors,
              'masked_ssims' : masked_ssims,
              'full_ssims' : full_ssims}

    with open(result['name']+'.yml','w') as f:
        f.write(yaml.dump(result))

    print(result['name'], end='|')
    print('mse ratio similarity mean = {:f} ({:f}%)'\
            .format(result['mse ratio similarity mean'], 
                    result['mse ratio similarity mean']*100),end='|')
    print('mse ratio error mean = {:f} ({:f}%)'\
            .format(result['mse ratio error mean'],
                    result['mse ratio error mean']*100),end='|')
    print('masked ssim mean = {:f}'.format(result['masked ssim mean']),end='|')
    print('full ssim mean = {:f}'.format(result['full ssim mean']),flush=True)
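# A hypothetical invocation (paths invented for illustration); the YAML
# filename follows from the 'name' field built above:
#
#   save_result('complnets/epoch_10.h5', 'data/testset')
#
# On POSIX this writes 'complnets_epoch_10.h5+data_testset.yml' and prints
# the four mean scores separated by '|'.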
Example #2
  make a list of image:mask pairs,
  shuffle the list, then move the images and masks into train/valid/test.
  NOTE: each mask is renamed to the same name as its image.

[synopsis]
  python crops_dir2dataset_dir.py crops_dir label_str
ex)
  python crops_dir2dataset_dir.py 35crops _mask_
''')

crops_dir = sys.argv[1]
crops_dir = pathlib.Path(crops_dir).parts[0]

label_str = sys.argv[2]

all_paths = list(utils.file_paths(crops_dir))
img_paths = sorted(filter(lambda p: label_str not in p, all_paths))
mask_paths = sorted(filter(lambda p: label_str in p, all_paths))

img_mask_pairs = list(zip(img_paths, mask_paths))
random.shuffle(img_mask_pairs)

# Make directory structure
# Make directory structure (pjoin is assumed to be the variadic os.path.join)
train_img_dir = pjoin(crops_dir, 'train', 'image')
train_label_dir = pjoin(crops_dir, 'train', 'label')

valid_img_dir = pjoin(crops_dir, 'valid', 'image')
valid_label_dir = pjoin(crops_dir, 'valid', 'label')

test_img_dir = pjoin(crops_dir, 'test', 'image')
test_label_dir = pjoin(crops_dir, 'test', 'label')
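# The snippet stops after naming the directories; the shuffle-and-move step
# described in the docstring might look like the sketch below. The 8:1:1
# split ratio and the use of shutil.move are assumptions; the mask renaming
# follows the NOTE above.
import os
import shutil

for d in (train_img_dir, train_label_dir, valid_img_dir,
          valid_label_dir, test_img_dir, test_label_dir):
    os.makedirs(d, exist_ok=True)

n_train = int(len(img_mask_pairs) * 0.8)
n_valid = int(len(img_mask_pairs) * 0.1)

for i, (img_path, mask_path) in enumerate(img_mask_pairs):
    if i < n_train:
        img_dir, label_dir = train_img_dir, train_label_dir
    elif i < n_train + n_valid:
        img_dir, label_dir = valid_img_dir, valid_label_dir
    else:
        img_dir, label_dir = test_img_dir, test_label_dir
    img_name = os.path.basename(img_path)
    shutil.move(img_path, pjoin(img_dir, img_name))
    # Rename the mask to the image's name, per the NOTE in the docstring.
    shutil.move(mask_path, pjoin(label_dir, img_name))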
Example #3
        r = bgr_img[:, :, 2]
        # Remove intersections so each pixel keeps only its
        # highest-priority color (priority: b > g > r).
        r_minus_bg = ((r != b) & (r != g)).astype(np.uint8) * r
        bgr_img[:, :, 2] = r_minus_bg
        g_minus_b = (g != b).astype(np.uint8) * g
        bgr_img[:, :, 1] = g_minus_b
    #cv2.imshow('intersection removed',bgr_img); cv2.waitKey(0)
    return bgr_img
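# A toy run of the intersection removal above (values invented): each pixel
# keeps only its highest-priority channel, with priority b > g > r.
#
#   import numpy as np
#   b = np.array([255,   0, 255], dtype=np.uint8)
#   g = np.array([255, 255,   0], dtype=np.uint8)
#   r = np.array([255, 255, 255], dtype=np.uint8)
#   ((r != b) & (r != g)).astype(np.uint8) * r   # -> [  0,   0,   0]
#   (g != b).astype(np.uint8) * g                # -> [  0, 255,   0]
#   # b is untouched: pixel 0 stays blue, pixel 1 green, pixel 2 blue.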


if __name__ == '__main__':
    mask_dir = sys.argv[1]
    mask_paths = list(utils.file_paths(mask_dir))

    # If all three channels of the first mask are identical, treat the
    # whole directory as grayscale.
    check_img = cv2.imread(mask_paths[0])
    if (np.array_equal(check_img[:, :, 0], check_img[:, :, 1])
            and np.array_equal(check_img[:, :, 1], check_img[:, :, 2])):
        img_type = cv2.IMREAD_GRAYSCALE
    else:
        img_type = cv2.IMREAD_COLOR

    leaving_channels = sys.argv[2] if len(sys.argv) == 3 else 'rgb'

    masks \
    = pipe(cmap(lambda path: cv2.imread(path, img_type)),
           cfilter(lambda img: img is not None),
           cmap(binarization),
           cmap(dilation),
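# Examples #3, #5 and #6 build their pipelines from pipe, cmap, cfilter,
# cflatMap and crepeat, which are not shown. A minimal sketch consistent
# with the call sites, assuming curried wrappers over the standard iterator
# tools (the real utils module may differ):
import itertools
from functools import reduce

def pipe(*fns):
    # Left-to-right composition: pipe(f, g)(x) == g(f(x)).
    return lambda x: reduce(lambda acc, fn: fn(acc), fns, x)

def cmap(f):        # curried map
    return lambda xs: map(f, xs)

def cfilter(pred):  # curried filter
    return lambda xs: filter(pred, xs)

def cflatMap(f):    # curried flat-map: map, then flatten one level
    return lambda xs: itertools.chain.from_iterable(map(f, xs))

def crepeat(n):     # yields its argument n times (used with cflatMap)
    return lambda x: itertools.repeat(x, n)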
Example #4
                                      history.history['val_loss'])),
                    val_acc=list(
                        map(float, history.history['val_mean_iou'])),
                    test_loss=float(test_metrics[0]),  # np.asscalar was removed in NumPy 1.23
                    test_acc=float(test_metrics[1]),
                    train_time=train_time_str,
                )))

    modulo = 2**num_maxpool  # each of the num_maxpool poolings halves spatial dims
    evaluator.eval_and_save_result(
        dataset_dir,
        save_model_path,
        eval_result_dirpath,
        files_2b_copied=[history_path, experiment_yml_path],
        num_filters=num_filters,
        num_maxpool=num_maxpool,
        modulo=modulo)
    #--------------------------------------------------------------------


if __name__ == '__main__':
    with open('experiment_log', 'w') as log:
        for experiment_path in human_sorted(file_paths(sys.argv[1])):
            try:
                timer = ElapsedTimer(experiment_path)
                main(experiment_path)
                log.write(timer.elapsed_time())
            except AssertionError as error:
                print(str(error))
                log.write(str(error))
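# ElapsedTimer (also used in Example #5) is not defined in these snippets.
# From the call sites, constructing it starts the clock and elapsed_time()
# returns a printable summary; a minimal sketch under those assumptions:
import time

class ElapsedTimer:
    def __init__(self, label=''):
        self.label = label
        self.start = time.time()

    def elapsed_time(self):
        # One-line, log-friendly summary of the elapsed wall-clock time.
        return '{} elapsed: {:.1f}s\n'.format(self.label,
                                              time.time() - self.start)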
Example #5
def main(src_imgs_path, dataset_name, num_crop, crop_size, chk_size):
    if num_crop != 0:
        rand_sqr_crop = img2rand_sqr_crops(crop_size)
        gen \
        = pipe(utils.file_paths,
               cmap(lambda path: cv2.imread(path)),
               cfilter(lambda img: img is not None),
               cfilter(lambda img: is_cuttable(img, crop_size)),
               cmap(utils.slice1channel),
               cflatMap(crepeat(num_crop)),
               cmap(lambda img: rand_sqr_crop(img)),
               cmap(lambda img: (img / 255).astype(np.float32)),
               lambda imgs: split_every(chk_size, imgs))
    else:
        # num_crop == 0 means: take every square crop from each image.
        print('num_crop == 0: cropping each image exhaustively')
        num_crop = 100  # rough upper bound, only used for the size/progress estimates
        gen \
        = pipe(utils.file_paths,
               cmap(lambda path: cv2.imread(path)),
               cfilter(lambda img: img is not None),
               cfilter(lambda img: is_cuttable(img, crop_size)),
               cmap(utils.slice1channel),
               cflatMap(lambda img: img2sqr_crops(img, crop_size)),
               cmap(lambda img: (img / 255).astype(np.float32)),
               lambda imgs: split_every(chk_size, imgs))

    print(src_imgs_path)
    expected_num_imgs = len(list(utils.file_paths(src_imgs_path))) * num_crop
    print('-------------- SUMMARY -------------')
    print('      dataset name = ', dataset_name)
    print('      size of crop = ', crop_size)
    print(' num crops per img = ', num_crop)
    print(' expected num imgs = ', expected_num_imgs)
    print('        chunk size = ', chk_size)

    f = h5py.File(dataset_name, 'w')
    timer = utils.ElapsedTimer()
    #-------------------------------------------------------------
    f.create_dataset('images', (expected_num_imgs, crop_size, crop_size, 1),
                     maxshape=(None, crop_size, crop_size, 1),
                     chunks=(chk_size, crop_size, crop_size, 1))

    mean = 0
    num_img_elems = (crop_size**2)
    for chk_no, chunk in tqdm(enumerate(gen(src_imgs_path)),
                              total=expected_num_imgs // chk_size):
        beg_idx = chk_no * chk_size
        f['images'][beg_idx:beg_idx + len(chunk)] = chunk
        mean = iter_mean(mean, beg_idx * num_img_elems, np.sum(chunk),
                         len(chunk) * num_img_elems)
    f.create_dataset('mean_pixel_value', data=mean)

    last_chunk_size = len(chunk)
    actual_num_img = chk_no * chk_size + last_chunk_size
    if actual_num_img != expected_num_imgs:
        print(expected_num_imgs, ' != ', actual_num_img)
        print('dataset resized!')
        f['images'].resize((actual_num_img, crop_size, crop_size, 1))

    # [mean test code]
    #li = list(flatten(gen(src_imgs_path)))
    #real_mean = np.mean(li)
    #print('real MEAN:', real_mean)
    #print(len(li))
    #print('saved MEAN:', f['mean_pixel_value'][()])
    #-------------------------------------------------------------
    f.close()
    print('------------------------------------')
    print('dataset generated successfully.')
    msg = timer.elapsed_time()
    '''
    import mailing
    mailing.send_mail_to_kur(
        'Dataset generated successfully.',msg
    )
    '''

    # [load test code]
    f = h5py.File(dataset_name, 'r')
    #-------------------------------------------------------------
    print('images:', f['images'].shape)
    print('loaded MEAN:', f['mean_pixel_value'][()])
    #for i in range(f['images'].shape[0] ):
    #cv2.imshow('img',f['images'][i]);cv2.waitKey(0)
    cv2.imshow('img', f['images'][-1])
    cv2.waitKey(0)
    #-------------------------------------------------------------
    f.close()
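# iter_mean (used in the chunk loop above) accumulates the dataset mean
# incrementally. Its definition is not shown; judging from the call site
#   iter_mean(mean, beg_idx * num_img_elems, np.sum(chunk), len(chunk) * num_img_elems)
# a running-mean update would be, as a sketch:
def iter_mean(old_mean, old_count, new_sum, new_count):
    # Combined mean = (old_mean * old_count + new_sum) / (old_count + new_count).
    total = old_count + new_count
    return (old_mean * old_count + new_sum) / total if total else 0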
Example #6
parser.add_argument('-n', '--num_crop', type=int, 
                    help='number of crops per 1 original image.') 
parser.add_argument('-c', '--chk_size', type=int, 
                    help='size of chunk of data for performance.') 

if __name__ == '__main__':
    #unittest.main()
    args = parser.parse_args()
    src_imgs_path = args.src_imgs_path  # e.g. 'H:\\DATA2\\f'
    dataset_name = args.dataset_name    # e.g. 'gray128.h5'
    num_crop = args.num_crop            # e.g. 3
    crop_size = args.crop_size          # e.g. 128
    chk_size = args.chk_size            # e.g. 100

    print(src_imgs_path)
    expected_num_imgs = len(list(utils.file_paths(src_imgs_path))) * num_crop
    print('-------------- SUMMARY -------------')
    print('      dataset name = ', dataset_name)
    print('      size of crop = ', crop_size)
    print(' num crops per img = ', num_crop)
    print(' expected num imgs = ', expected_num_imgs)
    print('        chunk size = ', chk_size)

    img2crop = img2sqr_crop(crop_size)  # crop size comes from the CLI, not hard-coded to 128
    gen = pipe(utils.file_paths,
               cmap(lambda path: cv2.imread(path)),
               cfilter(lambda img: img is not None),
               cfilter(lambda img: is_cuttable(img, crop_size)),
               cmap(slice1channel),
               cflatMap(crepeat(num_crop)),
               cmap(lambda img: img2crop(img)),
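# img2sqr_crop, img2rand_sqr_crops and is_cuttable are not shown. From how
# they are called here and in Example #5, plausible minimal sketches (the
# real helpers may crop differently):
import numpy as np

def is_cuttable(img, size):
    # True if at least one size x size crop fits in the image.
    h, w = img.shape[:2]
    return h >= size and w >= size

def img2rand_sqr_crops(size):
    # Returns a function mapping an image to one random size x size crop.
    def crop(img):
        h, w = img.shape[:2]
        y = np.random.randint(0, h - size + 1)
        x = np.random.randint(0, w - size + 1)
        return img[y:y + size, x:x + size]
    return crop

def img2sqr_crop(size):
    # Deterministic analogue: here assumed to be a center crop.
    def crop(img):
        h, w = img.shape[:2]
        y, x = (h - size) // 2, (w - size) // 2
        return img[y:y + size, x:x + size]
    return crop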
Example #7
def main(complnet_dir, dataset_dir):
    complnet_paths = utils.file_paths(complnet_dir)
    #complnet_paths = list(human_sorted(complnet_paths))
    complnet_paths = list(reversed(human_sorted(complnet_paths)))
    for complnet_path in tqdm(complnet_paths):
        save_result(complnet_path, dataset_dir)
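# human_sorted (also used in Example #4) presumably sorts paths in natural
# order, so that 'epoch_2' comes before 'epoch_10'. A common regex-based
# sketch, assuming that behavior:
import re

def human_sorted(iterable):
    # Numeric runs compare as numbers instead of character by character.
    def key(s):
        return [int(tok) if tok.isdigit() else tok
                for tok in re.split(r'(\d+)', s)]
    return sorted(iterable, key=key)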