def main():

    if IS_TRAINING:

        original_imgs_path = list_images(
            'D:/ImageDatabase/Image_fusion_MSCOCO/original/')

        for ssim_weight, model_save_path in zip(SSIM_WEIGHTS,
                                                MODEL_SAVE_PATHS):
            print('\nBegin to train the network ...\n')
            train_recons(original_imgs_path,
                         model_save_path,
                         model_pre_path,
                         ssim_weight,
                         EPOCHES,
                         BATCH_SIZE,
                         debug=True)

            print('\nSuccessfully! Done training...\n')
    else:
        if IS_VIDEO:
            ssim_weight = SSIM_WEIGHTS[0]
            model_path = MODEL_SAVE_PATHS[0]

            IR_path = list_images('video/1_IR/')
            VIS_path = list_images('video/1_VIS/')
            output_save_path = 'video/fused' + str(ssim_weight) + '/'
            generate(IR_path,
                     VIS_path,
                     model_path,
                     model_pre_path,
                     ssim_weight,
                     0,
                     IS_VIDEO,
                     'addition',
                     output_path=output_save_path)
        else:
            print('\nBegin to generate pictures ...\n')

            path = 'images/IV_images/'
            for i in range(20):
                index = i + 1
                infrared = path + 'IR' + str(index) + '.png'
                visible = path + 'VIS' + str(index) + '.png'
                fusion_type = 'addition'
                # fusion_type = 'l1'
                for ssim_weight, model_path in zip(SSIM_WEIGHTS,
                                                   MODEL_SAVE_PATHS):
                    output_save_path = 'outputs/fused_deepdense_bs2_epoch4_all_l1_focus_' + str(
                        ssim_weight)

                    generate(infrared,
                             visible,
                             model_path,
                             model_pre_path,
                             ssim_weight,
                             index,
                             IS_VIDEO,
                             type=fusion_type,
                             output_path=output_save_path)
Example #2
def main():

    if IS_TRAINING:
        training_imgs_paths = list_images(TRAINING_IMGS_PATH)

        train(training_imgs_paths,
              ENCODER_WEIGHTS_PATH,
              MODEL_SAVE_PATH,
              autoencoder_levels=AUTUENCODER_LEVELS_TRAIN,
              debug=DEBUG,
              logging_period=LOGGING_PERIOD)
        
        print('\n>>>>>> Successfully done training...\n')

    else:
        contents_path = list_images(CONTENTS_DIR)
        styles_path = list_images(STYLES_DIR)
        model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX

        stylize(contents_path, 
                styles_path, 
                OUTPUT_DIR, 
                ENCODER_WEIGHTS_PATH, 
                model_path, 
                style_ratio=STYLE_RATIO,
                repeat_pipeline=REPEAT_PIPELINE,
                autoencoder_levels=AUTUENCODER_LEVELS_INFER)

        print('\n>>>>>> Successfully done stylizing...\n')
Example #3
def inference(opt):
    content_img_list = utils.list_images(opt.content_img_dir)
    style_img_list = utils.list_images(opt.style_img_dir)

    with tf.Graph().as_default(), tf.Session() as sess:
        # build the dataflow graph
        content_img = tf.placeholder(tf.float32, shape=(1, None, None, 3), name='content_img')
        style_img   = tf.placeholder(tf.float32, shape=(1, None, None, 3), name='style_img')

        model = Model(opt.checkpoint_encoder)

        generated_img = model.transform(content_img, style_img)

        sess.run(tf.global_variables_initializer())

        # restore the trained model and run the style transferring
        saver = tf.train.Saver()
        saver.restore(sess, opt.checkpoint_model)

        outputs = []
        for content_img_path in content_img_list:
            # load into fresh variables so the placeholders above are not shadowed
            content = utils.get_images(content_img_path, height=opt.img_size, width=opt.img_size)
            for style_img_path in style_img_list:
                style = utils.get_images(style_img_path)

                result = sess.run(generated_img, feed_dict={
                    content_img: content,
                    style_img: style
                    })

                outputs.append(result[0])

        utils.save_images(outputs, opt.content_img_dir, opt.style_img_dir, opt.output_dir, suffix=opt.style_weight)

    return outputs
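A minimal driver for inference might look like the sketch below; the opt attribute names mirror the fields the function reads, but the concrete paths and values are assumptions.

from types import SimpleNamespace

# hypothetical settings; every path here is a placeholder
opt = SimpleNamespace(content_img_dir='images/content',
                      style_img_dir='images/style',
                      checkpoint_encoder='vgg19_weights.npz',
                      checkpoint_model='models/style.ckpt',
                      img_size=512,
                      output_dir='outputs',
                      style_weight=1.0)
outputs = inference(opt)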
Example #4
def main():

    if IS_TRAINING:

        content_imgs_path = list_images(TRAINING_CONTENT_DIR)
        style_imgs_path   = list_images(TRAINING_STYLE_DIR)

        for style_weight, model_save_path in zip(STYLE_WEIGHTS, MODEL_SAVE_PATHS):
            print('\n>>> Begin to train the network with the style weight: %.2f\n' % style_weight)

            train(style_weight, content_imgs_path, style_imgs_path, ENCODER_WEIGHTS_PATH, 
                  model_save_path, logging_period=LOGGING_PERIOD, debug=True)

        print('\n>>> Successfully! Done all training...\n')

    else:

        content_imgs_path = list_images(INFERRING_CONTENT_DIR)
        style_imgs_path   = list_images(INFERRING_STYLE_DIR)

        for style_weight, model_save_path in zip(STYLE_WEIGHTS, MODEL_SAVE_PATHS):
            print('\n>>> Begin to stylize images with style weight: %.2f\n' % style_weight)

            stylize(content_imgs_path, style_imgs_path, OUTPUTS_DIR, 
                    ENCODER_WEIGHTS_PATH, model_save_path, 
                    suffix='-' + str(style_weight))

        print('\n>>> Successfully! Done all stylizing...\n')
Example #5
def main():

    if IS_TRAINING:

        content_targets = list_images('./contents')  # path to training dataset

        for style in list(STYLES.keys()):

            print('\nBegin to train the network with the style "%s"...\n' %
                  style)

            content_weight, style_weight, tv_weight = STYLES[style]

            style_target = 'images/style/' + style + '.jpg'
            model_save_path = 'models/' + style + '.ckpt-done'

            content_loss, style_loss, tv_loss, total_loss = train(
                content_targets,
                style_target,
                content_weight,
                style_weight,
                tv_weight,
                vgg_path=VGG_PATH,
                save_path=model_save_path)
            x_axis = [i * 100 for i in range(len(content_loss))]
            plt.plot(x_axis, content_loss, label='content_loss')
            plt.plot(x_axis, style_loss, label='style_loss')
            plt.plot(x_axis, total_loss, label='total_loss')

            plt.legend()
            plt.xlabel('iterations')
            plt.ylabel('loss')
            plt.savefig('losses_' + style + '.png')
            plt.clf()

            print('\nSuccessfully! Done training style "%s"...\n' % style)

        print('Successfully finish all the training...\n')
    else:

        for style in list(STYLES.keys()):

            print('\nBegin to generate pictures with the style "%s"...\n' %
                  style)

            model_path = 'models/' + style + '.ckpt-done'
            output_save_path = 'outputs'

            content_targets = list_images('images/content')
            generated_images = generate(content_targets,
                                        model_path,
                                        save_path=output_save_path,
                                        prefix=style + '-')

            print('\ntype(generated_images):', type(generated_images))
            print('\nlen(generated_images):', len(generated_images), '\n')
Example #6
def main():

    if IS_TRAINING:

        content_imgs_path = list_images(
            '../MS_COCO')  # path to training content dataset
        style_imgs_path = list_images(
            '../WikiArt')  # path to training style dataset

        for style_weight, model_save_path in zip(STYLE_WEIGHTS,
                                                 MODEL_SAVE_PATHS):
            print(
                '\nBegin to train the network with the style weight: %.2f ...\n'
                % style_weight)

            train(style_weight,
                  content_imgs_path,
                  style_imgs_path,
                  ENCODER_WEIGHTS_PATH,
                  model_save_path,
                  debug=True)

            print('\nSuccessfully! Done training...\n')
    else:

        for style_name in STYLES:

            # print('\nUse "%s.jpg" as style to generate images:' % style_name)

            for style_weight, model_save_path in zip(STYLE_WEIGHTS,
                                                     MODEL_SAVE_PATHS):
                # print('\nBegin to generate images with the style weight: %.2f ...\n' % style_weight)

                # contents_path = list_images('images/content')
                contents_path = [user_content_path]
                # style_path    = 'images/style/' + style_name + '.jpg'
                style_path = style_name
                # output_save_path = 'outputs'
                output_save_path = save_path
                style_name_only = style_name.split('/')[-1].split('.')[0]
                content_name_only = contents_path[0].split('/')[-1]
                # generated_images = generate(contents_path, style_path, ENCODER_WEIGHTS_PATH, model_save_path,
                #     output_path=output_save_path, prefix=style_name + '-', suffix='-' + str(style_weight))
                generated_images = generate(contents_path,
                                            style_path,
                                            ENCODER_WEIGHTS_PATH,
                                            model_save_path,
                                            output_path=output_save_path,
                                            prefix=style_name_only + '-',
                                            suffix='')

                # print('\nlen(generated_images): %d\n' % len(generated_images))

                print(save_path + '/' + style_name_only + '-' +
                      content_name_only)
Example #7
def main():

    if IS_TRAINING:

        content_imgs_path = list_images(TRAINING_CONTENT_DIR)
        style_imgs_path = list_images(TRAINING_STYLE_DIR)

        for style_weight, model_save_path in zip(STYLE_WEIGHTS,
                                                 MODEL_SAVE_PATHS):
            print(
                '\n>>> Begin to train the network with the style weight: %.2f\n'
                % style_weight)

            train(style_weight,
                  content_imgs_path,
                  style_imgs_path,
                  ENCODER_WEIGHTS_PATH,
                  model_save_path,
                  logging_period=LOGGING_PERIOD,
                  debug=True)

        print('\n>>> Successfully! Done all training...\n')

    else:
        # All images are loaded at once, so an OOM error can occur
        # when the content and style image sets are too large.

        content_imgs_path = list_images(INFERRING_CONTENT_DIR)
        print("content_imgs_path:", content_imgs_path)

        # keep these as lists so len() below counts images, not characters
        content_imgs_path = list_images(INFERRING_CONTENT_DIR)[18:19]
        style_imgs_path = list_images(INFERRING_STYLE_DIR)[:12]

        for style_weight, model_save_path in zip(STYLE_WEIGHTS,
                                                 MODEL_SAVE_PATHS):
            print('\n>>> Begin to stylize images with style weight: %.2f\n' %
                  style_weight)

            start_time = time.time()
            outputs = stylize(content_imgs_path,
                              style_imgs_path,
                              OUTPUTS_DIR,
                              ENCODER_WEIGHTS_PATH,
                              model_save_path,
                              suffix='-' + str(style_weight))
            end_time = time.time()
            sum_time = end_time - start_time
            avg_time = sum_time / (len(content_imgs_path) *
                                   len(style_imgs_path))
            print("sum_time:", sum_time, "content_imgs size:",
                  len(content_imgs_path), "style_imgs size:",
                  len(style_imgs_path))

        print('\n>>> Successfully! Done all stylizing in {}s'.format(avg_time))
Example #8
def main():

	if IS_TRAINING:

		original_imgs_path = list_images('D:/Database/Image_fusion_MSCOCO/original/')
		validation_imgs_path = list_images('./validation/')

		for ssim_weight, model_save_path in zip(SSIM_WEIGHTS, MODEL_SAVE_PATHS):
			print('\nBegin to train the network ...\n')
			train_recons(original_imgs_path, validation_imgs_path, model_save_path, model_pre_path, ssim_weight, EPOCHES, BATCH_SIZE, IS_Validation, debug=True)

			print('\nSuccessfully! Done training...\n')
	else:
		if IS_VIDEO:
			ssim_weight = SSIM_WEIGHTS[0]
			model_path = MODEL_SAVE_PATHS[0]

			IR_path = list_images('video/1_IR/')
			VIS_path = list_images('video/1_VIS/')
			output_save_path = 'video/fused'+ str(ssim_weight) +'/'
			generate(IR_path, VIS_path, model_path, model_pre_path,
			         ssim_weight, 0, IS_VIDEO, 'addition', output_path=output_save_path)
		else:
			ssim_weight = SSIM_WEIGHTS[2]
			model_path = MODEL_SAVE_PATHS[2]
			print('\nBegin to generate pictures ...\n')
			# path = 'images/IV_images/'
			path = 'images/MF_images/color/'
			for i in range(1):
				index = i + 1
				# infrared = path + 'IR' + str(index) + '.png'
				# visible = path + 'VIS' + str(index) + '.png'

				# RGB images
				infrared = path + 'lytro-2-A.jpg'
				visible = path + 'lytro-2-B.jpg'

				# choose fusion layer
				fusion_type = 'addition'
				# fusion_type = 'l1'
				# for ssim_weight, model_path in zip(SSIM_WEIGHTS, MODEL_SAVE_PATHS):
				# 	output_save_path = 'outputs'
                #
				# 	generate(infrared, visible, model_path, model_pre_path,
				# 	         ssim_weight, index, IS_VIDEO, is_RGB, type = fusion_type, output_path = output_save_path)

				output_save_path = 'outputs'
				generate(infrared, visible, model_path, model_pre_path,
						 ssim_weight, index, IS_VIDEO, IS_RGB, type = fusion_type, output_path = output_save_path)
Example #9
def extract_signatures(dataset, outdir, model, preprocess=True):
    """Extracts signatures from all images in the dataset.

    Segmentation masks for each input image are generated and saved in the
    specified directory.

    Parameters:
        dataset (str) : Path of the test dataset.
        outdir (str) : Path to save the segmentation output.
        model (str) : Path of the extraction model to use.
        preprocess (bool) : Optional. Additional preprocessing happens before
                            feature extraction if True. Default is True.
    """
    # Load extraction model
    print("Loading segmentation model...")
    clf = joblib.load(model)

    # Get list of input files
    images = list_images(dataset)
    print("Found", len(images), "images. Starting segmentation...")

    # Create output directory if doesn't already exist
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    for image_f in tqdm(images):
        im = cv2.imread(image_f, 0)
        mask = extract_signature(im, clf)

        outfile = os.path.split(image_f)[1]
        outfile = os.path.splitext(outfile)[0] + ".png"
        outfile = os.path.join(outdir, outfile)
        cv2.imwrite(outfile, mask)
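A hypothetical invocation of extract_signatures; the paths below are placeholders, not part of the original project.

# assumed paths; point these at the real dataset, output directory and model file
extract_signatures(dataset='data/test_images',
                   outdir='output/masks',
                   model='models/signature_clf.pkl')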
Example #10
def extract(path, preprocess=False):
    """Extract features from all images in a directory.

    This function should be used to extract features for a single training class
    in the dataset.

    Parameters:
        path (str) : Location of the input directory.
        preprocess (bool) : Optional. If this is True, image is preprocessed
                            before feature extraction. Default is False.

    Returns:
        all extracted features as a Nx128 dimensional array, where N is the sum
        of number of all detected components in all images
    """
    components = None
    for image_f in tqdm(list_images(path)):
        try:
            # Open the image in OpenCV
            im = cv2.imread(image_f, 0)
            if im is None:
                raise IOError(f'{image_f} could not be opened.')

            for descriptors, idx in get_components(im, preprocess):
                component = np.vstack(descriptors)
                if components is None:
                    components = component
                else:
                    components = np.vstack((components, component))
        except Exception as e:
            print(e)

    return components
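A short usage sketch for extract, assuming a one-directory-per-class layout; the path is hypothetical.

# features is an Nx128 array aggregating descriptors over all images in the class
features = extract('data/train/genuine_signatures', preprocess=True)
print(features.shape)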
Example #11
def test_autoencoder(autoencoder_levels, model_save_path):
    
    input_imgs_paths = list_images(TEST_IMG_DIR)

    with tf.Graph().as_default(), tf.Session() as sess:

        input_img = tf.placeholder(
            tf.float32, shape=(1, None, None, 3), name='input_img')

        stn = StyleTransferNet(ENCODER_WEIGHTS_PATH, autoencoder_levels)

        input_encs = [encoder.encode(input_img)[0] for encoder in stn.encoders]

        output_imgs = [decoder.decode(input_enc) for decoder, input_enc in zip(stn.decoders, input_encs)]

        sess.run(tf.global_variables_initializer())

        # restore the trained model and run the reconstruction
        saver = tf.train.Saver(var_list=tf.trainable_variables())
        saver.restore(sess, model_save_path)

        for input_img_path in input_imgs_paths:

            img = get_images(input_img_path)

            for autoencoder_id, output_img in zip(autoencoder_levels, output_imgs):

                out = sess.run(output_img, feed_dict={input_img: img})

                save_single_image(out[0], input_img_path, OUTPUT_DIR, prefix=str(autoencoder_id) + '-')
Example #12
def main():

    if IS_TRAINING:

        content_imgs_path = list_images(TRAINING_CONTENT_DIR)
        style_imgs_path = list_images(TRAINING_STYLE_DIR)

        tf.reset_default_graph()

        for style_weight, content_weight, lambda1, lambda2, model_save_path in zip(
                STYLE_WEIGHTS, CONTENT_WEIGHTS, LAMBDA1, LAMBDA2,
                MODEL_SAVE_PATHS):
            print('\n>>> Begin to train the network')

            train(style_weight,
                  content_weight,
                  lambda1,
                  lambda2,
                  content_imgs_path,
                  style_imgs_path,
                  ENCODER_WEIGHTS_PATH,
                  model_save_path,
                  logging_period=LOGGING_PERIOD,
                  debug=True)

        print('\n>>> Successfully! Done all training...\n')

    else:

        content_imgs_path = list_images(INFERRING_CONTENT_DIR)
        style_imgs_path = list_images(INFERRING_STYLE_DIR)

        for style_weight, content_weight, lambda1, lambda2, model_save_path in zip(
                STYLE_WEIGHTS, CONTENT_WEIGHTS, LAMBDA1, LAMBDA2,
                MODEL_SAVE_PATHS):
            print('\n>>> Begin to stylize images')

            stylize(content_imgs_path,
                    style_imgs_path,
                    OUTPUTS_DIR,
                    ENCODER_WEIGHTS_PATH,
                    model_save_path,
                    suffix='-' + str(style_weight) + '-' +
                    str(content_weight) + '-' + str(lambda1) + '-' +
                    str(lambda2))

        print('\n>>> Successfully! Done all stylizing...\n')
Example #13
def main():
	original_imgs_path = utils.list_images(args.dataset)
	train_num = 80000
	original_imgs_path = original_imgs_path[:train_num]
	random.shuffle(original_imgs_path)
	for i in range(2,3):
		# i = 3
		train(i, original_imgs_path)
Example #14
def main():
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    original_imgs_path = utils.list_images(args.dataset)
    train_num = 40000
    original_imgs_path = original_imgs_path[:train_num]
    random.shuffle(original_imgs_path)
    # for i in range(5):
    i = 2
    train(i, original_imgs_path)
Example #15
def main():
    content_imgs_path = list_images(INFERRING_CONTENT_DIR)
    style_imgs_path = list_images(INFERRING_STYLE_DIR)

    for style_weight, model_save_path in product(STYLE_WEIGHTS,
                                                 MODEL_SAVE_PATHS):
        print('\n>>> Begin to stylize images with style weight: %.2f\n' %
              style_weight)

        stylize(content_imgs_path,
                style_imgs_path,
                OUTPUTS_DIR,
                ENCODER_WEIGHTS_PATH,
                model_save_path,
                suffix='-' + str(style_weight),
                alpha=style_weight)

    print('\n>>> Successfully! Done all stylizing...\n')
Example #16
File: cdbn.py Project: dfdx/cdbn
def run():
    import cv2
    im = cv2.imread(list_images('../data/gender/female')[2])
    im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    im = cv2.equalizeHist(im)
    im = cv2.resize(im, (96, 96))
    kernels = gabor_kernels()
    filtered = [nd.convolve(im, k, mode='wrap') for k in kernels]
    smartshow(filtered)
    return filtered
Example #18
def main():

    if IS_TRAINING:

        original_imgs_path = list_images(
            'D:/ImageDatabase/Image_fusion_MSCOCO/original/')

        for ssim_weight, model_save_path in zip(SSIM_WEIGHTS,
                                                MODEL_SAVE_PATHS):
            print('\nBegin to train the network ...\n')
            train_recons(original_imgs_path,
                         model_save_path,
                         model_pre_path,
                         ssim_weight,
                         EPOCHES,
                         BATCH_SIZE,
                         debug=True)

            print('\nSuccessfully! Done training...\n')
    else:

        # sourceA_name = 'VIS'
        # sourceB_name = 'IR'
        # print('\nBegin to generate pictures ...\n')
        #
        # content_name = 'images/IV_images/' + sourceA_name
        # style_name   = 'images/IV_images/' + sourceB_name

        sourceA_name = 'image'
        sourceB_name = 'image'
        print('\nBegin to generate pictures ...\n')

        content_name = 'images/multifocus_images/' + sourceA_name
        style_name = 'images/multifocus_images/' + sourceB_name

        # fusion_type = 'addition'
        fusion_type = 'l1'
        # fusion_type = 'weight' # Failed
        for ssim_weight, model_save_path in zip(SSIM_WEIGHTS,
                                                MODEL_SAVE_PATHS):
            output_save_path = 'outputs/fused_deepdense_bs2_epoch4_all_l1_focus_' + str(
                ssim_weight)
            for i in range(20):
                index = i + 1
                content_path = content_name + str(index) + '_left.png'
                style_path = style_name + str(index) + '_right.png'
                generate(content_path,
                         style_path,
                         model_save_path,
                         model_pre_path,
                         ssim_weight,
                         index,
                         fusion_type,
                         output_path=output_save_path)
Example #19
def run(image_path: str, weights_path: str):

    config_logger(level='DEBUG', to_console=True)

    person_detector = PersonDetector(weights_path=weights_path,
                                     resize_height=192)

    images_paths = list_images(image_path)

    logger.info('Starting analysis...')
    logger.info('Press "space" key to display next result. Press "q" to quit.')

    max_image_size = 1920

    drawer = Drawer()
    drawer.font_scale = 0.5
    drawer.font_linewidth = 1

    for image_path in images_paths:
        image_name = path.basename(image_path)

        logger.info(f'Analyzing image {image_name}...')

        image = cv.imread(image_path)

        if image is None:
            logger.warning(f'Unable to open image file {image_path}')
            continue

        h, w = image.shape[0:2]

        logger.info(f'Image loaded. Image size is {w}x{h} pixels.')

        if max(w, h) > max_image_size:
            image, scale = resize(image, max_image_size)
            h, w = image.shape[0:2]
            logger.info(f'Image resized to {w}x{h} pixels.')

        tic = time()
        boxes, scores = person_detector.detect(image)
        toc = time()
        logger.info(f'Found {len(boxes)} persons in {(toc - tic):.3f} s.')

        for ind, box in enumerate(boxes):
            drawer.draw_labeled_box(image, f'{int(100*scores[ind])}%', box)

        cv.imshow(f'Persons in {image_name}', image)

        ret = cv.waitKey()
        if ret == ord(' '):
            cv.destroyAllWindows()
        elif ret == ord('q'):
            cv.destroyAllWindows()
            break
Example #20
def main():

    if IS_TRAINING:

        original_imgs_path = list_images('./trainset/')

        print('Begin to train the network ...')
        train_recons(original_imgs_path,
                     MODEL_SAVE_PATH,
                     model_pre_path,
                     EPOCHES,
                     BATCH_SIZE,
                     debug=True,
                     logging_period=10)
        print('Successfully! Done training...')
    else:
        model_path = MODEL_SAVE_PATH
        print('Begin to generate pictures ...')

        from os import listdir
        img_path = './IV_images/'
        images = sorted(listdir(img_path))
        # ir_images = images[::2]
        # vi_images = images[1::2]
        ir_images = []
        vi_images = []
        for img in images:
            if 'IR' in img:
                ir_images.append(img)
            else:
                vi_images.append(img)

        for ir, vi in zip(ir_images, vi_images):
            index = ir[2:4]

            infrared = img_path + ir
            visible = img_path + vi

            # choose fusion layer
            # fusion_type = 'CBF'
            fusion_type = 'wt'
            # fusion_type = 'addition'
            output_save_path = os.path.join('./outputs', fusion_type,
                                            str(nModel))

            generate(infrared,
                     visible,
                     model_path,
                     model_pre_path,
                     index,
                     type=fusion_type,
                     output_path=output_save_path)
Example #21
def main():

    if IS_TRAINING:

        content_targets = list_images('./MS_COCO') # path to training dataset

        for style in list(STYLES.keys()):

            print('\nBegin to train the network with the style "%s"...\n' % style)

            content_weight, style_weight, tv_weight = STYLES[style]

            style_target = 'images/style/' + style + '.jpg'
            model_save_path = 'models/' + style + '.ckpt-done'

            train(content_targets, style_target, content_weight, style_weight, tv_weight, 
                vgg_path=VGG_PATH, save_path=model_save_path, debug=True)

            print('\nSuccessfully! Done training style "%s"...\n' % style)

        print('Successfully finish all the training...\n')
    else:

        for style in list(STYLES.keys()):

            print('\nBegin to generate pictures with the style "%s"...\n' % style)

            model_path = 'models/' + style + '.ckpt-done'
            output_save_path = 'outputs'

            content_targets = list_images('images/content')
            generated_images = generate(content_targets, model_path, save_path=output_save_path, 
                prefix=style + '-')

            print('\ntype(generated_images):', type(generated_images))
            print('\nlen(generated_images):', len(generated_images), '\n')
Example #22
def GET(self):
    files, labels = list_images(TEST_DATASET)
    files_and_labels = list(zip(files, labels))
    shuffle(files_and_labels)
    files_and_labels = files_and_labels[0:10]
    files, labels = zip(*files_and_labels)
    files = list(files)
    labels = np.array(list(labels))
    basenames = list(map(lambda f: os.path.basename(f), files))
    preds = predict(files)
    sess = tf.Session()
    losses = [
        sess.run(emd(preds[i], labels[i])) for i in range(len(preds))
    ]
    loss = sess.run(emd(preds, labels))
    return render.random(basenames, preds, labels, losses, loss)
Example #23
File: cdbn.py Project: dfdx/cdbn
def run3():
    import cv2
    size = 96
    im_list = list(list_images('../data/gender/female'))
    X = np.zeros((len(im_list), size, size))
    print('Reading images...')
    for i in range(len(im_list)):
        im = cv2.imread(im_list[i])
        im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        im = cv2.equalizeHist(im)
        im = cv2.resize(im, (size, size))
        X[i] = im.astype(np.float64) / 256
    print('X.shape is %s' % (X.shape, ))
    print('Fitting...')
    crbm = ConvolutionalRBM((size, size), 10, w_size=7, n_iter=3, verbose=True)
    crbm.fit(X)
    return crbm
Example #25
def main():

    if IS_TRAINING:

        original_imgs_path = list_images(
            'D:/ImageDatabase/Image_fusion_MSCOCO/original/')

        print('\nBegin to train the network ...\n')
        train_recons(original_imgs_path,
                     MODEL_SAVE_PATH,
                     model_pre_path,
                     EPOCHES,
                     BATCH_SIZE,
                     debug=True)

        print('\nSuccessfully! Done training...\n')
    else:

        output_save_path = 'outputs'
        # sourceA_name = 'image'
        # sourceB_name = 'image'
        sourceA_name = 'IR'
        sourceB_name = 'VIS'
        print('\nBegin to generate pictures ...\n')

        content_name = 'images/IV_images/' + sourceA_name
        style_name = 'images/IV_images/' + sourceB_name

        for i in range(1):
            index = i + 1
            content_path = content_name + str(index) + '.png'
            style_path = style_name + str(index) + '.png'

            # content_path = content_name + str(index) + '_left.png'
            # style_path = style_name + str(index) + '_right.png'
            generate(content_path,
                     style_path,
                     MODEL_SAVE_PATH,
                     model_pre_path,
                     index,
                     output_path=output_save_path)
Example #26
def stream_train_images(dir_path, true_rectangles_dict, window_size=(128, 128), window_step=32, visualize=False):
    # define the window width and height
    winW, winH = window_size

    for image_path in list_images(dir_path):
        if not is_image_file(image_path):
            continue

        # Read the image
        image = scipy.misc.imread(image_path)
        parent_dir_path, image_name = os.path.split(image_path)
        parent_dir_name = os.path.split(parent_dir_path)[-1]
        image_name_key = os.path.join(parent_dir_name, image_name)
        true_rectangles = true_rectangles_dict[image_name_key]

        if visualize:
            clone = image.copy()
            for rect in true_rectangles:
                cv2.rectangle(clone, (rect[0], rect[1]), (rect[2], rect[3]), RED, thickness=2)

        for (x, y, window) in sliding_window(image, step_size=window_step, window_size=(winW, winH)):
            # if the window does not meet our desired window size, ignore it
            if window.shape[0] != winH or window.shape[1] != winW:
                continue

            if visualize:
                copy = clone.copy()
                cv2.rectangle(copy, (x, y), (x + winW, y + winH), BLUE, thickness=2)
                cv2.imshow(image_path, copy)
                cv2.waitKey(1)

            if all(bb_intersection_over_union((x, y, x + winW, y + winH), rect) == 0 for rect in true_rectangles):
                if visualize:
                    cv2.rectangle(clone, (x, y), (x + winW, y + winH), GREEN, thickness=2)
                    cv2.imshow(image_path, clone)
                    cv2.waitKey(1)

                yield image_name, window

        if visualize:
            cv2.destroyAllWindows()
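A hedged consumption sketch for the generator above; the directory is hypothetical and true_rectangles_dict is assumed to map 'parent_dir/image_name' keys to lists of (x1, y1, x2, y2) boxes, as the lookup in the function implies.

# collect background windows (no overlap with any ground-truth box) per image
negatives = {}
for image_name, window in stream_train_images('train/images',
                                              true_rectangles_dict,
                                              window_size=(128, 128),
                                              window_step=32):
    negatives.setdefault(image_name, []).append(window)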
Example #27
    def __init__(self,
                 images_path,
                 label_options=None,
                 preload_labels='labels.json'):

        if not label_options:
            label_options = [
                'front-side', 'front', 'side', 'back', 'back-side', 'top',
                'other'
            ]

        self.images_path = images_path
        self.label_options = label_options
        self.all_buttons = [
            widgets.Button(description=s) for s in label_options
        ]

        self.previous_button = widgets.Button(description='previous')
        self.next_button = widgets.Button(description='next')
        self.previous_button.on_click(self._go_to_previous)
        self.next_button.on_click(self._go_to_next_pic)

        for b in self.all_buttons:
            b.on_click(self._label_click)

        if preload_labels:
            with open(preload_labels, 'r') as labels:
                self.images_list_label = json.loads(labels.read())
        else:
            self.images_list_label = {
                i: 'noclass'
                for i in list(list_images(images_path))
            }

        self.images_list_label = OrderedDict(
            sorted(self.images_list_label.items()))
        self.slider = widgets.IntSlider(min=0,
                                        max=len(self.images_list_label) - 1)
        self.slider.value = self._get_next_first_unlabeled()
Example #28
def main():

    if IS_TRAINING:

        # original_imgs_path_name = 'D:/ImageDatabase/Image_fusion_MSCOCO/original/'
        # sourceA_imgs_path  = list_images('D:/ImageDatabase/Image_fusion_MSCOCO/source_a')
        # sourceB_imgs_path_name  = 'D:/ImageDatabase/Image_fusion_MSCOCO/source_b/'

        original_imgs_path = list_images(
            'D:/ImageDatabase/Image_fusion_MSCOCO/original/')

        print('\nBegin to train the network ...\n')

        # train(ssim_weight, original_imgs_path_name,sourceA_imgs_path, sourceB_imgs_path_name, ENCODER_WEIGHTS_PATH, model_save_path,model_pre_path, debug=True)
        train_recons(original_imgs_path,
                     ENCODER_WEIGHTS_PATH,
                     MODEL_SAVE_PATH,
                     model_pre_path,
                     debug=True)

        print('\nSuccessfully! Done training...\n')
    else:

        sourceA_name = 'visible'
        sourceB_name = 'infrared'
        print('\nBegin to generate pictures ...\n')

        content_path = 'images/IV/' + sourceA_name
        style_path = 'images/IV/' + sourceB_name

        output_save_path = 'outputs'

        generated_images = generate(content_path,
                                    style_path,
                                    ENCODER_WEIGHTS_PATH,
                                    MODEL_SAVE_PATH,
                                    model_pre_path,
                                    output_path=output_save_path)
Example #29
def train_recons(inputPath, validationPath, save_path, model_pre_path, EPOCHES_set, BATCH_SIZE, debug=False, logging_period=1):
    from datetime import datetime
    start_time = datetime.now()
    path = './models/performanceData/'
    fileName = 'TrainPerformanceData_'+str(start_time)+'.txt'
    fileName = fileName.replace(" ", "_")
    fileName = fileName.replace(":", "_")
    file = open(path+fileName, 'w')
    file.close()
    folders = list_folders(inputPath)
    valFolders = list_folders(validationPath)
    EPOCHS = EPOCHES_set
    print("EPOCHES   : ", EPOCHS)
    print("BATCH_SIZE: ", BATCH_SIZE)
    # get the training image shape
    HEIGHT, WIDTH, CHANNELS = TRAINING_IMAGE_SHAPE
    INPUT_SHAPE = (BATCH_SIZE, HEIGHT, WIDTH, CHANNELS)

    HEIGHT_OR, WIDTH_OR, CHANNELS_OR = TRAINING_IMAGE_SHAPE_OR
    INPUT_SHAPE_OR = (BATCH_SIZE, HEIGHT_OR, WIDTH_OR, CHANNELS_OR)
    GROUNDTRUTH_SHAPE_OR = (1, HEIGHT_OR, WIDTH_OR, CHANNELS_OR)

    # create the graph
    with tf.Graph().as_default(), tf.Session() as sess:
        original = tf.placeholder(tf.float32, shape=INPUT_SHAPE_OR, name='original')
        groundtruth = tf.placeholder(tf.float32, shape=GROUNDTRUTH_SHAPE_OR, name='groundtruth')
        source = original

        print('source  :', source.shape)
        print('original:', original.shape)
        print('groundtruth:', groundtruth.shape)
        # create the deepfuse net (encoder and decoder)
        dfn = DenseFuseNet(model_pre_path)
        generated_img = dfn.transform_recons_train(source)
        print('generate:', generated_img.shape)
        pixel_loss = tf.reduce_sum(tf.square(groundtruth - generated_img))
        pixel_loss = tf.math.sqrt(pixel_loss / (BATCH_SIZE * HEIGHT * WIDTH))
        loss = pixel_loss
        train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

        sess.run(tf.global_variables_initializer())

        # saver = tf.train.Saver()
        saver = tf.train.Saver(keep_checkpoint_every_n_hours=1)

        # ** Start Training **
        step = 0
        count_loss = 0
        numTrainSite = len(folders)
        numValSite = len(valFolders)

        for epoch in range(EPOCHS):
            save_path_epoch = './models_intermediate/' + str(epoch) + '.ckpt'
            start_time_epoch = datetime.now()
            for site in range(numTrainSite):
                start_time_site = datetime.now()
                file = open(path + fileName, 'a')
                groundtruth_imgs_path = list_images(inputPath+folders[site] + '/gt/')
                training_imgs_path = list_images(inputPath+folders[site])
                np.random.shuffle(training_imgs_path)
                gt = get_train_images(groundtruth_imgs_path, crop_height=HEIGHT, crop_width=WIDTH, flag=False)
                gtImgTrain = np.zeros(GROUNDTRUTH_SHAPE_OR)
                gtImgTrain[0] = gt
                n_batches = int(len(training_imgs_path) // BATCH_SIZE)
                for batch in range(n_batches):
                    original_path = training_imgs_path[batch * BATCH_SIZE:(batch * BATCH_SIZE + BATCH_SIZE)]
                    original_batch = get_train_images(original_path, crop_height=HEIGHT, crop_width=WIDTH, flag=False)
                    original_batch = original_batch.reshape([BATCH_SIZE, 256, 256, 1])
                    sess.run(train_op, feed_dict={original: original_batch, groundtruth: gtImgTrain})
                if debug:
                    for batch in range(n_batches):
                        original_path = training_imgs_path[batch * BATCH_SIZE:(batch * BATCH_SIZE + BATCH_SIZE)]
                        original_batch = get_train_images(original_path, crop_height=HEIGHT, crop_width=WIDTH, flag=False)
                        original_batch = original_batch.reshape([BATCH_SIZE, 256, 256, 1])

                        # print('original_batch shape final:', original_batch.shape)

                        # run the training step
                        _p_loss = sess.run(pixel_loss, feed_dict={original: original_batch, groundtruth: gtImgTrain})
                        # log mode (training), epoch, site, batch and pixel loss to the performance file
                        file.write('Train[Epoch#: %d, Site#: %d, Batch#: %d, _p_loss: %.4f]\n' % (epoch, site, batch, _p_loss))
                        print('Train[Epoch#: %d, Site#: %d, Batch#: %d, _p_loss: %.4f]' % (epoch, site, batch, _p_loss))
                print('Time taken per site: %s' %(datetime.now() - start_time_site))
                file.close()
            for site in range(numValSite):
                file = open(path + fileName, 'a')
                start_time_validation = datetime.now()
                groundtruth_val_imgs_path = list_images(validationPath+valFolders[site] + '/gt/')
                validation_imgs_path = list_images(validationPath+valFolders[site])
                np.random.shuffle(validation_imgs_path)
                gt = get_train_images(groundtruth_val_imgs_path, crop_height=HEIGHT, crop_width=WIDTH, flag=False)
                gtImgVal = np.zeros(GROUNDTRUTH_SHAPE_OR)
                gtImgVal[0] = gt
                val_batches = int(len(validation_imgs_path) // BATCH_SIZE)
                val_pixel_acc = 0
                for batch in range(val_batches):
                    val_original_path = validation_imgs_path[batch * BATCH_SIZE:(batch * BATCH_SIZE + BATCH_SIZE)]
                    val_original_batch = get_train_images(val_original_path, crop_height=HEIGHT, crop_width=WIDTH, flag=False)
                    val_original_batch = val_original_batch.reshape([BATCH_SIZE, 256, 256, 1])

                    val_pixel = sess.run(pixel_loss, feed_dict={original: val_original_batch, groundtruth: gtImgVal})
                    file.write('Validation[Epoch#: %d, Site#: %d, Batch#: %d, _p_loss: %.4f]\n' % (epoch, site, batch, val_pixel))
                    val_pixel_acc = val_pixel_acc + val_pixel
                print('Time taken per validation site: %s' % (datetime.now() - start_time_validation))
                val_loss = val_pixel_acc/val_batches
                file.write('ValidationAcc[Epoch#: %d, Site#: %d, Batch#: %d, val_loss: %.4f]\n' % (epoch, site, batch, val_loss))
                print('ValidationAcc[Epoch#: %d, Site#: %d, Batch#: %d, val_loss: %.4f]' % (epoch, site, batch, val_loss))
                file.close()
            print('------------------------------------------------------------------------------')
            print('Time taken per epoch: %s' % (datetime.now() - start_time_epoch))
            saver.save(sess, save_path_epoch)
        saver.save(sess, save_path)
        print('Done training!')
        print('Total Time taken (training): %s' % (datetime.now() - start_time))
        file.close()
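A hypothetical call into this training routine; the per-site folder layout (each site folder containing a gt/ subfolder) follows the list_images calls above, while the paths, model_pre_path value and hyperparameters are assumptions.

train_recons('./data/train/', './data/validation/', './models/final.ckpt',
             model_pre_path=None, EPOCHES_set=4, BATCH_SIZE=2, debug=True)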
Example #30
def main():

	if IS_TRAINING:
#-------------------------------------------------------------------------------------------------------
		original_imgs_path = list_images('/data/ljy/train_2w/')
		validation_imgs_path = list_images('/data/ljy/修改专用/imagefusion_densefuse-master/validation/validation/')
#---------------------------------------------------------------------------------------------------------
		for ssim_weight, model_save_path in zip(SSIM_WEIGHTS, MODEL_SAVE_PATHS):
			print('\nBegin to train the network ...\n')
			train_recons(original_imgs_path, validation_imgs_path, model_save_path, model_pre_path, ssim_weight, EPOCHES, BATCH_SIZE, debug=True)

			print('\nSuccessfully! Done training...\n')
	#====================================================================================================
	elif IS_TRAINING_A:
		original_imgs_path = list_images('/data/ljy/train_5000')
		validation_imgs_path = list_images('/data/ljy/修改专用/imagefusion_densefuse-master/validation/validation/')
		for ssim_weight_a, model_save_path_a in zip(SSIM_WEIGHTS_A, MODEL_SAVE_PATHS_A):
			print('\nBegin to train the attention network ...\n')
			train_recons_a(original_imgs_path, validation_imgs_path, model_save_path_a, model_pre_path_a, ssim_weight_a, EPOCHES, BATCH_SIZE, MODEL_SAVE_PATHS[0], debug=True)
			print('\nSuccessfully! Done training...\n')




	#================================================================================================
	else:
		if IS_VIDEO:
			ssim_weight = SSIM_WEIGHTS[0]
			model_path = MODEL_SAVE_PATHS[0]

			IR_path = list_images('video/1_IR/')
			VIS_path = list_images('video/1_VIS/')
			output_save_path = 'video/fused'+ str(ssim_weight) +'/'
			generate(IR_path, VIS_path, model_path, model_pre_path,
			         ssim_weight, 0, IS_VIDEO, 'addition', output_path=output_save_path)
		else:
			ssim_weight = SSIM_WEIGHTS[1]
			model_path = MODEL_SAVE_PATHS[1]
			model_path_a=MODEL_SAVE_PATHS_A[1]
			print('\nBegin to generate pictures ...\n')
			# path = 'images/IV_images/'
			path ='D:/DLUT/2020.01.06ICIP/Crop/IV_images/'
			for i in range(20):
				#if i < 10 :
				#	continue
				index = i + 1
				infrared = path + 'IR' + str(index) + '.png'
				visible = path + 'VIS' + str(index) + '.png'

				# RGB images
				#infrared = path + 'lytro-' + str(index) + '-A.jpg'
				#visible = path + 'lytro-' + str(index) + '-B.jpg'

				# choose fusion layer
				fusion_type = 'addition'
				# fusion_type = 'l1'
				# for ssim_weight, model_path in zip(SSIM_WEIGHTS, MODEL_SAVE_PATHS):
				# 	output_save_path = 'outputs'
                #
				# 	generate(infrared, visible, model_path, model_pre_path,
				# 	         ssim_weight, index, IS_VIDEO, is_RGB, type = fusion_type, output_path = output_save_path)

				output_save_path = 'CCSSSvi8264'
				generate(infrared, visible, model_path, model_pre_path,model_path_a,model_pre_path_a,
						 ssim_weight, index, IS_VIDEO, is_RGB, type = fusion_type, output_path = output_save_path)
Example #31
def _main_(args):
    config_path = args.conf
    weights_path = args.weights
    image_path = args.input

    keras.backend.tensorflow_backend.set_session(get_session())

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    if weights_path == '':
        weights_path = config['train']['pretrained_weights']

    ###############################
    #   Make the model
    ###############################

    input_size = (config['model']['input_size_h'],
                  config['model']['input_size_w'])

    yolo = YOLO(backend=config['model']['backend'],
                input_size=(config['model']['input_size_h'],
                            config['model']['input_size_w']),
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'],
                gray_mode=config['model']['gray_mode'])

    if config['model']['gray_mode']:
        depth = 1
    else:
        depth = 3

    yolo.load_weights(weights_path)

    if image_path[-4:] == '.mp4':
        video_out = image_path[:-4] + '_detected' + image_path[-4:]
        #cap = FileVideoStream(image_path).start()
        cap = cv2.VideoCapture(image_path)
        time.sleep(1.0)
        #        fps = FPS().start()
        fps_img = 0.0
        counter = 0
        while True:
            start = time.time()
            ret, image = cap.read()
            if not ret:
                # stop when the stream ends or a frame cannot be read
                break

            if depth == 1:
                # convert video to gray
                image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                image = cv2.resize(image,
                                   input_size,
                                   interpolation=cv2.INTER_CUBIC)
                image = np.expand_dims(image, 2)
                #image = np.array(image, dtype='f')
            else:
                if counter == 1:
                    print("Color image")
                image = cv2.resize(image,
                                   input_size,
                                   interpolation=cv2.INTER_CUBIC)
                #image = np.array(image, dtype='f')

            #image = np.divide(image, 255.)
            tm_inf = time.time()
            boxes = yolo.predict(image)
            fps_img = (fps_img + (1 / (time.time() - start))) / 2

            print("Inference time: {:.4f}".format(time.time() - tm_inf))
            image = draw_boxes(image, boxes, config['model']['labels'])
            image = cv2.putText(image, "fps: {:.2f}".format(fps_img), (0, 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1, 4)
            cv2.imshow("Press q to quit", image)
            #            fps.update()

            #if counter == 10:
            #print(image.sum(), boxes)
            #    time.sleep(1)
            counter += 1

            if cv2.getWindowProperty("Press q to quit",
                                     cv2.WND_PROP_ASPECT_RATIO) < 0.0:
                print("Window closed")
                break
            elif cv2.waitKey(1) & 0xFF == ord('q'):
                print("Q pressed")
                break


#        fps.stop()
#        print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
#        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
        cap.release()

    else:
        images = list(list_images(image_path))
        for fname in images[100:]:
            image = cv2.imread(fname)
            tm_inf = time.time()
            boxes = yolo.predict(image)
            print("Inference time: {:.4f}".format(time.time() - tm_inf))
            image = draw_boxes(image, boxes, config['model']['labels'])
            cv2.imshow("Press q to quit", image)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            time.sleep(2)
    cv2.destroyAllWindows()
Example #32
HEIGHT = 256
WIDTH = 256
CHANNEL = 3
# batch_size, height, width, channel_number
INPUT_SHAPE = (BATCH_SIZE, HEIGHT, WIDTH, CHANNEL)
CONTENT_DATA_PATH = '/root/even/dataset/COCO_train_2014/'
STYLE_DATA_PATH = '/root/even/dataset/wiki_all_images/'


if __name__ == '__main__':

    start_time = datetime.now()

    # Get the path of all valid images
    print('Preprocessing training images \n')
    content_images = utils.list_images(CONTENT_DATA_PATH)
    style_images = utils.list_images(STYLE_DATA_PATH)
    num_imgs = min(len(content_images), len(style_images))
    content_images = content_images[:num_imgs]
    style_images = style_images[:num_imgs]
    mod = num_imgs % BATCH_SIZE
    print('Preprocessing finished, %d images in total \n' % (num_imgs - mod))
    if mod > 0:
        print('Train set has been trimmed by %d samples...\n' % mod)
        content_images = content_images[:-mod]
        style_images = style_images[:-mod]
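    # Worked example (hypothetical counts): with 82,783 content images,
    # 79,433 style images and BATCH_SIZE = 8, num_imgs = 79,433,
    # mod = 79,433 % 8 = 1, so both lists are trimmed to 79,432 images.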

    with tf.Graph().as_default(), tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:

        encoder = Encoder(VGG_PATH)
        decoder = Decoder()