Example #1
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception as e:
        print(e)
        print('Missing or invalid arguments')
        exit(0)

    # create the experiment dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    sess = tf.Session()
    # create instance of the model you want
    model = Model(config)
    # load the model if it exists
    model.load(sess)
    # create your data generator
    data = IVUSDataGenerator(config)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create the trainer and pass all previous components to it
    trainer = SigmoidTrainer(sess, model, data, config, logger)

    # train the model
    trainer.train()
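
All of these examples call get_args() and process_config() without showing their bodies. A minimal sketch of what they might look like, assuming an argparse-based CLI and a JSON config wrapped in a DotMap so that attribute access such as config.summary_dir works (the names and structure here are assumptions, not the original helpers):

import argparse
import json

from dotmap import DotMap  # assumed helper; any attribute-style dict works


def get_args():
    # parse the -c/--config argument that points at the JSON config file
    parser = argparse.ArgumentParser(description='Training entry point')
    parser.add_argument('-c', '--config', required=True,
                        help='path to the JSON configuration file')
    return parser.parse_args()


def process_config(json_file):
    # read the JSON file and expose its keys as attributes
    with open(json_file) as f:
        config_dict = json.load(f)
    return DotMap(config_dict)
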
def main():
    # get json configuration filepath from the run argument
    # process the json configuration file
    args = get_args()
    config = process_config(args.config)

    # create the experiment directories
    log_dir, checkpoint_dir = create_dirs(config)

    print('Create the data generator')
    data_loader = DataLoader(config)

    print('Create the model')
    model = CycleGANAttrModel(config, config['weights_path'])
    model.build_model()
    print('Model ready, loading data now')

    print('Create the trainer')
    trainer = CycleGANModelTrainer(model, data_loader.get_trainA_data(),
                                   data_loader.get_trainB_data(),
                                   data_loader.get_testA_data(),
                                   data_loader.get_testB_data(), config,
                                   log_dir, checkpoint_dir)

    # print('Start training the model.')
    trainer.train()
def main():
    # get json configuration filepath from the run argument
    # process the json configuration file
    try:
        args = get_args()
        config, log_dir, checkpoint_dir = process_config(args.config)
    except:
        print('missing or invalid arguments')
        print('Unexpected error:', sys.exc_info()[0])
        exit(0)

    # create the experiment directories
    create_dirs([log_dir, checkpoint_dir])

    print('Create the data generator')
    data_loader = WikiArtDataLoader(config)

    print('Create the model')
    model = ResNet50AttrModel(config)
    print('Model ready, loading data now')

    print('Create the trainer')
    trainer = ResNet50ModelTrainer(model.model, data_loader.get_train_data(), data_loader.get_val_data(), config, log_dir, checkpoint_dir)

    print('Start training the model.')
    trainer.train()
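
Several of the examples pass a list of directory paths to create_dirs(). A minimal sketch under that assumption (the real helper may instead take a config and return the paths, as in the example that unpacks log_dir and checkpoint_dir):

import os


def create_dirs(dirs):
    # create each experiment directory if it does not already exist
    for dir_path in dirs:
        os.makedirs(dir_path, exist_ok=True)
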
def main():
    try:
        args = get_args()
        config = process_config(args.config)

        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True
        sess = tf.Session(config=tf_config)
        ktf.set_session(sess)

        create_dirs([config.callbacks.tensorboard_log_dir,
                     config.callbacks.checkpoint_dir,
                     config.path.chache_path])
        print("Create the data generator.")
        data_loader = SuperResolutionDataLoader(config)
        print("Create the model.")
        model = SuperResolutionModel(config)
        print("Create the trainer.")
        trainer = SuperResolutionTrainer(model.model,
                                         data_loader.generate_train_data(),
                                         config)
        print("Start training...!")
        trainer.train()

    except Exception as err:
        print("missing or invalid arguments: {0}".format(err))
        exit(0)
Example #5
def main():
    start_time = time.time()
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        configurations = process_config(args.config)
    except:
        print("unable to create configurations")
        exit(0)

    scores = []
    for config in configurations:
        experiment_result = experiment(config)
        scores.append(experiment_result)
    # scores is a list of future objects; wait until the actual results are available
    results = compss_wait_on(scores)

    end_time = time.time()
    elapsed_time = end_time - start_time

    with open('./results/cifar_times.txt', 'a') as the_file:
        the_file.write("Computing Units = " + str(os.environ['VAR']) + "\n")
        the_file.write("Number of Nodes = " + str(args.nodes) + "\n")
        the_file.write("Elapsed time = " + str(elapsed_time) + "\n")
        the_file.write("\n")
        the_file.write("\n")

    with open('./results/cifar_scores.txt', 'a') as the_file:
        the_file.write(str(results) + "\n")
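
The loop above only collects futures if experiment() is declared as a PyCOMPSs task; compss_wait_on then blocks until the real values are available. A minimal sketch of such a declaration, with a placeholder body rather than the original experiment code:

from pycompss.api.task import task


@task(returns=1)
def experiment(config):
    # placeholder body: build, train and evaluate one model for this
    # configuration, then return its score as the task result
    score = 0.0
    return score
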
Example #6
def init() -> None:
    """
    The main function of the project, used to initialise all the classes
    required to train the model.
    """
    # get input arguments
    args = get_args()
    # get static config information
    config = process_config()
    # combine both into a single dictionary
    config = {**config, **args}

    # initialise model
    model = RawModel(config)
    # create your data generators for each mode
    train_data = TFRecordDataLoader(config, mode="train")

    val_data = TFRecordDataLoader(config, mode="val")

    test_data = TFRecordDataLoader(config, mode="test")

    # initialise the estimator
    trainer = RawTrainer(config, model, train_data, val_data, test_data)

    # start training
    trainer.run()
Example #7
def main():
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("Missing Arguments")
        exit(0)
    sess = tf.Session()
    data = DataLoader(sess, "./data/celebA/images/", "./data/celebA/list_attr_celeba.txt", 178, 128, 16, "train")
    stargan = StarGAN(sess, config, data)
    stargan.train()
Example #8
def main():
    try:
        FLAG = process_config()
    except:
        print("missing or invalid arguments")
        exit(0)

    if FLAG.GPU_options:
        session_config = tf.ConfigProto()
        session_config.gpu_options.per_process_gpu_memory_fraction = 0.9
        session_config.gpu_options.allow_growth = True
        sess = tf.Session(config=session_config)
    else:
        sess = tf.Session()

    model = yolov3(FLAG)
    model.build()
    model.init_saver()
    model.load(sess)

    image_test = Image.open('images/timg.jpg')
    resized_image = image_test.resize(size=(416, 416))
    image_data = np.array(resized_image, dtype='float32') / 255.0
    img_hw = tf.placeholder(dtype=tf.float32, shape=[2])
    boxes, scores, classes = model.pedict(img_hw,
                                          iou_threshold=0.5,
                                          score_threshold=0.5)

    begin_time = time.time()
    boxes_, scores_, classes_, conv0 = sess.run(
        [boxes, scores, classes, model.feature_extractor.conv0],
        feed_dict={
            img_hw: [image_test.size[1], image_test.size[0]],
            model.x: [image_data]
        })
    end_time = time.time()
    print(end_time - begin_time)
    #    print conv0

    image_draw = draw_boxes(np.array(image_test, dtype=np.float32) / 255,
                            boxes_,
                            classes_,
                            FLAG.names,
                            scores=scores_)
    fig = plt.figure(frameon=False)
    ax = plt.Axes(fig, [0, 0, 1, 1])
    ax.set_axis_off()
    fig.add_axes(ax)
    plt.imshow(image_draw)
    fig.savefig('prediction.jpg')
    plt.show()
    sess.close()
def main():
    # get json configuration filepath from the run argument
    # process the json configuration file
    args = get_args()
    config = process_config(args.config)

    print('Create the data generator')
    data_loader = DataLoader(config)

    if config['all_weights_in_folder']:
        weights = np.array(glob(os.path.dirname(os.path.abspath(config['weights_path'])) + '/*.hdf5'))
    else:
        weights = np.array([config['weights_path']])  

    for weight in weights:
        weightnum = int(os.path.basename(weight).split('-')[-1:][0][:-5])
        print('Create the model for weight #%s' % (weightnum))
        model = CycleGANAttrModel(config, weight, is_train=False)
        predict_set = config['predict_set']  # either a, b, or both
        model.build_predict_model(predict_set)
        print('Model ready, loading data now')

        os.makedirs('images/%s' % config['dataset_name'], exist_ok=True)

        if predict_set=='both' or predict_set=='a':
            testA_datagen = DataGenerator(img_filenames=data_loader.get_testA_data(), batch_size=1, target_size=(config['predict_img_height'], config['predict_img_width']))
            testA_generator = iter(testA_datagen)

            num_images = len(testA_datagen)
            for i in range(num_images):
                imgs_A = next(testA_generator)
                fake_B = model.predict_g_AB.predict(imgs_A)
                imageio.imwrite("images/%s/%i_a_transl_%i.png" % (config['dataset_name'], weightnum, i), ((fake_B[0]+1)*127.5).astype(np.uint8))

                if predict_set=='both':
                    reconstr_A = model.predict_g_BA.predict(fake_B)
                    imageio.imwrite("images/%s/%i_a_recon_%i.png" % (config['dataset_name'], weightnum, i), ((reconstr_A[0]+1)*127.5).astype(np.uint8))

        if predict_set=='both' or predict_set=='b':
            testB_datagen = DataGenerator(img_filenames=data_loader.get_testB_data(), batch_size=1, target_size=(config['predict_img_height'], config['predict_img_width']))
            testB_generator = iter(testB_datagen)

            num_images = len(testB_datagen)
            for i in range(num_images):
                imgs_B = next(testB_generator)    
                fake_A = model.predict_g_BA.predict(imgs_B)
                imageio.imwrite("images/%s/%i_b_transl_%i.png" % (config['dataset_name'], weightnum, i), ((fake_A[0]+1)*127.5).astype(np.uint8))

                if predict_set=='both':
                    reconstr_B = model.predict_g_AB.predict(fake_A)
                    imageio.imwrite("images/%s/%i_b_recon_%i.png" % (config['dataset_name'], weightnum, i), ((reconstr_B[0]+1)*127.5).astype(np.uint8))
def infer():
    # get json configuration filepath from the run argument
    # process the json configuration file
    try:
        config = 'input_params_for_inference.json'
        config, _, _ = process_config(config)
    except:
        print('missing or invalid arguments')
        print('Unexpected error:', sys.exc_info()[0])
        exit(0)

    print('Create the data generator')
    data_loader = WikiArtDataLoader(config)

    print('Create the model')
    model = ResNet50AttrModel(config)
    print('Model ready, loading data now')

    print('Create the trainer')
    trainer = ResNet50ModelTrainer(model.model, data_loader.get_train_data(), data_loader.get_val_data(), config, '', '')

    print('Infer.')
    trainer.predict()
            num_params = np.prod(shape)

            var_weights = weights[ptr:ptr + num_params].reshape(
                (shape[3], shape[2], shape[0], shape[1]))
            # remember to transpose to column-major
            var_weights = np.transpose(var_weights, (2, 3, 1, 0))
            ptr += num_params
            assign_ops.append(tf.assign(var1, var_weights,
                                        validate_shape=True))
            i += 1
    return assign_ops
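
The assign_ops returned above are TensorFlow 1.x assignment ops; nothing is written to the graph variables until they are run in a session. A minimal sketch of how a caller might apply them:

# apply the collected assignment ops inside an active session so the
# loaded weights are actually written into the variables (TF 1.x)
with tf.Session() as sess:
    sess.run(assign_ops)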


if __name__ == '__main__':
    try:
        FLAG = process_config()
    except:
        print("missing or invalid arguments")
        exit(0)

    if FLAG.npz:
        if os.path.exists(FLAG.npz_path):
            print("darknet53.conv.74.npz already exists")
        else:
            print(FLAG.config_path)
            load_weights_for_finetune(FLAG)

    elif FLAG.ckpt:
        detections = yolov3(FLAG)
        #        with tf.variable_scope('detector'):
        detections.build()
Example #12
import os

import matplotlib.pyplot as plt

from config import Config
# from model.ACM_SinGAN import SinGAN_ACM as SinGAN
from model.SinGAN import SinGAN
from utils.image import read_img, torch2np
from utils.utils import process_config, adjust_scales, calcul_sr_scale

if __name__ == '__main__':
    process_config(Config)
    Config.infer_dir = f'{Config.exp_dir}/infer_pyramid' if Config.save_all_pyramid else f'{Config.exp_dir}/infer'
    singan = SinGAN(config=Config)
    singan.load_trained_weights()

    inference_img = read_img(Config)
    os.makedirs(Config.infer_dir, exist_ok=True)
    plt.imsave(f'{Config.exp_dir}/real.png',
               torch2np(inference_img),
               vmin=0,
               vmax=1)

    if Config.mode == 'train':
        adjust_scales(inference_img, Config)
        start_img_input = singan.create_inference_input()
    elif Config.mode == 'train_SR':
        in_scale, iter_num = calcul_sr_scale(Config)
        Config.scale_factor = 1 / in_scale
        Config.scale_factor_init = 1 / in_scale
        Config.scale_w = 1