Example #1
def main(_):
    """3.print configurations"""
    print('tf version:', tf.__version__)
    print('tf setup:')
    for k, v in FLAGS.flag_values_dict().items():
        print(k, v)
    FLAGS.TB_dir += '_' + str(FLAGS.c_dim)
    """4.check/create folders"""
    print("check dirs...")
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.TB_dir):
        os.makedirs(FLAGS.TB_dir)
    """5.begin tf session"""
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        print("building model...")
        """6.init srcnn model"""
        srcnn = SRCNN(sess, FLAGS)
        """7.start to train/test"""
        if (FLAGS.is_train):
            srcnn.train()
        elif FLAGS.patch_test:
            srcnn.test()
        else:
            srcnn.test_whole_img()
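The FLAGS object these main functions receive comes from TensorFlow 1.x's flags module, and tf.app.run() is what parses the command line and then calls main. A hypothetical sketch of how the flags referenced in Example #1 might be declared; the names and defaults here are assumptions, not the original definitions:

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_boolean('is_train', True, 'train the model instead of running inference')
flags.DEFINE_boolean('patch_test', False, 'test on patches rather than on whole images')
flags.DEFINE_integer('c_dim', 1, 'number of image channels')
flags.DEFINE_string('checkpoint_dir', 'checkpoint', 'where checkpoints are saved/restored')
flags.DEFINE_string('TB_dir', 'logs', 'TensorBoard summary directory')
FLAGS = flags.FLAGS

if __name__ == '__main__':
    tf.app.run()  # parses the flags, then invokes main()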
Example #2
def main(_):
    t0 = time.time()

    pp.pprint(FLAGS.build_model)

    if not FLAGS.build_model:
        FLAGS.test_img = validate(FLAGS.test_img)
        print("Image path = ", FLAGS.test_img)
        if not os.path.isfile(FLAGS.test_img):
            print("File does not exist ", FLAGS.test_img)
            sys.exit()

    create_required_directories(FLAGS)

    with tf.compat.v1.Session() as sess:
        srcnn = SRCNN(sess,
                      image_size=FLAGS.image_size,
                      label_size=FLAGS.label_size,
                      batch_size=FLAGS.batch_size,
                      c_dim=FLAGS.c_dim,
                      checkpoint_dir=FLAGS.checkpoint_dir,
                      sample_dir=FLAGS.sample_dir)

        if FLAGS.build_model:
            srcnn.train(FLAGS)
        else:
            srcnn.test(FLAGS)
    print("\n\nTime taken %4.2f\n\n" % (time.time() - t0))
Example #3
def main():

    dataloaders = myDataloader()
    train_loader = dataloaders.getTrainLoader(batch_size)

    model = SRCNN().cuda()
    model.train()

    optimizer = optim.Adam(model.parameters(), lr=lr)
    mse_loss = nn.MSELoss()

    for ep in range(epoch):
        running_loss = 0.0
        for i, (pic, blurPic, _) in enumerate(train_loader):
            pic = pic.cuda()
            blurPic = blurPic.cuda()
            optimizer.zero_grad()
            out = model(blurPic)
            loss = mse_loss(out, pic)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()  # .item() so the autograd graph is not retained
            if i % 10 == 9:
                print('[%d %d] loss: %.4f' %
                      (ep + 1, i + 1, running_loss / 10))  # mean of the last 10 batches
                running_loss = 0.0
        if ep % 10 == 9:
            torch.save(model.state_dict(),
                       f="./result/train/" + str(ep + 1) + "srcnnParms.pth")
    print("finish training")
Example #4
def main(_): #?
    with tf.Session() as sess:
        srcnn = SRCNN(sess,
                      image_size = FLAGS.image_size,
                      label_size = FLAGS.label_size,
                      c_dim = FLAGS.c_dim)

        srcnn.train(FLAGS)
Example #5
def main(_):
    with tf.Session() as sess:
        srcnn = SRCNN(sess,
                      image_dim=FLAGS.image_dim,
                      label_dim=FLAGS.label_dim,
                      channel=FLAGS.channel)

        srcnn.train(FLAGS)
Example #6
def main(args):
    srcnn = SRCNN(
        image_size=args.image_size,
        c_dim=args.c_dim,
        is_training=True,
        learning_rate=args.learning_rate,
        batch_size=args.batch_size,
        epochs=args.epochs)
    X_train, Y_train = load_train(image_size=args.image_size, stride=args.stride, scale=args.scale)
    srcnn.train(X_train, Y_train)
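Example #6 delegates data preparation to load_train. A hypothetical sketch of such a helper, which crops HR sub-images with a fixed stride and pairs them with bicubic-degraded inputs; the directory layout, grayscale conversion, and equal input/label patch sizes are assumptions:

import glob
import numpy as np
from PIL import Image

def load_train(image_size=33, stride=14, scale=3, train_dir='Train'):
    inputs, labels = [], []
    for path in glob.glob(train_dir + '/*.bmp'):
        hr = Image.open(path).convert('L')                      # luminance channel only
        w, h = (hr.width // scale) * scale, (hr.height // scale) * scale
        hr = hr.crop((0, 0, w, h))                              # make dimensions divisible by scale
        lr = hr.resize((w // scale, h // scale), Image.BICUBIC).resize((w, h), Image.BICUBIC)
        hr_np = np.asarray(hr, dtype=np.float32) / 255.0
        lr_np = np.asarray(lr, dtype=np.float32) / 255.0
        for y in range(0, h - image_size + 1, stride):
            for x in range(0, w - image_size + 1, stride):
                inputs.append(lr_np[y:y + image_size, x:x + image_size, None])
                labels.append(hr_np[y:y + image_size, x:x + image_size, None])
    return np.stack(inputs), np.stack(labels)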
Example #7
def main(_):
    pp.pprint(flags.FLAGS.__flags)
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    if not os.path.exists(FLAGS.log_dir):
        os.makedirs(FLAGS.log_dir)
    with tf.Session() as sess:
        srcnn = SRCNN(sess, FLAGS)
        srcnn.train()
Example #8
def main(_): #?
    with tf.Session() as sess:
        
        #print("Calling init")

        srcnn = SRCNN(sess,
                      image_size = FLAGS.image_size,
                      label_size = FLAGS.label_size,
                      c_dim = FLAGS.c_dim)

        #print("Calling train")
        srcnn.train(FLAGS)
Example #9
def main(_):
    """3.print configurations"""
    print('tf version:',tf.__version__)
    print('tf setup:')
    #os.makedirs(FLAGS.checkpoint_dir)
    """5.begin tf session"""
    with tf.Session() as sess:
        """6.init srcnn model"""
        srcnn = SRCNN(sess, FLAGS)
        """7.start to train/test"""
        if(FLAGS.is_train):
            srcnn.train()
        else:
            srcnn.test()
Example #10
def main(_):
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    with tf.Session() as sess:
        srcnn = SRCNN(sess,
                      image_size=FLAGS.image_size,
                      label_size=FLAGS.label_size,
                      batch_size=FLAGS.batch_size,
                      c_dim=FLAGS.c_dim,
                      checkpoint_dir=FLAGS.checkpoint_dir,
                      sample_dir=FLAGS.sample_dir)
        srcnn.train(FLAGS)
Example #11
def main():

    if not os.path.exists(Config.checkpoint_dir):
        os.makedirs(Config.checkpoint_dir)

    with tf.Session() as sess:
        trysr = SRCNN(sess,
                      image_size=Config.image_size,
                      label_size=Config.label_size,
                      batch_size=Config.batch_size,
                      c_dim=Config.c_dim,
                      checkpoint_dir=Config.checkpoint_dir,
                      scale=Config.scale)

        trysr.train(Config)
Example #12
def train():
    print("process the image to h5file.....")
    data_dir = flags.data_dir
    h5_dir = flags.h5_dir
    stride = flags.train_stride
    data_helper.gen_input_image(data_dir, h5_dir, stride)

    print("reading data......")
    h5_path = os.path.join(h5_dir, "data.h5")
    data, label = data_helper.load_data(h5_path)

    print("initialize the model......")
    model = SRCNN(flags)
    model.build_graph()
    model.train(data, label)
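Example #12 reads its training pairs back from an HDF5 file. A minimal sketch of a load_data helper like the one called above, assuming the preprocessing step stored 'data' and 'label' datasets in data.h5 (the dataset names are an assumption):

import h5py
import numpy as np

def load_data(h5_path):
    with h5py.File(h5_path, 'r') as f:
        data = np.array(f['data'])    # bicubic-interpolated LR patches
        label = np.array(f['label'])  # matching HR patches
    return data, label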
Example #13
def main(_):
    """3.print configurations"""
    print('tf version:', tf.__version__)
    print('tf setup:')
    for k, v in FLAGS.flag_values_dict().items():
        print(k, v)
    """4.check/create folders"""
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    """5.begin tf session"""
    with tf.Session() as sess:
        """6.init srcnn model"""
        srcnn = SRCNN(sess, FLAGS)
        """7.start to train/test"""
        if (FLAGS.is_train):
            srcnn.train()
        else:
            srcnn.test()
Example #14
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        srcnn = SRCNN(sess,
                      image_size=FLAGS.image_size,
                      label_size=FLAGS.label_size,
                      batch_size=FLAGS.batch_size,
                      is_grayscale=FLAGS.is_grayscale,
                      checkpoint_dir=FLAGS.checkpoint_dir,
                      sample_dir=FLAGS.sample_dir)

        srcnn.train(FLAGS)
Example #15
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        srcnn = SRCNN(sess,
                      image_size=FLAGS.image_size,
                      label_size=FLAGS.label_size,
                      batch_size=FLAGS.batch_size,
                      c_dim=FLAGS.c_dim,
                      checkpoint_dir=FLAGS.checkpoint_dir,
                      sample_dir=FLAGS.sample_dir)

        #srcnn.test('Test/Set5/baby_GT.bmp', FLAGS)
        srcnn.train(FLAGS)
Example #16
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        srcnn = SRCNN(sess,
                      image_size=FLAGS.image_size,
                      label_size=FLAGS.label_size,
                      batch_size=FLAGS.batch_size,
                      c_dim=FLAGS.c_dim,
                      checkpoint_dir=FLAGS.checkpoint_dir,
                      sample_dir=FLAGS.sample_dir)

        srcnn.train(FLAGS)
Example #17
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    srcnn = SRCNN(image_size=FLAGS.image_size,
                  label_size=FLAGS.label_size,
                  batch_size=FLAGS.batch_size,
                  c_dim=FLAGS.c_dim,
                  checkpoint_dir=FLAGS.checkpoint_dir,
                  sample_dir=FLAGS.sample_dir,
                  FLAGS=FLAGS)
    srcnn.call()
    if FLAGS.is_train:
        srcnn.train()
    else:
        srcnn.inference()
Example #18
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.device('/gpu:0'):
        with tf.Session(config=config) as sess:
            srcnn = SRCNN(sess,
                          image_size=FLAGS.image_size,
                          label_size=FLAGS.label_size,
                          batch_size=FLAGS.batch_size,
                          c_dim=FLAGS.c_dim,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          config=FLAGS)
            srcnn.train(FLAGS)
Example #19
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    config = tf.ConfigProto(allow_soft_placement=True)

    with tf.device('/device:GPU:0'):
        with tf.Session(config=config) as sess:
            srcnn = SRCNN(sess,
                          image_size=FLAGS.image_size,
                          label_size=FLAGS.label_size,
                          batch_size=FLAGS.batch_size,
                          ci_dim=FLAGS.ci_dim,
                          co_dim=FLAGS.co_dim,
                          scale_factor=FLAGS.scale_factor,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          model_carac=FLAGS.model_carac,
                          train_dir=FLAGS.train_dir,
                          test_dir=FLAGS.test_dir)
            srcnn.train(FLAGS)
Example #20
    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  pin_memory=True,
                                  drop_last=True)

    eval_dataset = EvalDataset(args.eval_file)
    eval_dataloader = DataLoader(dataset=eval_dataset, batch_size=1)

    best_weights = copy.deepcopy(model.state_dict())
    best_epoch = 0
    best_psnr = 0.0

    for epoch in range(args.num_epochs):
        model.train()
        epoch_losses = AverageMeter()

        with tqdm(total=(len(train_dataset) -
                         len(train_dataset) % args.batch_size)) as t:
            t.set_description('epoch:{}/{}'.format(epoch, args.num_epochs - 1))

            for data in train_dataloader:
                inputs, labels = data

                inputs = inputs.to(device)
                labels = labels.to(device)

                preds = model(inputs)
                loss = criterion(preds, labels)
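                # NOTE: the example is truncated here. The lines below are a hypothetical
                # continuation of this loop; the optimizer, the AverageMeter API, and the
                # tqdm updates are assumptions based on the surrounding code, not the original.
                epoch_losses.update(loss.item(), len(inputs))

                optimizer.zero_grad()   # optimizer is defined in the part cut off above
                loss.backward()
                optimizer.step()

                t.set_postfix(loss='{:.6f}'.format(epoch_losses.avg))
                t.update(len(inputs))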
Example #21
        self.learning_rate = 1e-4
        self.batch_size = 128
        self.result_dir = 'result'
        self.test_img = ''  # Do not change this.


arg = this_config()
print(
    "Hello TA!  We are group 7. Thank you for your work for us. Hope you have a happy day!"
)

with tf.Session() as sess:
    FLAGS = arg
    srcnn = SRCNN(sess,
                  image_size=FLAGS.image_size,
                  label_size=FLAGS.label_size,
                  c_dim=FLAGS.c_dim)
    srcnn.train(FLAGS)

    # Testing
    files = glob.glob(os.path.join(os.getcwd(), 'train_set', 'LR0', '*.jpg'))
    test_files = random.sample(files, len(files) // 5)

    FLAGS.is_train = False
    count = 1
    for f in test_files:
        FLAGS.test_img = f
        print('Saving ', count, '/', len(test_files), ': ', FLAGS.test_img,
              '\n')
        count += 1
        srcnn.test(FLAGS)
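Several of these training scripts select or report checkpoints by PSNR (e.g. the best_psnr bookkeeping in Example #20). A generic sketch of that metric for images scaled to [0, 1], not taken from any of the repositories above:

import numpy as np

def psnr(sr, hr, max_val=1.0):
    # Peak signal-to-noise ratio between a super-resolved image and its ground truth.
    mse = np.mean((sr.astype(np.float64) - hr.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')
    return 10.0 * np.log10((max_val ** 2) / mse)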