Example #1
def soft_train(network, args):
    device = torch.device("cuda" if args.gpu_flag else "cpu")
    optimizer, scheduler = get_optimizer(network, args)

    train_data_set = get_data_set(args, train_flag=True)
    test_data_set = get_data_set(args, train_flag=False)
    train_data_loader = torch.utils.data.DataLoader(train_data_set,
                                                    batch_size=args.batch_size,
                                                    shuffle=True)
    test_data_loader = torch.utils.data.DataLoader(test_data_set,
                                                   batch_size=args.batch_size,
                                                   shuffle=False)

    print("-*-" * 10 + "\n\t\tTrain network\n" + "-*-" * 10)
    for epoch in range(0, args.epoch):
        network = network.cpu()
        if args.network == "vgg":
            network = soft_prune_vgg_step(network, args.prune_rate[0])
        elif args.network == 'resnet':
            network = soft_prune_resnet_step(network, args.prune_rate)
        network = network.to(device)
        train_step(network, train_data_loader, test_data_loader, optimizer,
                   device, epoch)
        if scheduler is not None:
            scheduler.step()

    return network
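These training examples all expect an argparse-style `args` namespace. The sketch below is not from any of the original projects: only the attribute names are taken from soft_train above, and the defaults are hypothetical.

import argparse

# Hypothetical defaults; only the attribute names come from soft_train above.
parser = argparse.ArgumentParser()
parser.add_argument('--gpu-flag', action='store_true')
parser.add_argument('--network', default='vgg', choices=['vgg', 'resnet'])
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--epoch', type=int, default=100)
parser.add_argument('--prune-rate', type=float, nargs='+', default=[0.5])
args = parser.parse_args([])  # parse an empty argv to keep the defaults

# network = soft_train(network, args)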
Example #2
def train_network(network, args):
    if network is None:
        if args.network == 'vgg':
            network = MyVGG()
        elif args.network == 'resnet':
            network = resnet32()

    device = torch.device("cuda" if args.gpu_flag else "cpu")
    network = network.to(device)
    optimizer, scheduler = get_optimizer(network, args)

    train_data_set = get_data_set(args, train_flag=True)
    test_data_set = get_data_set(args, train_flag=False)
    train_data_loader = torch.utils.data.DataLoader(train_data_set,
                                                    batch_size=args.batch_size,
                                                    shuffle=True)
    test_data_loader = torch.utils.data.DataLoader(test_data_set,
                                                   batch_size=args.batch_size,
                                                   shuffle=False)

    print("-*-" * 10 + "\n\t\tTrain network\n" + "-*-" * 10)
    for epoch in range(0, args.epoch):
        train_step(network, train_data_loader, test_data_loader, optimizer,
                   device, epoch)
        if scheduler is not None:
            scheduler.step()

    return network
Example #3
def train_network(network, args):
    if network is None:
        return

    device = torch.device("cuda" if args.gpu_flag else "cpu")
    network = network.to(device)
    optimizer, scheduler = get_optimizer(network, args)

    train_data_set = get_data_set(args, train_flag=True)
    test_data_set = get_data_set(args, train_flag=False)
    train_data_loader = torch.utils.data.DataLoader(train_data_set,
                                                    batch_size=args.batch_size,
                                                    shuffle=True)
    test_data_loader = torch.utils.data.DataLoader(test_data_set,
                                                   batch_size=args.batch_size,
                                                   shuffle=False)

    print("-*-" * 10 + "\n\t\tTrain network\n" + "-*-" * 10)
    for epoch in range(0, args.epoch):
        if args.pruned and args.alpha < 1:
            network = network.cpu()
            network = soft_prune_step(network, 1 - args.alpha)
            network = network.to(device)
        train_step(network, train_data_loader, test_data_loader, optimizer,
                   device, epoch)
        if scheduler is not None:
            scheduler.step()

    return network
def test_network(args, network=None, data_set=None):
    device = torch.device("cuda" if args.gpu_no >= 0 else "cpu")

    if network is None:
        if 'vgg' in args.network:
            network = VGG(args.network, args.data_set)
        # elif args.network == 'resnet56':
        #     network = resnet56()
        # elif args.network == 'resnet110':
        #     network = resnet110()

        if args.load_path:
            check_point = torch.load(args.load_path)
            network.load_state_dict(check_point['state_dict'])

    network.to(device)

    if data_set is None:
        data_set = get_data_set(args, train_flag=False)
    data_loader = torch.utils.data.DataLoader(data_set,
                                              batch_size=100,
                                              shuffle=False)

    top1, top5 = test_step(network, data_loader, device)

    return network, data_set, (top1, top5)
def create_dp(params):
    if params['dp_name'] == 'session_data_provider':
        data_provider = SessDataProvider()
    dp_file_path = os.path.join(params['data_folder'],
                                params['dp_name'] + '.pkl')
    if os.path.exists(dp_file_path):
        data_provider.load(dp_file_path)
        print('load data provider from %s' % dp_file_path)
    else:
        data_file_path = os.path.join(params['data_folder'],
                                      'raw_data_set.pkl')
        data_set = get_data_set(params)
        data_provider.build(data_set)
        data_provider.save(dp_file_path)
        print('build data provider and save to %s' % dp_file_path)

    for p in data_provider.data_set_params.keys():
        params[p] = data_provider.data_set_params[p]
    params['cuid_unknow_idx'] = data_provider.cuid_voca['cuid_to_ix']['UNKNOW']
    params['city_unknow_idx'] = data_provider.city_voca['city_to_ix']['UNKNOW']
    params['query_unknow_idx'] = data_provider.query_voca['query_to_ix'][
        'UNKNOW']
    params['query_start_idx'] = data_provider.query_voca['query_to_ix'][
        '<START>']
    params['num_query'] = len(data_provider.query_voca['ix_to_query'])
    print('data provider created!')

    return data_provider, params
Example #6
def main(input):
    teta0, teta1 = get_minimize_cost_tetas()
    data_x, data_y = get_data_set()
    xmin = min(data_x)
    xmax = max(data_x)
    ymin = min(data_y)
    ymax = max(data_y)
    normalized_input = (float(input) - xmin) / (xmax - xmin)
    normalized_output = float(teta1) * float(normalized_input) + float(teta0)
    denormalized_output = normalized_output * (float(ymax) - float(ymin)) + float(ymin)
    return denormalized_output
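Example #6 undoes a min-max scaling: the input is mapped into [0, 1] using the training x range, the linear model is applied in normalized space, and the prediction is mapped back to the original y range. A small self-contained illustration of that round trip with made-up data and parameters (none of these values come from the original project):

# Made-up data and parameters, purely to illustrate the round trip.
data_x = [50000.0, 100000.0, 150000.0]   # e.g. mileages
data_y = [8000.0, 6000.0, 4000.0]        # e.g. prices
teta0, teta1 = 1.0, -1.0                 # hypothetical fitted parameters

x = 75000.0
nx = (x - min(data_x)) / (max(data_x) - min(data_x))   # 0.25
ny = teta1 * nx + teta0                                 # 0.75
y = ny * (max(data_y) - min(data_y)) + min(data_y)      # 7000.0
print(y)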
Example #7
def supervised_testing():
	test_x, test_y = get_data_set("test")
	num_test = test_x.shape[0]

	test_data = tf.data.Dataset.from_tensor_slices((test_x, test_y))
	test_data = test_data.map(test_parse, num_parallel_calls=8)
	test_data = test_data.batch(1)
	test_iter = test_data.make_initializable_iterator()
	x_test, y_test = test_iter.get_next()

	X = tf.placeholder(tf.float32, [None, height, width, 3], name='Input')
	Y = tf.placeholder(tf.int32, [None, num_classes], name='Label')
	drop_rate = tf.placeholder(tf.float32)
	
	logits = TESnet(X, "TESnet", drop_rate, reuse=False)
	pred = tf.nn.softmax(logits)

	saver = tf.train.Saver()

	# Evaluate Model
	correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(Y,1))
	accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

	with tf.Session() as sess:
		# Initialize variables
		sess.run(tf.global_variables_initializer())

		# Restore weights of model
		saver.restore(sess, sv_model_dir)

		log = "\n========== Supervised Testing Begin ==========\n"
		write_logs(logs_sv, log, False)
		test_start = time.time()
		avg_acc = 0
		sess.run(test_iter.initializer)
		for i in range(num_test):
			batch_start = time.time()

			bx, by = sess.run([x_test, y_test])
			acc = sess.run(accuracy, feed_dict={X:bx, Y:by, drop_rate:0.0})
			avg_acc += acc

			log = "Time {:2.5f}, Image {:05d}, Testing Accuracy = {:0.4f}".format(time.time()-batch_start, i+1, acc)
			write_logs(logs_sv, log, False)

		log = "\nTesting Accuracy = {:0.4f}\n".format(avg_acc/num_test)
		write_logs(logs_sv, log, False)
		log = "\nSupervised Testing Time: {:2.5f}".format(time.time()-test_start)
		write_logs(logs_sv, log, False)

		sess.close()
Example #8
def plot_calcium():
    """Visualize calcium traces."""

    plt.figure()
    for i, (calcium, spikes) in enumerate(utils.get_data_set('train')):
        print(i)

        plt.subplot(10, 2, i + 1)
        plt.plot(calcium[:, 0])

        plt.subplot(10, 2, i + 11)
        plt.plot(spikes[:, 0])

    plt.show()
Example #9
def calcium_hist():
    """Generates a histogram of calcium fluorescences over all the data."""

    plt.figure()
    for i, (calcium, _) in enumerate(utils.get_data_set('train')):
        print(i)

        x = np.reshape(calcium, (-1, ))
        x = x[np.isnan(x) == False]

        plt.subplot(5, 2, i + 1)
        plt.hist(x, log=True)

    plt.show()
Example #10
def spike_hist():
    """Generates a histogram of spike counts over all the data."""

    plt.figure()
    for i, (_, spikes) in enumerate(utils.get_data_set('train')):
        print(i)

        x = np.reshape(spikes, (-1, ))
        x = x[np.isnan(x) == False]

        plt.subplot(5, 2, i + 1)
        plt.hist(x, range(6), log=True)

    plt.show()
Example #11
def train_network(args, network=None, data_set=None):
    device = torch.device("cuda" if args.gpu_no >= 0 else "cpu")

    if network is None:
        if args.data_set == 'CIFAR10':
            if 'vgg' in args.network:
                network = VGG(args.network, args.data_set)

    network = network.to(device)
    print(network)

    if data_set is None:
        data_set = get_data_set(args, train_flag=True)

    loss_calculator = Loss_Calculator()

    optimizer, scheduler = get_optimizer(network, args)

    if args.resume_flag:
        check_point = torch.load(args.load_path)
        network.load_state_dict(check_point['state_dict'])
        loss_calculator.loss_seq = check_point['loss_seq']
        args.start_epoch = check_point['epoch']  # update start epoch

    print("Start at %s" % time.ctime())
    print("-*-" * 10 + "\n\tTrain network\n" + "-*-" * 10)
    for epoch in range(args.start_epoch, args.epoch):
        print("---------- EPOCH %d ----------" % (epoch + 1))
        # make shuffled data loader
        data_loader = torch.utils.data.DataLoader(data_set,
                                                  batch_size=args.batch_size,
                                                  shuffle=True)

        # train one epoch
        train_step(network, data_loader, loss_calculator, optimizer, device,
                   epoch, args.print_freq)

        # adjust learning rate
        if scheduler is not None:
            scheduler.step()

        torch.save(
            {
                'epoch': epoch + 1,
                'state_dict': network.state_dict(),
                'loss_seq': loss_calculator.loss_seq
            }, args.save_path + args.network + '_checkpoint.pth')
    print("End at %s" % time.ctime())
    return network
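The checkpoint dictionary that Example #11 writes after every epoch ('epoch', 'state_dict', 'loss_seq') is exactly what the resume branch near the top of the function reads back. A minimal sketch of reloading such a checkpoint outside the training loop; the file path and the VGG constructor arguments are placeholders, not values from the original project:

import torch

# Placeholder path; the dictionary keys match the torch.save call above.
check_point = torch.load('./vgg16_checkpoint.pth', map_location='cpu')
network = VGG('vgg16', 'CIFAR10')           # assumes the VGG class used in these examples
network.load_state_dict(check_point['state_dict'])
start_epoch = check_point['epoch']          # resume from the next epoch
loss_history = check_point['loss_seq']      # running loss sequence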
Example #12
    def do_report():
        avg_acc = 0

        for bt in xrange(test_batches):
            test_names, test_inputs, test_targets, test_seq_len = utils.get_data_set(
                'valid.txt', bt * test_batch_size, (bt + 1) * test_batch_size)
            test_feed = {inputs: test_inputs,
                         targets: test_targets,
                         seq_len: test_seq_len}
            dd, log_probs, accuracy = session.run([decoded[0], log_prob, acc], test_feed)
            accuracy = report_accuracy(dd, test_targets, test_names)
            if accuracy is not None:
                avg_acc += accuracy
        avg_acc = avg_acc / test_batches
        save_path = saver.save(session, "models/ocr.model-" + str(avg_acc), global_step=steps)
def main():
    df = get_data_set('idao_dataset/train', save_to_csv=False)
    create_folds(df, 5, config)

    run_training(1, config, mode='clf')
    run_training(1, config, mode='reg')

    clf_preds, reg_preds = predict(config)

    sub_df = pd.read_csv(sub_df_path)
    sub_df['classification_predictions'] = clf_preds
    sub_df['regression_predictions'] = reg_preds
    sub_df['regression_predictions'] = sub_df['regression_predictions'].apply(
        transform)

    sub_df.to_csv('Final_Submission.csv', index=False)
def train_network(args, network=None, data_set=None):
    device = torch.device("cuda" if args.gpu_no >= 0 else "cpu")
    print("1. Finish check device: ", device)

    if network is None:
        network = VGG(args.vgg, args.data_set)
    network = network.to(device)
    print("2. Finish create network")

    if data_set is None:
        data_set = get_data_set(args, train_flag=True)
    print("3. Finish load dataset")

    loss_calculator = Loss_Calculator()

    optimizer, scheduler = get_optimizer(network, args)

    if args.resume_flag:
        check_point = torch.load(args.load_path)
        network.load_state_dict(check_point['state_dict'])
        loss_calculator.loss_seq = check_point['loss_seq']
        args.start_epoch = check_point['epoch']  # update start epoch

    print("-*-" * 10 + "\n\tTrain network\n" + "-*-" * 10)
    for epoch in range(args.start_epoch, args.epoch):
        # make shuffled data loader
        data_loader = torch.utils.data.DataLoader(data_set,
                                                  batch_size=args.batch_size,
                                                  shuffle=True)

        # train one epoch
        train_step(network, data_loader, loss_calculator, optimizer, device,
                   epoch, args.print_freq)

        # adjust learning rate
        if scheduler is not None:
            scheduler.step()

        torch.save(
            {
                'epoch': epoch + 1,
                'state_dict': network.state_dict(),
                'loss_seq': loss_calculator.loss_seq
            }, args.save_path + "check_point.pth")

    return network
Example #15
def test_network(network, args):
    if network is None:
        return

    device = torch.device("cuda" if args.gpu_flag else "cpu")
    network.to(device)

    data_set = get_data_set(args, train_flag=False)
    data_loader = torch.utils.data.DataLoader(data_set, batch_size=100, shuffle=False)

    test_top1, test_top5, test_loss = test_step(network, data_loader, device)

    print("-*-" * 10 + "\n\t\tTest network\n" + "-*-" * 10)
    test_acc_str = 'Top1: %2.4f, Top5: %2.4f, ' % (test_top1, test_top5)
    test_loss_str = 'Loss: %.4f. ' % test_loss
    print(test_acc_str + test_loss_str)

    return
Example #16
def display_data(input, res):
    try:
        if sys.argv[1] == "-v":
            data_x, data_y = get_data_set()
            xplot = []
            yplot = []
            xplot.append(input)
            yplot.append(res)
            for i in range(len(data_x)):
                ret = main(data_x[i])
                xplot.append(data_x[i])
                yplot.append(ret)
            plt.plot(data_x, data_y, 'ro', input, res, 'bo', xplot, yplot)
            plt.xlabel('Mileages')
            plt.ylabel('Prices')
            plt.show()
        else:
            print("Ivalid option: usage: -v (visual)")
    except:
        exit(0)
Example #17
def main(teta0, teta1, iteration):
    data_x, data_y = get_data_set()
    normalized_data_x, normalized_data_y = normalize_data(data_x, data_y)
    cost = cost_function(normalized_data_x, normalized_data_y, teta0, teta1)
    teta_history = [[], []]
    i = 0
    with open('results_file.csv', mode='w') as results_file:
        results = csv.writer(results_file,
                             delimiter=',',
                             quoting=csv.QUOTE_NONNUMERIC)
        print("Training model...")
        results.writerow(['Cost', ' teta0', ' teta1'])
        while iteration != 0:
            cost = cost_function(normalized_data_x, normalized_data_y, teta0,
                                 teta1)
            teta0, teta1 = update_tetas(teta0, teta1, cost, normalized_data_x,
                                        normalized_data_y)
            if cost == 0:
                break
            i += 1
            iteration -= 1
            results.writerow([cost, teta0, teta1])
    print("Training complete.")
Example #18
def train():
    mycounter = 3920
    crnn = mymodel.crnnNet()
    for curr_epoch in xrange(num_epochs):
        avg_acc2 = 0.0
        if curr_epoch % 3 == 0:
            mycounter += 1
        for batch in xrange(utils.BATCHES):
            tf.reset_default_graph()
            global_step = tf.Variable(0, trainable=False)
            learning_rate = tf.train.exponential_decay(
                utils.INITIAL_LEARNING_RATE,
                global_step,
                utils.DECAY_STEPS,
                utils.LEARNING_RATE_DECAY_FACTOR,
                staircase=True)
            start = time.time()
            train_names, train_inputs, train_targets = utils.get_data_set(
                '../train.txt', batch * utils.BATCH_SIZE,
                (batch + 1) * utils.BATCH_SIZE)
            print("get data time", time.time() - start)
            logits, inputs, seq_len, Wforward, Wbackward, b = crnn.net(
                tf.expand_dims(train_inputs, 3))
            cost = crnn.loss(train_targets, logits, seq_len)
            optimizer = tf.train.AdadeltaOptimizer(
                learning_rate=learning_rate).minimize(cost,
                                                      global_step=global_step)
            decoded, log_prob = tf.nn.ctc_beam_search_decoder(
                logits, seq_len, merge_repeated=False)
            acc = tf.reduce_mean(
                tf.edit_distance(tf.cast(decoded[0], tf.int32), train_targets))

            with tf.Session() as session:
                ckpt = tf.train.get_checkpoint_state("models")
                if ckpt and ckpt.model_checkpoint_path:
                    session.run(tf.local_variables_initializer())
                    saver = tf.train.Saver()
                    saver.restore(session, ckpt.model_checkpoint_path)
                else:
                    print("No checkpoint found.")
                    print("Trainng from scratch.")
                    init = tf.group(tf.global_variables_initializer(),
                                    tf.local_variables_initializer())
                    session.run(init)
                    saver = tf.train.Saver(tf.global_variables(),
                                           max_to_keep=10)
                train_cost = train_ler = 0.0
                start = time.time()
                c, steps, _, dd, accu = session.run(
                    [cost, global_step, optimizer, decoded[0], acc])
                accuracy = utils.report_accuracy(dd, train_targets,
                                                 train_names)
                avg_acc2 += accuracy
                train_cost = c
                train_ler = accu
                log = "Batch {}/{} : Epoch {}/{}, steps = {}, train_cost = {:.3f}, accuracy = {:.7f}, time = {:.3f}s"
                print(
                    log.format(batch + 1, utils.BATCHES, curr_epoch + 1,
                               num_epochs, steps, train_cost, accuracy,
                               time.time() - start))
                save_path = saver.save(session,
                                       "models/ocr.model-",
                                       global_step=mycounter)
            session.close()
        avg_acc2 /= utils.BATCHES
        print("\n train set accuracy = ", avg_acc2)

        val_cost = 0.0
        val_ler = 0.0
        avg_acc1 = 0.0
        print("\n\n\Valid cost\n\n\n")
        for batch in xrange(utils.VAL_BATCHES):
            tf.reset_default_graph()
            start = time.time()
            val_names, val_inputs, val_targets = utils.get_data_set(
                '../valid.txt', batch * utils.BATCH_SIZE,
                (batch + 1) * utils.BATCH_SIZE)
            print("get data time", time.time() - start)
            logits, inputs, seq_len, Wforward, Wbackward, b = crnn.net(
                tf.expand_dims(val_inputs, 3))
            cost = crnn.loss(val_targets, logits, seq_len)
            decoded, log_prob = tf.nn.ctc_beam_search_decoder(
                logits, seq_len, merge_repeated=False)
            acc = tf.reduce_mean(
                tf.edit_distance(tf.cast(decoded[0], tf.int32), val_targets))
            with tf.Session() as session:
                ckpt = tf.train.get_checkpoint_state("models")
                if ckpt and ckpt.model_checkpoint_path:
                    session.run(tf.local_variables_initializer())
                    saver = tf.train.Saver()
                    saver.restore(session, ckpt.model_checkpoint_path)
                    start = time.time()
                    c, dd, log_prob, ler = session.run(
                        [cost, decoded[0], log_prob, acc])
                    accuracy = utils.report_accuracy(dd, val_targets,
                                                     val_names)
                    avg_acc1 += accuracy
                    val_cost += c
                    val_ler += ler
                    log = "Batch {}/{}, batch_cost = {:.3f}, batch_ler = {:.3f},acc = {:.3f}, time = {:.3f}s"
                    print(
                        log.format(batch + 1, utils.VAL_BATCHES, c, ler,
                                   accuracy,
                                   time.time() - start))
                else:
                    session.close()
                    print("no checkpoint found")
                    break
            session.close()
        val_cost /= utils.VAL_BATCHES
        val_ler /= utils.VAL_BATCHES
        avg_acc1 /= utils.VAL_BATCHES
        log = "\n\nepoch = {}/{} , Avg val cost = {:.3f}, Avg val ler ={:.3f},avg accuracy ={:.3f}\n\n"
        print(
            log.format(curr_epoch + 1, num_epochs, val_cost, val_ler,
                       avg_acc1))

        test_cost = 0.0
        test_ler = 0.0
        avg_acc = 0.0
        print("\n\n\ntest cost\n\n\n")
        for batch in xrange(utils.TEST_BATCHES):
            tf.reset_default_graph()
            start = time.time()
            test_names, test_inputs, test_targets = utils.get_data_set(
                '../tests.txt', batch * utils.BATCH_SIZE,
                (batch + 1) * utils.BATCH_SIZE)
            print("get data time", time.time() - start)
            logits, inputs, seq_len, Wforward, Wbackward, b = crnn.net(
                tf.expand_dims(test_inputs, 3))
            cost = crnn.loss(test_targets, logits, seq_len)
            decoded, log_prob = tf.nn.ctc_beam_search_decoder(
                logits, seq_len, merge_repeated=False)
            acc = tf.reduce_mean(
                tf.edit_distance(tf.cast(decoded[0], tf.int32), test_targets))
            with tf.Session() as session:
                ckpt = tf.train.get_checkpoint_state("models")
                if ckpt and ckpt.model_checkpoint_path:
                    session.run(tf.local_variables_initializer())
                    saver = tf.train.Saver()
                    saver.restore(session, ckpt.model_checkpoint_path)
                    start = time.time()
                    c, dd, log_probs, ler = session.run(
                        [cost, decoded[0], log_prob, acc])
                    test_cost += c
                    test_ler += ler
                    accuracy = utils.report_accuracy(dd, test_targets,
                                                     test_names)
                    avg_acc += accuracy
                    log = "Batch {}/{}, batch_cost = {:.3f}, batch_ler = {:.3f},batch_accuracy = {:.3f}, time = {:.3f}s"
                    print(
                        log.format(batch + 1, utils.TEST_BATCHES, c, ler,
                                   accuracy,
                                   time.time() - start))
                else:
                    session.close()
                    print("no checkpoint found")
                    break
            session.close()
        test_cost /= utils.TEST_BATCHES
        test_ler /= utils.TEST_BATCHES
        avg_acc /= utils.TEST_BATCHES
        log = "\n\nepoch = {}/{} , Avg test cost = {:.3f}, Avg test ler ={:.3f},Avg test accuracy = {:.3f}\n\n"
        print(
            log.format(curr_epoch + 1, num_epochs, test_cost, test_ler,
                       avg_acc))
Example #19
def train():
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(
        common.INITIAL_LEARNING_RATE,
        global_step,
        common.DECAY_STEPS,
        common.LEARNING_RATE_DECAY_FACTOR,
        staircase=True)
    logits, inputs, targets, seq_len, W, b = model.get_train_model()

    loss = tf.nn.ctc_loss(targets, logits, seq_len)
    cost = tf.reduce_mean(loss)

    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                           momentum=common.MOMENTUM).minimize(
                                               cost, global_step=global_step)

    # Option 2: tf.contrib.ctc.ctc_beam_search_decoder
    # (it's slower but you'll get better results)
    decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits,
                                                      seq_len,
                                                      merge_repeated=False)

    # Accuracy: label error rate
    acc = tf.reduce_mean(
        tf.edit_distance(tf.cast(decoded[0], tf.int32), targets))

    # Initialize the weights and biases
    init = tf.global_variables_initializer()

    def do_report():
        test_feed = {
            inputs: test_inputs,
            targets: test_targets,
            seq_len: test_seq_len
        }
        dd, log_probs, accuracy = session.run([decoded[0], log_prob, acc],
                                              test_feed)
        report_accuracy(dd, test_targets)
        # decoded_list = decode_sparse_tensor(dd)

    def do_batch():
        feed = {
            inputs: train_inputs,
            targets: train_targets,
            seq_len: train_seq_len
        }
        b_cost, steps, _ = session.run([cost, global_step, optimizer], feed)
        if steps > 0 and steps % common.REPORT_STEPS == 0:
            do_report()
            save_path = saver.save(session,
                                   "models/ocr.model",
                                   global_step=steps)
            # print(save_path)
        return b_cost, steps

    with tf.Session() as session:
        session.run(init)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
        for curr_epoch in xrange(num_epochs):
            # variables = tf.all_variables()
            # for i in variables:
            #     print(i.name)

            print("Epoch.......", curr_epoch)
            train_cost = train_ler = 0
            for batch in xrange(common.BATCHES):
                start = time.time()
                train_inputs, train_targets, train_seq_len = utils.get_data_set(
                    'train', batch * common.BATCH_SIZE,
                    (batch + 1) * common.BATCH_SIZE)

                print("get data time", time.time() - start)
                start = time.time()
                c, steps = do_batch()
                train_cost += c * common.BATCH_SIZE
                seconds = time.time() - start
                print("Step:", steps, ", batch seconds:", seconds)

            train_cost /= common.TRAIN_SIZE
            # train_ler /= common.TRAIN_SIZE

            val_feed = {
                inputs: train_inputs,
                targets: train_targets,
                seq_len: train_seq_len
            }

            val_cost, val_ler, lr, steps = session.run(
                [cost, acc, learning_rate, global_step], feed_dict=val_feed)

            log = "Epoch {}/{}, steps = {}, train_cost = {:.3f}, train_ler = {:.3f}, val_cost = {:.3f}, val_ler = {:.3f}, time = {:.3f}s, learning_rate = {}"
            print(
                log.format(curr_epoch + 1, num_epochs, steps, train_cost,
                           train_ler, val_cost, val_ler,
                           time.time() - start, lr))
Example #20
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--load',
                        type=str,
                        help='Checkpoint to load all weights from.')
    parser.add_argument('--load-gen',
                        type=str,
                        help='Checkpoint to load generator weights only from.')
    parser.add_argument('--name', type=str, help='Name of experiment.')
    parser.add_argument('--overfit',
                        action='store_true',
                        help='Overfit to a single image.')
    parser.add_argument('--batch-size',
                        type=int,
                        default=16,
                        help='Mini-batch size.')
    parser.add_argument(
        '--log-freq',
        type=int,
        default=10000,
        help='How many training iterations between validation/checkpoints.')
    parser.add_argument('--learning-rate',
                        type=float,
                        default=1e-4,
                        help='Learning rate for Adam.')
    parser.add_argument('--content-loss',
                        type=str,
                        default='mse',
                        choices=['mse', 'L1', 'edge_loss_mse', 'edge_loss_L1'],
                        help='Metric to use for content loss.')
    parser.add_argument(
        '--use-gan',
        action='store_true',
        help='Add adversarial loss term to generator and trains discriminator.'
    )
    parser.add_argument('--image-size',
                        type=int,
                        default=96,
                        help='Size of random crops used for training samples.')
    parser.add_argument('--vgg-weights',
                        type=str,
                        default='vgg_19.ckpt',
                        help='File containing VGG19 weights (tf.slim)')
    parser.add_argument('--train-dir',
                        type=str,
                        help='Directory containing training images')
    parser.add_argument(
        '--validate-benchmarks',
        action='store_true',
        help=
        'If set, validates that the benchmarking metrics are correct for the images provided by the authors of the SRGAN paper.'
    )
    parser.add_argument('--gpu',
                        type=str,
                        default='0',
                        help='Which GPU to use')
    parser.add_argument('--epoch',
                        type=int,
                        default='1000000',
                        help='How many iterations ')
    parser.add_argument('--is-val',
                        action='store_true',
                        help='Run validation on the benchmark sets instead of training.')
    parser.add_argument('--upSample',
                        type=int,
                        default='2',
                        help='How much scale ')

    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    srresnet_training = tf.placeholder(tf.bool, name='srresnet_training')

    srresnet_model = srresnet.Srresnet(training=srresnet_training,\
                              learning_rate=args.learning_rate,\
                              content_loss=args.content_loss,\
                              num_upsamples=args.upSample)

    lr_A = tf.placeholder(tf.float32, [None, None, None, 3], name='LR_DWT_A')
    lr_dwt_edge = tf.placeholder(tf.float32, [None, None, None, 9],
                                 name='LR_DWT_edge')
    hr_A = tf.placeholder(tf.float32, [None, None, None, 3], name='HR_image')
    hr = tf.placeholder(tf.float64, [None, None, None, 3], name='HR')
    hr_dwt_edge = tf.placeholder(tf.float32, [None, None, None, 9],
                                 name='HR_DWT_edge')

    sr_out_pred, sr_BCD_pred, sr_pred = srresnet_model.forward(
        lr_A, lr_dwt_edge)
    # sr_out_pred = srresnet_model.forward_LL_branch(lr_A)
    # sr_BCD_pred = srresnet_model.forward_edge_branch(lr_dwt_edge)

    sr_loss = srresnet_model.loss_function(hr_A, sr_out_pred, hr_dwt_edge,
                                           sr_BCD_pred, hr, sr_pred)
    sr_opt = srresnet_model.optimize(sr_loss)
    '''
    For validation: the input and label images must correspond to each other.
    '''
    benchmarks = [
        Benchmark('Benchmarks\\Rain12\\input',
                  'Benchmarks\\Rain12\\label',
                  name='Rain12'),
        Benchmark('Benchmarks\\val\\input',
                  'Benchmarks\\val\\label',
                  name='Rain100H'),

        #     #Benchmark('Benchmarks/BSD100', name='BSD100')
    ]

    # Create log folder
    if args.load and not args.name:
        log_path = os.path.dirname(args.load)
    else:
        log_path = build_log_dir(args, sys.argv)

    train_data_path = 'dataset\PreprocessedData.h5'
    val_data_path = 'dataset\PreprocessedData_val.h5'
    eval_data_path = 'dataset\PreprocessedData_eval.h5'

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())
        iteration = 0
        epoch = 0

        saver = tf.train.Saver(max_to_keep=100)

        # Load all
        if args.load:
            iteration = int(args.load.split('-')[-1])
            saver.restore(sess, args.load)
            print(saver)
            print("load_process_DEBUG")

        train_data_set = get_data_set(train_data_path, 'train')  # tied to how the h5 file was produced
        train_label_data_set = get_data_set(train_data_path, 'label')

        val_data_set = get_data_set(val_data_path, 'val')
        val_data_label_set = get_data_set(val_data_path, 'label')

        eval_data_set = get_data_set(eval_data_path, 'eval')
        eval_data_label_set = get_data_set(eval_data_path, 'label')

        val_error_li = []
        eval_error_li = []
        fig = plt.figure()

        if args.is_val:  # temporarily unused
            benchmarks = [
                Benchmark('Benchmarks/Set5', name='Set5'),
                Benchmark('Benchmarks/Set14', name='Set14'),
                Benchmark('Benchmarks/BSD100', name='BSD100'),
                Benchmark('Benchmarks/UCMerced_LandUse',
                          name='UCMerced_LandUse'),
                Benchmark('Benchmarks/RSSCN7', name='RSSCN7')
            ]

            log_line = ''
            for benchmark in benchmarks:
                psnr, ssim, _, _ = benchmark.evaluate(sess, sr_pred, log_path,
                                                      iteration)
                print(' [%s] PSNR: %.2f, SSIM: %.4f' %
                      (benchmark.name, psnr, ssim),
                      end='')
                log_line += ',%.7f, %.7f' % (psnr, ssim)
            print()
            # Write to log
            with open(log_path + '/PSNR.csv', 'a') as f:
                f.write(
                    'iteration, set5_psnr, set5_ssim, set14_psnr, set14_ssim, bsd100_psnr, bsd100_ssim,UCMerced_LandUse_psnr, UCMerced_LandUse_ssim,RSSCN7_psnr, RSSCN7_ssim\n'
                )
                f.write('%d,%s\n' % (iteration, log_line))

        else:
            while True:
                t = trange(0,
                           len(train_data_set) - args.batch_size + 1,
                           args.batch_size,
                           desc='Iterations')
                #         #One epoch
                for batch_idx in t:
                    t.set_description("Training... [Iterations: %s]" %
                                      iteration)

                    #             # Each 10000 times evaluate model
                    if iteration % args.log_freq == 0:
                        #                 # Loop over eval dataset
                        for batch_idx in range(
                                0,
                                len(val_data_set) - args.batch_size + 1,
                                args.batch_size):
                            #                 # # Test every log-freq iterations
                            val_error = evaluate_model(sr_loss, val_data_set[batch_idx:batch_idx + 16], val_data_label_set[batch_idx:batch_idx + 16], \
                                                       sess, 124, args.batch_size)
                            eval_error = evaluate_model(sr_loss,eval_data_set[batch_idx:batch_idx + 16], eval_data_label_set[batch_idx:batch_idx + 16],\
                                                       sess, 124, args.batch_size)
        #                 val_error_li.append(val_error)
        #                 eval_error_li.append(eval_error)

        #                 # # Log error
        #                 # plt.plot(val_error_li)
        #                 # plt.savefig('val_error.png')
        #                 # plt.plot(eval_error_li)
        #                 # plt.savefig('eval_error.png')
        #                 # # fig.savefig()

        #print('[%d] Test: %.7f, Train: %.7f' % (iteration, val_error, eval_error), end='')
        # Evaluate benchmarks
                        log_line = ''
                        for benchmark in benchmarks:
                            psnr, ssim, _, _ = benchmark.evaluate(
                                sess, sr_out_pred, sr_BCD_pred, sr_pred,
                                log_path, iteration)

                            print(' [%s] PSNR: %.2f, SSIM: %.4f' %
                                  (benchmark.name, psnr, ssim),
                                  end='')
                            log_line += ',%.7f, %.7f' % (psnr, ssim)
        #                 # # print()
        #                 # # # Write to log
                        with open(log_path + '/loss.csv', 'a') as f:
                            f.write(
                                '%d, %.15f, %.15f%s\n' %
                                (iteration, val_error, eval_error, log_line))
        #                 # # Save checkpoint
                        saver.save(sess,
                                   os.path.join(log_path, 'weights'),
                                   global_step=iteration,
                                   write_meta_graph=False)

                    # Train SRResnet
                    batch_rain = train_data_set[batch_idx:batch_idx + 16]
                    batch_label = train_label_data_set[batch_idx:batch_idx +
                                                       16]

                    #__DEBUG__
                    # for i in range(batch_rain.shape[0]):
                    #     cv2.imshow('__rain', batch_rain[i])
                    #     cv2.imshow('__label', batch_label[i])

                    #     cv2.waitKey(0)
                    # ycbcr_batch = batch_bgr2ycbcr(batch_hr)
                    batch_rain = batch_bgr2rgb(batch_rain)
                    batch_label = batch_bgr2rgb(batch_label)
                    # batch_lr = downsample_batch(batch_hr, factor=4)

                    batch_dwt_rain = batch_Swt(batch_rain)
                    batch_dwt_label = batch_Swt(batch_label)

                    # batch_dwt_lr[:,:,:,0] /= np.abs(batch_dwt_lr[:,:,:,0]).max()*255.
                    # batch_dwt_lr[:,:,:,4] /= np.abs(batch_dwt_lr[:,:,:,4]).max()*255.
                    # batch_dwt_lr[:,:,:,8] /= np.abs(batch_dwt_lr[:,:,:,8]).max()*255.
                    batch_dwt_rain_A = np.stack([
                        batch_dwt_rain[:, :, :, 0], batch_dwt_rain[:, :, :, 4],
                        batch_dwt_rain[:, :, :, 8]
                    ],
                                                axis=-1)
                    batch_dwt_label_A = np.stack([
                        batch_dwt_label[:, :, :, 0],
                        batch_dwt_label[:, :, :, 4], batch_dwt_label[:, :, :,
                                                                     8]
                    ],
                                                 axis=-1)

                    batch_dwt_rain_A /= 255.
                    batch_dwt_label_A /= 255.
                    # batch_dwt_A[:,:,:,0] /= np.abs(batch_dwt_A[:,:,:,0]).max()
                    # batch_dwt_A[:,:,:,1] /= np.abs(batch_dwt_A[:,:,:,1]).max()
                    # batch_dwt_A[:,:,:,2] /= np.abs(batch_dwt_A[:,:,:,2]).max()

                    # batch_dwt_A[:,:,:,0] *= 255.
                    # batch_dwt_A[:,:,:,1] *= 255.
                    # batch_dwt_A[:,:,:,2] *= 255.

                    # batch_dwt_lr_A = batch_dwt(batch_dwt_A)

                    batch_rain_BCD = np.concatenate([
                        batch_dwt_rain[:, :, :, 1:4],
                        batch_dwt_rain[:, :, :, 5:8], batch_dwt_rain[:, :, :,
                                                                     9:12]
                    ],
                                                    axis=-1)
                    batch_label_BCD = np.concatenate([
                        batch_dwt_label[:, :, :, 1:4],
                        batch_dwt_label[:, :, :, 5:8], batch_dwt_label[:, :, :,
                                                                       9:12]
                    ],
                                                     axis=-1)
                    # batch_lr_BCD = np.concatenate([up_sample_batch(batch_dwt_lr_A[:,:,:,1:4], factor=2),\
                    #                                up_sample_batch(batch_dwt_lr_A[:,:,:,5:8], factor=2),\
                    #                                up_sample_batch(batch_dwt_lr_A[:,:,:,9:12], factor=2)], axis=-1)
                    # batch_lr = downsample_batch(batch_hr, factor=4)
                    # batch_lr_BCD = up_sample_batch(batch_lr_BCD, factor=2)

                    batch_rain_BCD = batch_rain_BCD / 255.
                    batch_label_BCD = batch_label_BCD / 255.

                    batch_label = batch_label / 255.

                    _, err = sess.run([sr_opt, sr_loss],
                                      feed_dict={srresnet_training: False,
                                                 lr_A: batch_dwt_rain_A,
                                                 lr_dwt_edge: batch_rain_BCD,
                                                 hr_A: batch_dwt_label_A,
                                                 hr_dwt_edge: batch_label_BCD,
                                                 hr: batch_label})

                    #print('__training__ %s' % iteration)
                    iteration += 1
                print('__epoch__: %s' % epoch)
                epoch += 1
Example #21
def detect():
    model_restore = "models/ocr.model-0.959379192885-161999"  #give the path of your final model file
    test_filename = "test.txt"  #txt file containing the paths of all the test images
    output_folder = "test_outputs3/"  #where the outputs will be stored
    factor1 = 35  #think of it as number of test_batches
    factor2 = 41  #think of it as the batch size

    ac = 0
    logits, inputs, targets, seq_len, W, b = model.get_train_model()

    decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits,
                                                      seq_len,
                                                      merge_repeated=False)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Restore variables from disk.
        saver.restore(sess, model_restore)
        #saver.restore(sess, "models2/ocr.model-0.929263617018-35999")
        print("Model restored.")
        a = 0
        for x in range(0, factor1):
            test_names, test_inputs, test_targets, test_seq_len = utils.get_data_set(
                test_filename, x * factor2, (x + 1) * factor2)
            print(test_inputs[0].shape)

            feed_dict = {inputs: test_inputs, seq_len: test_seq_len}
            dd, lp = sess.run([decoded[0], log_prob], feed_dict=feed_dict)
            original_list = decode_sparse_tensor(test_targets)
            detected_list = decode_sparse_tensor(dd)
            names_list = test_names.tolist()
            print "lp", lp

            for x, fname_save in enumerate(names_list):
                result = detected_list[x]
                file = codecs.open(
                    output_folder + os.path.basename(fname_save) + ".rnn.txt",
                    "w", "utf-8")
                #file.write(''.join(result.tolist()))     if result is numpy
                file.write(''.join(result))
                file.close()

            if len(original_list) != (len(detected_list)):
                print("len(original_list)", len(original_list),
                      "len(detected_list)", len(detected_list),
                      " test and detect length desn't match")
                return
            print("T/F: original(length) <-------> detectcted(length)")
            total_ed = 0
            total_len = 0
            for idx, number in enumerate(original_list):

                detect_number = detected_list[idx]
                """if os.path.exists("output/"+names_list[idx] + ".out.txt"):
    			    append_write = 'a' # append if already exists
    			else:
    			    append_write = 'w' # make a new file if not
    			f = codecs.open("output/"+names_list[idx] + ".out.txt",append_write, 'utf-8')
    			f.write("\nDetected: "+''.join(detect_number)+"\n"+"Original: ",''.join(number))
    			f.close()"""

                ed = editdistance.eval(number, detect_number)

                ln = len(number)
                edit_accuracy = (ln - ed) / ln
                """if (idx % 10 == 0):
    			    print("Edit: ", ed, "Edit accuracy: ", edit_accuracy,"\n", ''.join(number).encode('utf-8'), "(", len(number), ") <-------> ", ''.join(detect_number).encode('utf-8'), "(", len(detect_number), ")")
                """
                total_ed += ed
                total_len += ln

            accuracy = (total_len - total_ed) / total_len
            print("Test Accuracy:", accuracy)
            ac += accuracy
    return ac / factor1
def train():
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(common.INITIAL_LEARNING_RATE,
                                               global_step,
                                               common.DECAY_STEPS,
                                               common.LEARNING_RATE_DECAY_FACTOR,
                                               staircase=True)
    logits, inputs, targets, seq_len, W, b = model.get_train_model()

    loss = tf.nn.ctc_loss(targets, logits, seq_len)
    cost = tf.reduce_mean(loss)

    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                           momentum=common.MOMENTUM).minimize(cost, global_step=global_step)

    # Option 2: tf.contrib.ctc.ctc_beam_search_decoder
    # (it's slower but you'll get better results)
    decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits, seq_len, merge_repeated=False)

    # Accuracy: label error rate
    acc = tf.reduce_mean(tf.edit_distance(tf.cast(decoded[0], tf.int32), targets))

    # Initialize the weights and biases
    init = tf.global_variables_initializer()

    def do_report():
        test_feed = {inputs: test_inputs,
                     targets: test_targets,
                     seq_len: test_seq_len}
        dd, log_probs, accuracy = session.run([decoded[0], log_prob, acc], test_feed)
        report_accuracy(dd, test_targets)
        # decoded_list = decode_sparse_tensor(dd)

    def do_batch():
        feed = {inputs: train_inputs, targets: train_targets, seq_len: train_seq_len}
        b_cost, steps, _ = session.run([cost, global_step, optimizer], feed)
        if steps > 0 and steps % common.REPORT_STEPS == 0:
            do_report()
            save_path = saver.save(session, "models/ocr.model", global_step=steps)
            #print(save_path)
        return b_cost, steps

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as session:
        session.run(init)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
        for curr_epoch in range(num_epochs):
            # variables = tf.all_variables()
            # for i in variables:
            #     print(i.name)

            print("Epoch.......", curr_epoch)
            train_cost = train_ler = 0
            for batch in range(common.BATCHES):
                start = time.time()
                train_inputs, train_targets, train_seq_len = utils.get_data_set('train', batch * common.BATCH_SIZE,
                                                                                (batch + 1) * common.BATCH_SIZE)

                #print("get data time", time.time() - start)
                start = time.time()
                c, steps = do_batch()
                train_cost += c * common.BATCH_SIZE
                seconds = time.time() - start
                print("Step:", steps, ", batch seconds:", seconds)

            train_cost /= common.TRAIN_SIZE
            # train_ler /= common.TRAIN_SIZE
            val_feed = {inputs: train_inputs,
                        targets: train_targets,
                        seq_len: train_seq_len}

            val_cost, val_ler, lr, steps = session.run([cost, acc, learning_rate, global_step], feed_dict=val_feed)

            log = "Epoch {}/{}, steps = {}, train_cost = {:.3f}, train_ler = {:.3f}, val_cost = {:.3f}, val_ler = {:.3f}, time = {:.3f}s, learning_rate = {}"
            print(log.format(curr_epoch + 1, num_epochs, steps, train_cost, train_ler, val_cost, val_ler,
                             time.time() - start, lr))
from utils import decode_sparse_tensor

# Some configs
# Accounting for the 0th index + space + blank label = 28 characters
# num_classes = ord('9') - ord('0') + 1 + 1 + 1
num_classes = common.num_classes
print("num_classes", num_classes)
# Hyper-parameters
num_epochs = 10000
num_hidden = 64
num_layers = 1
print("num_hidden:", num_hidden, "num_layers:", num_layers)

# THE MAIN CODE!

test_inputs, test_targets, test_seq_len = utils.get_data_set('test')
print("Data loaded....")


# graph = tf.Graph()
def report_accuracy(decoded_list, test_targets):
    original_list = decode_sparse_tensor(test_targets)
    detected_list = decode_sparse_tensor(decoded_list)
    true_numer = 0
    # print(detected_list)
    if len(original_list) != len(detected_list):
        print("len(original_list)", len(original_list), "len(detected_list)", len(detected_list),
              " test and detect length desn't match")
        return
    print("T/F: original(length) <-------> detectcted(length)")
    for idx, number in enumerate(original_list):
Example #24
def train():

    # Part 1: build the model
    global_step = tf.Variable(0, trainable=False)  # global step counter
    learning_rate = tf.train.exponential_decay(common.INITIAL_LEARNING_RATE,
                                               global_step,
                                               common.DECAY_STEPS,
                                               common.LEARNING_RATE_DECAY_FACTOR,
                                               staircase=True)

    inputs = tf.placeholder(tf.float32, [None, None, common.OUTPUT_SHAPE[0]]) #[,,60]
    targets = tf.sparse_placeholder(tf.int32)  # sparse tensor as an (indices, values, shape) triple
    seq_len = tf.placeholder(tf.int32, [None])  # list of length 64, e.g. [180, 180, ..., 180]


    W = tf.Variable(tf.truncated_normal([num_hidden,
                                         num_classes],
                                        stddev=0.1), name="W")#shape (200,781)
    b = tf.Variable(tf.constant(0., shape=[num_classes]), name="b")#781

    #cell = tf.contrib.rnn.LSTMCell(num_hidden, state_is_tuple=True)
    #outputs1, _ = tf.nn.dynamic_rnn(cell, inputs, seq_len, dtype=tf.float32)#(64, 3000, 200)
    cell_fw = tf.contrib.rnn.LSTMCell(num_hidden)
    cell_bw = tf.contrib.rnn.LSTMCell(num_hidden)
    initial_state_fw = cell_fw.zero_state(common.BATCH_SIZE,dtype=tf.float32)
    initial_state_bw = cell_bw.zero_state(common.BATCH_SIZE,dtype=tf.float32)
    (out, states)=tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs,seq_len, initial_state_fw,initial_state_bw)
    outputs1 = tf.concat(out, 2)

    shape = tf.shape(inputs)
    batch_s, max_timesteps = shape[0], shape[1] #64,3000
    outputs = tf.reshape(outputs1, [-1, num_hidden])#(19200,200)
    logits0 = tf.matmul(outputs, W) + b
    logits1 = tf.reshape(logits0, [batch_s, -1, num_classes])
    logits = tf.transpose(logits1, (1, 0, 2))#(3000, 64, 781)

    loss = tf.nn.ctc_loss( targets, logits, seq_len)
    #inputs/logits: 3-D float Tensor.If time_major == True (default),  will shaped: [max_time x batch_size x num_classes]
    cost = tf.reduce_mean(loss)
    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                           momentum=common.MOMENTUM).minimize(cost, global_step=global_step)
    decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits, seq_len, merge_repeated=False)  # or "tf.nn.ctc_greedy_decoder", an alternative decoding strategy
    acc = tf.reduce_mean(tf.edit_distance(tf.cast(decoded[0], tf.int32), targets))


    # Part 2: run the graph in a session
    with tf.Session() as sess:
        # wrap the session so it can be debugged with tfdbg
        # sess = tfdbg.LocalCLIDebugWrapperSession(sess)
        # sess.add_tensor_filter("has_inf_or_nan", tfdbg.has_inf_or_nan)

        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=500)
        for curr_epoch in range(num_epochs):
            print("Epoch.......", curr_epoch)
            train_cost = train_ler = 0
            for batch in range(common.BATCHES):
                start = time.time()
                train_inputs, train_targets, train_seq_len = utils.get_data_set('train', batch * common.BATCH_SIZE,
                                                                                  (batch + 1) * common.BATCH_SIZE)
                '''
                train_inputs:shape(64, 3000, 60)
                train_targets:3-tuple,sparse_tensor,(indices_matrix,values,shape)
                train_seq_len:length  64  [180,180,...,180]
                (Pdb) p train_targets
                    (array([[ 0,  0],
                            [ 0,  1],
                            [ 0,  2],
                            ..., 
                            [63, 0],
                            [63, 1],
                            [63, ?]]), array([25, 19, 19, ..., 19, 22,  2], dtype=int32), array([ 64, 145]))

                '''
                print("get data time", time.time() - start)
                start = time.time()
                train_feed = {inputs: train_inputs, targets: train_targets, seq_len: train_seq_len}
                b_cost, steps, _ = sess.run([cost, global_step, optimizer], train_feed)  # run one training step
                #outputs1 = sess.run([outputs1],train_feed)
                '''
                outputs1:list,length:1 ouputs[0].shape:(64, 3000, 200)
                '''
                #outputs = sess.run([outputs],train_feed)
                '''
                outputs:list,length:1 ouputs[0].shape:(192000, 200)
                '''
                #logits = sess.run([logits],train_feed)
                '''
                logits:list,lenth=1,logits[0],shape:(3000, 64, 781)
                '''

                if steps > 0 and steps % common.REPORT_STEPS == 0:
                    #pdb.set_trace()
                    val_feed = {inputs: val_inputs, targets: val_targets, seq_len: val_seq_len}  # 64 validation samples
                    '''
                    val_inputs:shape(100, 3000, 60)
                    val_targets:3-tuple,sparse_tensor,(indices_matrix,values,shape)
                    val_seq_len:length  100  [180,180,...,180]
                    (Pdb) p train_targets
                        (array([[ 0,  0],
                                [ 0,  1],
                                [ 0,  2],
                                ..., 
                                [99, 0],
                                [99, 1],
                                [99, ?]]), array([25, 19, 19, ..., 19, 22,  2], dtype=int32), array([ 64, 145]))

                    '''
                    decoded0, log_probs, accuracy = sess.run([decoded[0], log_prob, acc], val_feed)
                    '''
                    decoded0:3-tuple
                    SparseTensorValue(indices=array([[ 0,  0],
                       [ 0,  1],
                       [ 0,  2],
                       ..., 
                       [99, 16]]), values=array([21, 20, 21, ..., 21, 21, 21]), dense_shape=array([100,  17]))
                    log_probs:shape (100, 1)
                    accuracy:0.92347372

                    '''
                    report_accuracy(decoded0, val_targets)
                save_path = saver.save(sess, "models/ocr.model", global_step=steps)#保存模型
               
                train_cost += b_cost * common.BATCH_SIZE
                seconds = time.time() - start
                print("Step:", steps, ", batch seconds:", seconds)

            train_cost /= common.TRAIN_SIZE

            val_feed = {inputs: val_inputs,
                        targets: val_targets,
                        seq_len: val_seq_len}

            val_cost, val_ler, lr, steps = sess.run([cost, acc, learning_rate, global_step], feed_dict=val_feed)
            log = "Epoch {}/{}, steps = {}, train_cost = {:.3f}, train_ler = {:.3f}, val_cost = {:.3f}, val_ler = {:.3f}, time = {:.3f}s, learning_rate = {}"
            print(log.format(curr_epoch + 1, num_epochs, steps, train_cost, train_ler, val_cost, val_ler,
                             time.time() - start, lr))
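The pdb dumps above show that train_targets and val_targets are fed as sparse tensors, i.e. an (indices, values, dense_shape) triple in which every index row is a (sample, character position) pair. As a rough illustration of how such a triple can be assembled from a list of label sequences (a hypothetical helper, not the actual utils.get_data_set implementation):

import numpy as np

def sparse_tuple_from(sequences):
    # Build the (indices, values, dense_shape) triple expected by the
    # targets placeholder and by tf.nn.ctc_loss.
    indices, values = [], []
    for sample_idx, seq in enumerate(sequences):
        indices.extend([sample_idx, pos] for pos in range(len(seq)))
        values.extend(seq)
    indices = np.asarray(indices, dtype=np.int64)
    values = np.asarray(values, dtype=np.int32)
    dense_shape = np.asarray([len(sequences), max(len(s) for s in sequences)], dtype=np.int64)
    return indices, values, dense_shape

print(sparse_tuple_from([[25, 19, 19], [19, 22]]))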
Exemple #25
0
def train():
    test_names, test_inputs, test_targets, test_seq_len = utils.get_data_set(
        'valid.txt')
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(
        common.INITIAL_LEARNING_RATE,
        global_step,
        common.DECAY_STEPS,
        common.LEARNING_RATE_DECAY_FACTOR,
        staircase=True)
    logits, inputs, targets, seq_len, W, b = model.get_train_model()
    loss = tf.nn.ctc_loss(logits, targets, seq_len)
    cost = tf.reduce_mean(loss)

    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                           momentum=common.MOMENTUM).minimize(
                                               cost, global_step=global_step)

    decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits,
                                                      seq_len,
                                                      merge_repeated=False)

    acc = tf.reduce_mean(
        tf.edit_distance(tf.cast(decoded[0], tf.int32), targets))

    def do_report():
        test_feed = {
            inputs: test_inputs,
            targets: test_targets,
            seq_len: test_seq_len
        }
        dd, log_probs, accuracy = session.run([decoded[0], log_prob, acc],
                                              test_feed)
        accuracy = report_accuracy(dd, test_targets, test_names)
        save_path = saver.save(session,
                               "models/ocr.model-" + str(accuracy),
                               global_step=steps)
        # decoded_list = decode_sparse_tensor(dd)

    def do_batch():
        feed = {
            inputs: train_inputs,
            targets: train_targets,
            seq_len: train_seq_len
        }
        b_cost, steps, _ = session.run([cost, global_step, optimizer], feed)
        if steps > 0 and steps % common.REPORT_STEPS == 0:
            do_report()
        return b_cost, steps

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.49)
    with tf.Session(config=tf.ConfigProto(log_device_placement=True,
                                          allow_soft_placement=True,
                                          gpu_options=gpu_options)) as session:
        ckpt = tf.train.get_checkpoint_state("models")
        if ckpt and ckpt.model_checkpoint_path:
            saver = tf.train.Saver()
            saver.restore(session, ckpt.model_checkpoint_path)
        else:
            print("no checkpoint found")
            # Initialize the weights and biases
            init = tf.initialize_all_variables()
            session.run(init)
            saver = tf.train.Saver(tf.all_variables(), max_to_keep=100)
        for curr_epoch in xrange(num_epochs):

            print("Epoch.......", curr_epoch)
            train_cost = train_ler = 0
            for batch in xrange(common.BATCHES):
                start = time.time()
                train_names, train_inputs, train_targets, train_seq_len = utils.get_data_set(
                    'train.txt', batch * common.BATCH_SIZE,
                    (batch + 1) * common.BATCH_SIZE)

                print("get data time", time.time() - start)
                start = time.time()
                c, steps = do_batch()
                train_cost += c * common.BATCH_SIZE
                seconds = time.time() - start
                print("Step: ", steps, ", batch seconds: ", seconds)

            train_cost /= common.TRAIN_SIZE

            val_feed = {
                inputs: train_inputs,
                targets: train_targets,
                seq_len: train_seq_len
            }

            val_cost, val_ler, lr, steps = session.run(
                [cost, acc, learning_rate, global_step], feed_dict=val_feed)

            log = "Epoch {}/{}, steps = {}, train_cost = {:.3f}, train_ler = {:.3f}, val_cost = {:.3f}, val_ler = {:.3f}, time = {:.3f}s, learning_rate = {}"
            print(
                log.format(curr_epoch + 1, num_epochs, steps, train_cost,
                           train_ler, val_cost, val_ler,
                           time.time() - start, lr))
Exemple #26
0
import utils
import numpy as np
import cv2
import common

test_names, test_inputs, test_targets, test_seq_len = utils.get_data_set(
    'sets/jameel/testimg.txt', 0, 15)

print test_names

print len(test_names)
Exemple #27
0
# Some configs
# Accounting the 0th indice +  space + blank label = 28 characters
# num_classes = ord('9') - ord('0') + 1 + 1 + 1
num_classes = common.num_classes
print("num_classes", num_classes)
# Hyper-parameters
num_epochs = 10000  # train on the full data set num_epochs times (one complete forward-and-backward pass of the whole data set through the network is one epoch)
num_hidden = 64
num_layers = 1
print("num_hidden:", num_hidden, "num_layers:", num_layers)

# THE MAIN CODE!

# The test data loaded here is used every REPORT_STEPS (one thousand) training steps to measure the percentage of characters recognized correctly.
test_inputs, test_targets, test_seq_len = utils.get_data_set('test')
print("Test Data loaded....")


# Check the training accuracy on the test data, i.e. the fraction of input characters recognized correctly.
def report_accuracy(decoded_list, test_targets):
    # Report the character recognition accuracy: correctly recognized characters / total characters.
    original_list = decode_sparse_tensor(test_targets)  # ground-truth character list from the image labels
    detected_list = decode_sparse_tensor(decoded_list)  # character list recognized by the model
    true_numer = 0
    # print(detected_list)
    if len(original_list) != len(detected_list):
        print(
            "len(original_list) (number of labeled characters)", len(original_list),
            "len(detected_list) (number of recognized characters)", len(detected_list),
            " test and detect length doesn't match")
Exemple #28
0
def main():
    '''
    Args
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('--load',
                        type=str,
                        help='Checkpoint to load all weights from.')
    parser.add_argument('--name', type=str, help='Name of experiment.')
    parser.add_argument('--batch-size',
                        type=int,
                        default=3,
                        help='Mini-batch size.')
    parser.add_argument('--image-size',
                        type=int,
                        default=48,
                        help='Size of random crops used for training samples.')
    parser.add_argument('--classes',
                        type=int,
                        default=1,
                        help='classes number')
    parser.add_argument('--gpu',
                        type=str,
                        default='0',
                        help='Which GPU to use')
    parser.add_argument('--iteration',
                        type=int,
                        default=1000,
                        help='save weights iterations.')
    parser.add_argument('--test-filename',
                        type=str,
                        default='test',
                        help='The test filenames from Benchmarks.')

    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    '''
    Placeholders for feeding data
    '''
    x_train = tf.placeholder(tf.float32,
                             shape=[None, None, None, 3],
                             name='x_train')
    y_true = tf.placeholder(tf.float32,
                            shape=[None, None, None, 1],
                            name='y_true')

    print('__DEBUG__NAME', x_train)

    unet = Unet(batch_size=args.batch_size,
                classes=args.classes,
                img_size=args.image_size)
    y_pred = unet.create_unet(x_train)

    y_loss = unet.loss_function(y_true, y_pred)

    # y_pred = tf.argmax(y_pred, axis = 3, name="y_pred")

    optimizer = unet.optimize(y_loss)

    train_path = './dataset/retina120_256.h5'
    # train_label = './dataset/..'

    train_data = get_data_set(train_path, 'train')
    train_label = get_data_set(train_path, 'label')

    # Create log folder
    if args.load and not args.name:
        log_path = os.path.dirname(args.load)
    else:
        log_path = build_log_dir(args, sys.argv)

    benchmarks = [Benchmarks('./Benchmarks/retina_test/', name='retina image')]

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver(max_to_keep=100)

        iteration = 0
        n_iteration_per_epoch = len(train_data) // args.batch_size

        # Load all
        if args.load:
            iteration = int(args.load.split('-')[-1])
            saver.restore(sess, args.load)
            print(saver)
            print("load_process_DEBUG")

        while True:

            t = trange(0,
                       len(train_data) - args.batch_size + 1,
                       args.batch_size,
                       desc='Iterations')
            total_loss = 0

            for batch_idx in t:

                # y_pred = unet.create_unet(x_train, train=False)
                if iteration % args.iteration == 0:
                    for benchmark in benchmarks:
                        benchmark.evaluate(sess, y_pred, log_path, iteration)

                    saver.save(sess,
                               os.path.join(log_path, 'weights'),
                               global_step=iteration,
                               write_meta_graph=True)

                batch_train = train_data[batch_idx:batch_idx + args.batch_size]
                batch_label = train_label[batch_idx:batch_idx +
                                          args.batch_size]

                batch_train = batch_bgr2rgb(batch_train)
                batch_label = batch_bgr2gray(batch_label)
                # for i in range(batch_train.shape[0]):
                #     cv2.imshow('__DEBUG__', batch_train[i])
                #     cv2.imshow('__DEBUG__01', batch_label[i])
                #     cv2.waitKey(0)

                # batch_train = np.expand_dims(batch_train, axis=-1)
                batch_train = np.multiply(batch_train, 1.0 / 255.0)

                batch_label = np.expand_dims(batch_label, axis=-1)
                batch_label = np.multiply(batch_label, 1.0 / 255.0)
                # print('__DEBUG__', batch_label.shape)

                feed_dict_tr = {x_train: batch_train, y_true: batch_label}

                y_arr = sess.run(y_pred, feed_dict=feed_dict_tr)
                sess.run(optimizer, feed_dict=feed_dict_tr)
                loss = sess.run(y_loss, feed_dict=feed_dict_tr)

                total_loss += loss

                cont = str(np.max(y_arr) / 100) + ": " + str(
                    total_loss / n_iteration_per_epoch)

                t.set_description("%s" % loss)

                # _, err = sess.run([optimizer, y_loss],\
                # feed_dict={x:batch_train,y_true:batch_label})

                iteration += 1
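Before each step the U-Net batch is converted to RGB, the label masks to single-channel grayscale, and both are scaled to [0, 1]. A small NumPy sketch of that per-batch normalization (shapes and dtypes are illustrative assumptions, not taken from the dataset above):

import numpy as np

def normalize_batch(images, masks):
    # images: (N, H, W, 3) uint8, masks: (N, H, W) uint8
    images = images.astype(np.float32) / 255.0   # scale pixel values to [0, 1]
    masks = np.expand_dims(masks, axis=-1)        # add a channel axis -> (N, H, W, 1)
    masks = masks.astype(np.float32) / 255.0
    return images, masks

imgs = np.random.randint(0, 256, (2, 48, 48, 3), dtype=np.uint8)
msks = np.random.randint(0, 256, (2, 48, 48), dtype=np.uint8)
x, y = normalize_batch(imgs, msks)
print(x.shape, y.shape, x.max() <= 1.0)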
Exemple #29
0
def train():
    global_step = tf.Variable(
        0, trainable=False
    )  # total number of batches trained so far; each batch holds 64 images (one iteration), so this also counts iterations
    learning_rate = tf.train.exponential_decay(
        common.INITIAL_LEARNING_RATE,
        global_step,
        common.DECAY_STEPS,
        common.LEARNING_RATE_DECAY_FACTOR,
        staircase=True)  # exponentially decayed learning rate
    logits, inputs, targets, seq_len, W, b = model.get_train_model()
    with tf.name_scope('loss'):
        loss = tf.nn.ctc_loss(targets, logits, seq_len)
        cost = tf.reduce_mean(loss)  # mean CTC loss, i.e. the recognition error
        tf.scalar_summary('loss', cost)  # log the loss so its evolution can be visualized
    with tf.name_scope('train'):
        optimizer = tf.train.MomentumOptimizer(
            learning_rate=learning_rate,
            momentum=common.MOMENTUM).minimize(cost, global_step=global_step)

        # Option 2: tf.contrib.ctc.ctc_beam_search_decoder
        # (it's slower but you'll get better results)
        decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits,
                                                          seq_len,
                                                          merge_repeated=False)

        # Accuracy: label error rate
        acc = tf.reduce_mean(
            tf.edit_distance(tf.cast(decoded[0], tf.int32),
                             targets))  # label error rate, used as the accuracy metric
        tf.scalar_summary('accuracy', acc)  # log the accuracy so its evolution can be visualized

    # Initialize the weights and biases
    init = tf.global_variables_initializer()

    def do_report():
        test_feed = {
            inputs: test_inputs,
            targets: test_targets,
            seq_len: test_seq_len
        }
        dd, log_probs, accuracy = session.run([decoded[0], log_prob, acc],
                                              test_feed)  # fetch the decoded output, log probabilities and accuracy
        report_accuracy(dd, test_targets)
        # decoded_list = decode_sparse_tensor(dd)

    def do_batch():
        # Each training batch (one iteration over 64 images) updates the weights and biases of every layer.
        # Each batch is a sequence made up of 64 images.
        feed = {
            inputs: train_inputs,
            targets: train_targets,
            seq_len: train_seq_len
        }
        b_cost, steps, _ = session.run([cost, global_step, optimizer], feed)
        if steps % 50 == 0:
            result = session.run(merged, feed_dict=feed)  # the merged summary op also has to be run
            writer.add_summary(result,
                               steps)  # result is a Summary protobuf; steps gives the x-axis value in TensorBoard
        if steps > 0 and steps % common.REPORT_STEPS == 0:  # save the model every REPORT_STEPS batches
            do_report()  # and measure the character recognition accuracy on the test data
            save_path = saver.save(session,
                                   "models/ocr.model",
                                   global_step=steps)
            # print(save_path)
        return b_cost, steps  # return the cost of this batch and the current step number

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.get_default_graph()._kernel_label_map({"CTCLoss": "WarpCTC"}):
        with tf.Session(config=config) as session:
            merged = tf.summary.merge_all()
            writer = tf.summary.FileWriter("logs/", session.graph)
            session.run(init)
            saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
            for curr_epoch in range(
                    num_epochs):  # train over the full data set (6400 images) num_epochs times
                curr_epoch_start = time.time()  # start time of the current epoch
                # variables = tf.all_variables()
                # for i in variables:
                #     print(i.name)

                print("Epoch(第几个完整数据集的训练).......",
                      curr_epoch)  # 当前是第几次对完整数据集进行训练
                train_cost = train_ler = 0
                for batch in range(
                        common.BATCHES
                ):  # BATCH_SIZE = 64, so one epoch over the 6400-image data set takes 6400 / 64 = 100 batches (iterations)
                    get_data_start = time.time()  # start time for loading this batch
                    train_inputs, train_targets, train_seq_len = utils.get_data_set(
                        'train', batch * common.BATCH_SIZE,
                        (batch + 1) * common.BATCH_SIZE)  # take the next 64 images for this batch

                    get_data_time = time.time(
                    ) - get_data_start  # time spent loading this batch
                    start = time.time()  # start time of training this batch
                    c, steps = do_batch(
                    )  # each batch of 64 images updates the weights and biases of every layer
                    train_cost += c * common.BATCH_SIZE  # accumulate the epoch loss: batch size times the mean batch cost c
                    seconds = time.time() - start  # time spent training this batch
                    print("Step:", steps,
                          ", batch seconds:", seconds,
                          ", batch get data seconds:", get_data_time,
                          ", batch cost:", c)

                train_cost /= common.TRAIN_SIZE  # average per-image loss over the current epoch
                # train_ler /= common.TRAIN_SIZE
                val_feed = {
                    inputs: train_inputs,
                    targets: train_targets,
                    seq_len: train_seq_len
                }  # reuse the last batch of the epoch as the validation feed

                # The data set is trained num_epochs times; each epoch consists of 100 batches of 64 images.
                # val_cost: current loss (cost op); val_ler: current label error rate (acc op);
                # lr: current learning rate; steps: total number of batches trained so far (global_step).
                val_cost, val_ler, lr, steps = session.run(
                    [cost, acc, learning_rate, global_step],
                    feed_dict=val_feed)

                log = "Epoch {}/{}, steps = {}, train_cost = {:.3f}, train_ler = {:.3f}, val_cost = {:.3f}, val_ler = {:.3f}, time = {:.3f}s, learning_rate = {}"
                print(
                    log.format(curr_epoch + 1, num_epochs, steps, train_cost,
                               train_ler, val_cost, val_ler,
                               time.time() - curr_epoch_start, lr))
            writer.close()  # close the summary FileWriter once all epochs are done
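The comments above describe a staircase exponential decay: the learning rate is multiplied by LEARNING_RATE_DECAY_FACTOR once every DECAY_STEPS global steps. A plain-Python sketch of that schedule (the constants are only illustrative, not the values in common.py):

def decayed_learning_rate(step, initial_lr=1e-2, decay_steps=1000, decay_factor=0.9):
    # Staircase decay: the rate drops by decay_factor every decay_steps steps.
    return initial_lr * decay_factor ** (step // decay_steps)

for step in (0, 999, 1000, 2500, 5000):
    print(step, decayed_learning_rate(step))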
Exemple #30
0
def supervised_training():
	train_x, train_y = get_data_set("train")
	with open('..\\data\\svtrain.p', 'rb') as fp:
		idx = pickle.load(fp)
	train_x = train_x[idx, :]
	train_y = train_y[idx, :]
	num_train = train_x.shape[0]

	train_data = tf.data.Dataset.from_tensor_slices((train_x, train_y))
	train_data = train_data.shuffle(num_train)
	train_data = train_data.map(train_parse, num_parallel_calls=8)
	train_data = train_data.batch(batch_size)
	train_iter = train_data.make_initializable_iterator()
	x_train, y_train = train_iter.get_next()

	test_x, test_y = get_data_set("test")
	num_test = test_x.shape[0]

	test_data = tf.data.Dataset.from_tensor_slices((test_x, test_y))
	test_data = test_data.map(test_parse, num_parallel_calls=8)
	test_data = test_data.batch(1)
	test_iter = test_data.make_initializable_iterator()
	x_test, y_test = test_iter.get_next()

	X = tf.placeholder(tf.float32, [None, height, width, 3], name='Input')
	Y = tf.placeholder(tf.int32, [None, num_classes], name='Label')
	drop_rate = tf.placeholder(tf.float32)
	
	logits = TESnet(X, "TESnet", drop_rate, reuse=False)
	pred = tf.nn.softmax(logits)

	# Learning Rate
	with tf.variable_scope('learning_rate'):
		lr_v = tf.Variable(lr_init, trainable=False)

	# Loss Function
	with tf.name_scope("Cross_Entropy_Loss"):
		loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
	sum_loss_op = tf.summary.scalar("Cross_Entropy_Loss", loss_op)

	# Optimizer
	optimizer = tf.train.AdamOptimizer(lr_v)
	gvs = optimizer.compute_gradients(loss_op)
	capped_gvs = [(tf.clip_by_value(grad,-1.0, 1.0), var) for grad, var in gvs]
	train_op = optimizer.apply_gradients(capped_gvs)

	saver = tf.train.Saver()

	# Evaluate Model
	correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(Y,1))
	accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
	sum_acc_op = tf.summary.scalar("Accuracy", accuracy)

	num_batches = int(math.ceil(num_train/batch_size))

	with tf.Session() as sess:
		log = "\n========== Supervised Training Begin ==========\n"
		write_logs(logs_sv, log, True)
		train_start = time.time()

		# Initialize variables
		sess.run(tf.global_variables_initializer())
		
		# Op to write logs to Tensorboard
		train_sum_writer = tf.summary.FileWriter(logs_dir, tf.get_default_graph())

		for epoch in range(num_epoches):
			epoch_start = time.time()

			if (epoch == 70):
				new_lr = lr_v * lr_decay
				sess.run(tf.assign(lr_v, new_lr))
				log = "** New learning rate: %1.9f **\n" % (lr_v.eval())
				write_logs(logs_sv, log, False)
			elif epoch == 0:
				sess.run(tf.assign(lr_v, lr_init))
				log = "** Initial learning rate: %1.9f **\n" % (lr_init)
				write_logs(logs_sv, log, False)

			avg_loss = 0
			avg_acc = 0

			sess.run(train_iter.initializer)
			for batch in range(num_batches):
				batch_start = time.time()

				bx, by = sess.run([x_train, y_train])
				sess.run([train_op], feed_dict={X:bx, Y:by, drop_rate:dropout})
				loss, acc, sum_loss, sum_acc = sess.run([loss_op, accuracy, sum_loss_op, sum_acc_op], feed_dict={X:bx, Y:by, drop_rate:0.0})

				avg_loss += loss
				avg_acc += acc

				train_sum_writer.add_summary(sum_loss, epoch*num_batches+batch)
				train_sum_writer.add_summary(sum_acc, epoch*num_batches+batch)

				log = "Time {:2.5f}, Epoch {}, Batch {}, Loss = {:2.5f}, Training Accuracy = {:0.4f}".format(time.time()-batch_start, epoch, batch, loss, acc)
				write_logs(logs_sv, log, False)

			log = "\nTime {:2.5f}, Epoch {}, Average Loss = {:2.5f}, Training Average Accuracy = {:0.4f}\n"\
				.format(time.time()-epoch_start, epoch, avg_loss/num_batches, avg_acc/num_batches)
			write_logs(logs_sv, log, False)

		log = "\nSupervised Training Time: {:2.5f}".format(time.time()-train_start)
		write_logs(logs_sv, log, False)
		log = "\n========== Supervised Training End ==========\n"
		write_logs(logs_sv, log, False)

		# Save model
		save_path = saver.save(sess, sv_model_dir)
		log = "Model is saved in file: %s" % save_path
		write_logs(logs_sv, log, False)
		
		log = "\n========== Supervised Testing Begin ==========\n"
		write_logs(logs_sv, log, False)
		test_start = time.time()
		avg_acc = 0
		sess.run(test_iter.initializer)
		for i in range(num_test):
			batch_start = time.time()

			bx, by = sess.run([x_test, y_test])
			acc = sess.run(accuracy, feed_dict={X:bx, Y:by, drop_rate:0.0})
			avg_acc += acc

			log = "Time {:2.5f}, Image {:05d}, Testing Accuracy = {:0.4f}".format(time.time()-batch_start, i+1, acc)
			write_logs(logs_sv, log, False)

		log = "\nTesting Accuracy = {:0.4f}\n".format(avg_acc/num_test)
		write_logs(logs_sv, log, False)
		log = "\nSupervised Testing Time: {:2.5f}".format(time.time()-test_start)
		write_logs(logs_sv, log, False)

		sess.close()
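The optimizer in this example clips every gradient element to [-1, 1] before applying the update (compute_gradients, tf.clip_by_value, apply_gradients instead of a plain minimize). A minimal NumPy sketch of the same element-wise clipping applied to one SGD step (illustration only, not the TESnet training code):

import numpy as np

def clipped_sgd_step(param, grad, lr=1e-3, clip=1.0):
    # Clip each gradient component to [-clip, clip] before the update,
    # mirroring tf.clip_by_value(grad, -1.0, 1.0) in the snippet above.
    grad = np.clip(grad, -clip, clip)
    return param - lr * grad

w = np.array([0.5, -0.2])
g = np.array([3.0, -0.4])   # the first component is clipped to 1.0
print(clipped_sgd_step(w, g))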
Exemple #31
0
from __future__ import division
from __future__ import print_function

import time
import tensorflow as tf
import common
import utils
import pdb
from utils import decode_sparse_tensor
from tensorflow.python import debug as tfdbg

num_classes = len(common.CHARSET) + 1 #781
num_epochs = 2000
num_hidden = 200
num_layers = 1
train_inputs, train_targets, train_seq_len = utils.get_data_set('train')
val_inputs, val_targets, val_seq_len = utils.get_data_set('val')
print("Data loaded....")

# graph = tf.Graph()
def report_accuracy(decoded_list, train_targets):
    original_list = decode_sparse_tensor(train_targets)#list,length:100,['6', '0', '6', '/','2', '血', '1', '9', ' ', '1', '2', ':', '2', '2', '~']
    detected_list = decode_sparse_tensor(decoded_list)#list,length:100,predicted sequences ['0', '2', '0', '2', '0', '2', '0', '2', '2', '2', '0', '2', '0', '2', '0', '2', '0', '2', '2', '2', '2', '2', '0', '2', '2', '0', '2']
    true_numer = 0
    # print(detected_list)
    if len(original_list) != len(detected_list):
        print("len(original_list)", len(original_list), "len(detected_list)", len(detected_list),
              " test and detect length desn't match")
        return
    print("T/F: original(length) <-------> detectcted(length)")
    for idx, number in enumerate(original_list):
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--load',
                        type=str,
                        help='Checkpoint to load all weights from.')
    parser.add_argument('--load-gen',
                        type=str,
                        help='Checkpoint to load generator weights only from.')
    parser.add_argument('--name', type=str, help='Name of experiment.')
    parser.add_argument('--overfit',
                        action='store_true',
                        help='Overfit to a single image.')
    parser.add_argument('--batch-size',
                        type=int,
                        default=16,
                        help='Mini-batch size.')
    parser.add_argument(
        '--log-freq',
        type=int,
        default=10000,
        help='How many training iterations between validation/checkpoints.')
    parser.add_argument('--learning-rate',
                        type=float,
                        default=1e-4,
                        help='Learning rate for Adam.')
    parser.add_argument('--content-loss',
                        type=str,
                        default='mse',
                        choices=['mse', 'L1', 'edge_loss_mse', 'edge_loss_L1'],
                        help='Metric to use for content loss.')
    parser.add_argument(
        '--use-gan',
        action='store_true',
        help='Add adversarial loss term to generator and trains discriminator.'
    )
    parser.add_argument('--image-size',
                        type=int,
                        default=96,
                        help='Size of random crops used for training samples.')
    parser.add_argument('--vgg-weights',
                        type=str,
                        default='vgg_19.ckpt',
                        help='File containing VGG19 weights (tf.slim)')
    parser.add_argument('--train-dir',
                        type=str,
                        help='Directory containing training images')
    parser.add_argument(
        '--validate-benchmarks',
        action='store_true',
        help=
        'If set, validates that the benchmarking metrics are correct for the images provided by the authors of the SRGAN paper.'
    )
    parser.add_argument('--gpu',
                        type=str,
                        default='0',
                        help='Which GPU to use')
    parser.add_argument('--epoch',
                        type=int,
                        default='1000000',
                        help='How many iterations ')
    parser.add_argument('--is-val',
                        action='store_true',
                        help='Only run the benchmark evaluation.')

    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    srresnet_training = tf.placeholder(tf.bool, name='srresnet_training')

    srresnet_model = srresnet.Srresnet(training=srresnet_training,\
                              learning_rate=args.learning_rate,\
                              content_loss=args.content_loss)

    hr_y = tf.placeholder(tf.float32, [None, None, None, 3], name='HR_image')
    lr_x = tf.placeholder(tf.float32, [None, None, None, 3], name='LR_image')

    sr_pred = srresnet_model.forward(lr_x)
    sr_loss = srresnet_model.loss_function(hr_y, sr_pred)
    sr_opt = srresnet_model.optimize(sr_loss)

    benchmarks = [
        Benchmark('Benchmarks/Set5', name='Set5'),
        Benchmark('Benchmarks/Set14', name='Set14'),
        Benchmark('Benchmarks/BSD100', name='BSD100')
    ]

    if args.validate_benchmarks:
        for benchmark in benchmarks:
            benchmark.validate()

    # Create log folder
    if args.load and not args.name:
        log_path = os.path.dirname(args.load)
    else:
        log_path = build_log_dir(args, sys.argv)

    train_data_path = 'done_dataset\PreprocessedData.h5'
    val_data_path = 'done_dataset\PreprocessedData_val.h5'
    eval_data_path = 'done_dataset\PreprocessedData_eval.h5'

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())
        iteration = 0
        epoch = 0

        saver = tf.train.Saver()

        # Load all
        if args.load:
            iteration = int(args.load.split('-')[-1])
            saver.restore(sess, args.load)
            print(saver)
            print("load_process_DEBUG")

        train_data_set = get_data_set(train_data_path, 'train')
        val_data_set = get_data_set(val_data_path, 'val')
        eval_data_set = get_data_set(eval_data_path, 'eval')

        val_error_li = []
        eval_error_li = []
        fig = plt.figure()

        if args.is_val:
            for benchmark in benchmarks:
                psnr, ssim, _, _ = benchmark.eval(sess, sr_pred, log_path,
                                                  iteration)
                print(' [%s] PSNR: %.2f, SSIM: %.4f' %
                      (benchmark.name, psnr, ssim),
                      end='')

        else:
            while True:
                t = trange(0,
                           len(train_data_set) - args.batch_size + 1,
                           args.batch_size,
                           desc='Iterations')
                #One epoch
                for batch_idx in t:
                    t.set_description("Training... [Iterations: %s]" %
                                      iteration)

                    # Evaluate the model every args.log_freq iterations
                    if iteration % args.log_freq == 0:
                        # Loop over the validation set
                        for val_batch_idx in range(
                                0,
                                len(val_data_set) - args.batch_size + 1,
                                args.batch_size):
                            # Test every log-freq iterations
                            val_error = evaluate_model(
                                sr_loss,
                                val_data_set[val_batch_idx:val_batch_idx + 16], sess,
                                119, args.batch_size)
                            eval_error = evaluate_model(
                                sr_loss,
                                eval_data_set[val_batch_idx:val_batch_idx + 16], sess,
                                119, args.batch_size)
                        val_error_li.append(val_error)
                        eval_error_li.append(eval_error)

                        # Log error
                        plt.plot(val_error_li)
                        plt.savefig('val_error.png')
                        plt.plot(eval_error_li)
                        plt.savefig('eval_error.png')
                        # fig.savefig()

                        print('[%d] Test: %.7f, Train: %.7f' %
                              (iteration, val_error, eval_error),
                              end='')
                        # Evaluate benchmarks
                        log_line = ''
                        for benchmark in benchmarks:
                            psnr, ssim, _, _ = benchmark.evaluate(
                                sess, sr_pred, log_path, iteration)
                            print(' [%s] PSNR: %.2f, SSIM: %.4f' %
                                  (benchmark.name, psnr, ssim),
                                  end='')
                            log_line += ',%.7f, %.7f' % (psnr, ssim)
                        print()
                        # Write to log
                        with open(log_path + '/loss.csv', 'a') as f:
                            f.write(
                                '%d, %.15f, %.15f%s\n' %
                                (iteration, val_error, eval_error, log_line))
                        # Save checkpoint
                        saver.save(sess,
                                   os.path.join(log_path, 'weights'),
                                   global_step=iteration,
                                   write_meta_graph=False)

                    # Train Srresnet
                    batch_hr = train_data_set[batch_idx:batch_idx + 16]
                    batch_lr = downsample_batch(batch_hr, factor=4)
                    batch_lr, batch_hr = preprocess(batch_lr, batch_hr)
                    _, err = sess.run([sr_opt,sr_loss],\
                         feed_dict={srresnet_training: True, lr_x: batch_lr, hr_y: batch_hr})

                    #print('__training__ %s' % iteration)
                    iteration += 1
                print('__epoch__: %s' % epoch)
                epoch += 1
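The benchmark loop above logs PSNR and SSIM for each test set. For images scaled to [0, 1], PSNR is 10·log10(1 / MSE); a short NumPy sketch of that metric (not the Benchmark class implementation used above):

import numpy as np

def psnr(reference, reconstruction, max_val=1.0):
    # Peak signal-to-noise ratio between two images in [0, max_val].
    mse = np.mean((reference.astype(np.float64) - reconstruction.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')
    return 10.0 * np.log10((max_val ** 2) / mse)

hr = np.random.rand(32, 32, 3)
sr = np.clip(hr + 0.01, 0.0, 1.0)
print(psnr(hr, sr))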