Example #1
import datetime
import os

import numpy as np
import tensorflow as tf


def test():
    log_dir = './logdir'
    snapshot_interval = 10000
    snapshot_dir = './snapshot_dir'
    max_iter = 100000
    log_interval = 100

    lr = 0.0005

    file = 'data/test_32x32.mat'
    X_raw, y_raw = getData(filename=file)
    n_test = X_raw.shape[0]
    y_raw[y_raw == 10] = 0
    y_raw = np.reshape(y_raw, (n_test, ))

    snapshot_name = '%08d' % max_iter  # assumption: evaluate the final snapshot written during training
    snapshot_file = os.path.join(snapshot_dir, snapshot_name)

    with tf.Session() as sess:
        X = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
        y = tf.placeholder(tf.int32, shape=(None, ))
        rcnn = RCNN(time=3, K=192, p=0.9, numclass=10, is_training=False)  # disable training-only behaviour (e.g. dropout) at test time
        _, _, _, preds = rcnn.buile_model(X, y)

        snapshot_saver = tf.train.Saver(max_to_keep=None)  # keep all snapshots
        snapshot_saver.restore(sess, snapshot_file)

        np.random.seed(0)
        count = 0
        start = datetime.datetime.now()
        for i in range(n_test):
            image = X_raw[i:i + 1]  # keep the leading batch dimension the placeholder expects
            label = y_raw[i]
            preds_each = sess.run(preds, feed_dict={X: image})
            if preds_each[0] == label:
                count += 1
    acc = count / (n_test * 1.0)
    end = datetime.datetime.now()
    print("sum time: {}, acc: {}".format(end - start, acc))
Example #2
				print('Saved model checkpoint with val_acc: {}'.format(round(current_epoch_acc, 4)))

	time_elapsed = time.time() - since
	print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))

	return model


''' ############################ Parameters ############################'''
lr = 0.001
batch_size = 32
# root_data_dir should contain 3 sub-folders: 'train', 'validation' and 'test'
root_data_dir = '/Users/royhirsch/Documents/GitHub/ProcessedData'

''' ############################    Main    ############################'''
net = RCNN()
# weight_tensor = torch.tensor([1, 100]).float()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)

# Multiply the LR by gamma (1e-4 here) every step_size epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.0001)

# Create train and validation data loaders
trainDataLoader = BusDataLoader(root_dir=os.path.join(root_data_dir, 'train'),
                                data_loader_type='train',
                                BGpad=16,
                                outShape=224,
                                balance_data_size=1,
                                augment_pos=0)
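
The fragment above is the tail of a training function that is not shown in full. A minimal sketch of such a loop under stated assumptions (the train_model name, the epoch count, and the (inputs, labels) batch format of BusDataLoader are all assumptions):

import time


def train_model(model, criterion, optimizer, scheduler, loader, num_epochs=25):
    # Sketch only: assumes `loader` yields (inputs, labels) tensor batches.
    since = time.time()
    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        n_batches = 0
        for inputs, labels in loader:
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            n_batches += 1
        scheduler.step()  # apply the StepLR decay once per epoch
        print('Epoch {}: mean loss {:.4f}'.format(epoch, running_loss / max(n_batches, 1)))

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    return model

With the objects defined above, the call would be net = train_model(net, criterion, optimizer, exp_lr_scheduler, trainDataLoader).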
Example #3
import datetime
import os

import numpy as np
import tensorflow as tf


def main():
    log_dir = './logdir'
    snapshot_interval = 10000
    snapshot_dir = './snapshot_dir'
    max_iter = 100000
    log_interval = 100

    lr = 0.0005

    file = 'data/train_32x32.mat'
    X_raw, y_raw = getData(filename=file)
    n_train = X_raw.shape[0]
    y_raw[y_raw == 10] = 0
    y_raw = np.reshape(y_raw, (n_train, ))

    with tf.Session() as sess:
        X = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
        y = tf.placeholder(tf.int32, shape=(None, ))
        rcnn = RCNN(time=3, K=192, p=0.9, numclass=10, is_training=True)
        loss, summary_op, acc, _ = rcnn.buile_model(X, y)
        optimizer = tf.train.AdamOptimizer(learning_rate=lr,
                                           beta1=0.9,
                                           beta2=0.98,
                                           epsilon=1e-8).minimize(loss)
        init = tf.global_variables_initializer()
        sess.run(init)

        os.makedirs(snapshot_dir, exist_ok=True)
        snapshot_saver = tf.train.Saver(max_to_keep=None)  # keep all snapshots

        writer = tf.summary.FileWriter(log_dir, sess.graph)
        np.random.seed(0)
        loss_mean = 0
        acc_mean = 0
        start = datetime.datetime.now()
        for n_iter in range(max_iter):
            index = np.random.choice(n_train, 64, replace=True)
            image = X_raw[index]
            labels = y_raw[index]
            # print(image.shape)
            loss_batch, summary_op_batch, acc_batch, _ = sess.run(
                [loss, summary_op, acc, optimizer],
                feed_dict={
                    X: image,
                    y: labels
                })
            loss_mean += loss_batch
            acc_mean += acc_batch
            if (n_iter + 1) % log_interval == 0 or (n_iter + 1) == max_iter:
                loss_mean = loss_mean / (log_interval * 1.0)
                acc_mean = acc_mean / (log_interval * 1.0)
                batch_time = datetime.datetime.now()
                print("time: {},iter = {}\n\tloss = {}, accuracy (cur) = {} ".
                      format(batch_time - start, n_iter + 1, loss_mean,
                             acc_mean))
                loss_mean = 0
                acc_mean = 0

            writer.add_summary(summary_op_batch, global_step=n_iter)

            if (n_iter + 1) % snapshot_interval == 0 or (n_iter + 1) == max_iter:
                snapshot_file = os.path.join(snapshot_dir, "%08d" % (n_iter + 1))
                snapshot_saver.save(sess, snapshot_file, write_meta_graph=False)
                print('snapshot saved to ' + snapshot_file)
                print('snapshot saved to ' + snapshot_file)

    end = datetime.datetime.now()
    print("sum time: {}".format(end - start))
    writer.close()
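
Snapshots are named '%08d' % (n_iter + 1), so a full 100000-iteration run ends with ./snapshot_dir/00100000, which is the name Example #1's snapshot restore points at. The per-image loop in Example #1 can also be batched, since the X placeholder has a leading None dimension. A minimal sketch, reusing the session, placeholders, and test data from Example #1 (the batch size is an assumption):

# Sketch: batched evaluation inside Example #1's `with tf.Session()` block.
batch = 256
correct = 0
for i in range(0, n_test, batch):
    preds_batch = sess.run(preds, feed_dict={X: X_raw[i:i + batch]})
    correct += int(np.sum(preds_batch == y_raw[i:i + batch]))
print('acc: {}'.format(correct / float(n_test)))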
Example #4
	time_elapsed = time.time() - since
	print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))

	return model


''' ############################ Parameters ############################'''
lr = 0.001

batch_size = 32
# root_data_dir should contain 3 sub-folders: 'train', 'validation' and 'test'
root_data_dir = '/Users/Mor\'s Yoga/Documents/GitHub/DetectionProject/ProcessedData'

''' ############################    Main    ############################'''
# net = create_model()
net = RCNN(num_regressions=4, train_phase='regressions')
# net.load_state_dict(torch.load(PATH_TO_STATE_DICT_PR_FILE))
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=1000)

# Multiply the LR by gamma (1e-4 here) every step_size epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.0001)

# Create train and validation data loaders
trainDataLoader = BusDataLoader(root_dir=os.path.join(root_data_dir, 'train'),
                                data_loader_type='train',
                                BGpad=16,
                                outShape=224,
                                balance_data_size=1,
                                augment_pos=0)
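
Example #4 re-trains the head with four regression outputs and an MSE criterion, which points at bounding-box coordinate regression. A minimal sketch of a single training step under that assumption (the input shape and the boxes target tensor are hypothetical):

import torch

# Hypothetical batch: 8 images at the 224x224 outShape, 4 box values per sample.
images = torch.randn(8, 3, 224, 224)
boxes = torch.randn(8, 4)

net.train()
optimizer.zero_grad()
outputs = net(images)             # expected shape (8, 4) in the 'regressions' phase
loss = criterion(outputs, boxes)  # MSELoss over the four regressed values
loss.backward()
optimizer.step()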