Esempio n. 1
0
def invoke(localdir, targetdir, args):
    """Dispatch pipeline stages selected by boolean flags on ``args``.

    The stages are independent ``if`` statements (not ``elif``), so any
    combination of pretrain / train / pretest / test / eval may run in a
    single call. The original body mixed tabs and spaces (one line was
    indented with spaces followed by a tab) and carried trailing
    whitespace; indentation is normalized to 4 spaces per PEP 8.

    Args:
        localdir: working directory, forwarded to ``fixup()`` before training.
        targetdir: destination directory, forwarded to ``fixup()``.
        args: namespace with the boolean stage flags ``pretrain``,
            ``train``, ``pretest``, ``test`` and ``eval``.
    """
    if args.pretrain:
        pretrain(args)
    if args.train:
        # fixup() prepares localdir/targetdir before training starts.
        fixup(localdir, targetdir, args)
        train(args)
    if args.pretest:
        pretest(args)
    if args.test:
        test(args)
    if args.eval:
        evaluation(args)
Esempio n. 2
0
def main(args):
    """Train baseline and CVAE models for every quadrant configuration.

    For each value in ``args.num_quadrant_inputs`` this trains a baseline
    network, trains a CVAE on top of it, renders a comparison plot, and
    collects a conditional log-likelihood table. All tables are merged
    column-wise and written to ``results.csv``.
    """
    device = torch.device(
        "cuda:0" if torch.cuda.is_available() and args.cuda else "cpu")
    tables = []
    table_columns = []

    for n_quadrants in args.num_quadrant_inputs:
        # Pluralize "quadrant" in messages when more than one is used.
        suffix = "s" if n_quadrants > 1 else ""

        print("Training with {} quadrant{} as input...".format(
            n_quadrants, suffix))

        # Load the dataset split for this input configuration.
        datasets, dataloaders, dataset_sizes = get_data(
            num_quadrant_inputs=n_quadrants, batch_size=128)

        # Fit the deterministic baseline first; the CVAE builds on it.
        baseline_net = baseline.train(
            device=device,
            dataloaders=dataloaders,
            dataset_sizes=dataset_sizes,
            learning_rate=args.learning_rate,
            num_epochs=args.num_epochs,
            early_stop_patience=args.early_stop_patience,
            model_path="baseline_net_q{}.pth".format(n_quadrants),
        )

        # Fit the CVAE, seeded with the pre-trained baseline.
        cvae_net = cvae.train(
            device=device,
            dataloaders=dataloaders,
            dataset_sizes=dataset_sizes,
            learning_rate=args.learning_rate,
            num_epochs=args.num_epochs,
            early_stop_patience=args.early_stop_patience,
            model_path="cvae_net_q{}.pth".format(n_quadrants),
            pre_trained_baseline_net=baseline_net,
        )

        # Save a side-by-side plot of conditional predictions.
        visualize(
            device=device,
            num_quadrant_inputs=n_quadrants,
            pre_trained_baseline=baseline_net,
            pre_trained_cvae=cvae_net,
            num_images=args.num_images,
            num_samples=args.num_samples,
            image_path="cvae_plot_q{}.png".format(n_quadrants),
        )

        # Collect the conditional log-likelihood table for this setting.
        table = generate_table(
            device=device,
            num_quadrant_inputs=n_quadrants,
            pre_trained_baseline=baseline_net,
            pre_trained_cvae=cvae_net,
            num_particles=args.num_particles,
            col_name="{} quadrant{}".format(n_quadrants, suffix),
        )
        tables.append(table)
        table_columns.append("{} quadrant{}".format(n_quadrants, suffix))

    merged = pd.concat(tables, axis=1, ignore_index=True)
    merged.columns = table_columns
    # NOTE(review): presumably row 0 holds the baseline metric and row 1
    # the CVAE metric, so their difference is the performance gap —
    # confirm against generate_table().
    merged.loc["Performance gap", :] = merged.iloc[0, :] - merged.iloc[1, :]
    merged.to_csv("results.csv")
Esempio n. 3
0
def unpickle(file):
    """Load and return one pickled object from *file*.

    Used for the CIFAR batch files, which were pickled with
    latin1-encoded byte strings — hence the explicit
    ``encoding='latin1'``.

    Args:
        file: path of the pickle file to read.

    Returns:
        The unpickled object (a dict for CIFAR batches).
    """
    import _pickle as cPickle

    # `with` guarantees the handle is closed even if unpickling raises;
    # the original open()/close() pair leaked the handle on error. Also
    # avoids binding the result to a name that shadows builtin `dict`.
    with open(file, 'rb') as fo:
        return cPickle.load(fo, encoding='latin1')


train_d = unpickle('./cifar-100-python/train')

# CIFAR-100 train split: 50000 images stored channel-first (N, 3, 32, 32);
# transpose to channel-last (N, 32, 32, 3) for training.
X = np.reshape(train_d['data'], (50000, 3, 32, 32)).transpose(0, 2, 3, 1)
Y = train_d['fine_labels']

# Hold out the last 10000 samples as a validation split.
X_train = X[:40000]
Y_train = Y[:40000]
X_validation = X[40000:]
Y_validation = Y[40000:]

# One-hot encode the labels. Fit the encoder on the training labels only
# and reuse the *same* fitted encoder for validation: the original called
# fit_transform on both splits, which can silently produce a different
# column set/order if the label values present in each split differ.
enc = OneHotEncoder(sparse=False)
Y_train = enc.fit_transform(np.array(Y_train).reshape(-1, 1))
Y_validation = enc.transform(np.array(Y_validation).reshape(-1, 1))

baseline.train(X_train, Y_train, X_validation, Y_validation)
Esempio n. 4
0
def classification_baseline():
    """One-hot encode the module-level label arrays and train the baseline.

    Reads ``X_train``/``Y_train``/``X_validation``/``Y_validation`` from
    module scope. The original assigned to ``Y_train``/``Y_validation``
    inside the function, which made them locals, so reading them on the
    right-hand side raised ``UnboundLocalError`` on every call; the
    encoded labels now use fresh local names.
    """
    import baseline
    enc = OneHotEncoder(sparse=False)
    # Fit on the training labels only, then reuse the fitted encoder so
    # both splits share one column order.
    y_train_oh = enc.fit_transform(np.array(Y_train).reshape(-1, 1))
    y_val_oh = enc.transform(np.array(Y_validation).reshape(-1, 1))
    baseline.train(X_train, y_train_oh, X_validation, y_val_oh)
Esempio n. 5
0
from datetime import datetime

def load_training_data():
    """Load the pickled audio-baseline training features, labels and ids.

    Returns:
        Tuple ``(train_x, train_y, train_who)`` where the first two come
        from pickle files and ``train_who`` is a list of stripped lines
        (one identifier per line) from the WHO file.
    """
    with open(commons.AUDIO_BASELINE_TRAIN_X, 'rb') as f:
        train_x = pickle.load(f)
    with open(commons.AUDIO_BASELINE_TRAIN_Y, 'rb') as f:
        train_y = pickle.load(f)
    # One identifier per line; strip surrounding whitespace/newlines.
    with open(commons.AUDIO_BASELINE_TRAIN_WHO, 'r') as f:
        train_who = [line.strip() for line in f]
    return (train_x, train_y, train_who)

def load_test_data():
    """Load the pickled audio-baseline test features, labels and ids.

    Returns:
        Tuple ``(test_x, test_y, test_who)`` mirroring
        ``load_training_data()`` for the test split.
    """
    with open(commons.AUDIO_BASELINE_TEST_X, 'rb') as f:
        test_x = pickle.load(f)
    with open(commons.AUDIO_BASELINE_TEST_Y, 'rb') as f:
        test_y = pickle.load(f)
    # One identifier per line; strip surrounding whitespace/newlines.
    with open(commons.AUDIO_BASELINE_TEST_WHO, 'r') as f:
        test_who = [line.strip() for line in f]
    return (test_x, test_y, test_who)

if __name__ == '__main__':
	# Train the audio baseline on the pickled training split, then
	# immediately evaluate it on the held-out test split using the same
	# saved model path.
	baseline.train(data=load_training_data(), model_path=commons.AUDIO_BASELINE_MODEL)
	baseline.test(data=load_test_data(), model_path=commons.AUDIO_BASELINE_MODEL)
Esempio n. 6
0
                train_x.append(x)
                train_y.append(y)
            elif imdb_id in test_ids:
                test_x.append(x)
                test_y.append(y)

    print("\rDone. ", datetime.now())

    with open(FUSION_TRAIN_X, 'wb') as f:
        pickle.dump(train_x, f)
    with open(FUSION_TRAIN_Y, 'wb') as f:
        pickle.dump(train_y, f)
    with open(FUSION_TRAIN_WHO, 'w') as f:
        for who in train_who:
            f.write("{}\n".format(who))
    with open(FUSION_TEST_X, 'wb') as f:
        pickle.dump(test_x, f)
    with open(FUSION_TEST_Y, 'wb') as f:
        pickle.dump(test_y, f)
    with open(FUSION_TEST_WHO, 'w') as f:
        for who in test_who:
            f.write("{}\n".format(who))

    return (train_x, train_y, train_who), (test_x, test_y, test_who)


if __name__ == '__main__':
    # Build (or rebuild) the fused train/test splits on disk, then train
    # and evaluate the baseline model on them.
    train_data, test_data = generate_fusion_data()
    baseline.train(data=train_data, model_path=FUSION_MODEL)
    baseline.test(data=test_data, model_path=FUSION_MODEL)
Esempio n. 7
0
def load_training_data():
    """Materialize the scene data on disk, then load the training split.

    Returns:
        Tuple ``(train_x, train_y, train_who)``: pickled features and
        labels, plus one stripped identifier per line of the WHO file.
    """
    generate_scene_data()
    with open(SCENE_DIR + '/' + TRAIN_X, 'rb') as f:
        train_x = pickle.load(f)
    with open(SCENE_DIR + '/' + TRAIN_Y, 'rb') as f:
        train_y = pickle.load(f)
    with open(SCENE_DIR + '/' + TRAIN_WHO, 'r') as f:
        train_who = [line.strip() for line in f]
    return (train_x, train_y, train_who)


def load_test_data():
    """Materialize the scene data on disk, then load the test split.

    Returns:
        Tuple ``(test_x, test_y, test_who)`` mirroring
        ``load_training_data()`` for the test split.
    """
    generate_scene_data()
    with open(SCENE_DIR + '/' + TEST_X, 'rb') as f:
        test_x = pickle.load(f)
    with open(SCENE_DIR + '/' + TEST_Y, 'rb') as f:
        test_y = pickle.load(f)
    with open(SCENE_DIR + '/' + TEST_WHO, 'r') as f:
        test_who = [line.strip() for line in f]
    return (test_x, test_y, test_who)


if __name__ == '__main__':
    # Train the scene baseline on the training split, then evaluate it on
    # the test split using the same saved model path.
    baseline.train(data=load_training_data(), model_path=SCENE_MODEL)
    baseline.test(data=load_test_data(), model_path=SCENE_MODEL)
Esempio n. 8
0
        pickle.dump(test_y, f)
    with open(VGGFACE_DIR + '/' + TEST_WHO, 'w') as f:
        for who in test_who:
            f.write("{}\n".format(who))


def load_training_data():
    """Materialize the VGGFace data on disk, then load the training split.

    Returns:
        Tuple ``(train_x, train_y)`` of pickled features and labels.
        NOTE(review): unlike ``load_test_data()`` this returns a 2-tuple
        (no WHO list) — verify the consumer expects that asymmetry.
    """
    generate_vggface_data()
    with open(VGGFACE_DIR + '/' + TRAIN_X, 'rb') as f:
        train_x = pickle.load(f)
    with open(VGGFACE_DIR + '/' + TRAIN_Y, 'rb') as f:
        train_y = pickle.load(f)
    return (train_x, train_y)


def load_test_data():
    """Materialize the VGGFace data on disk, then load the test split.

    Returns:
        Tuple ``(test_x, test_y, test_who)``: pickled features and
        labels, plus one stripped identifier per line of the WHO file.
    """
    generate_vggface_data()
    with open(VGGFACE_DIR + '/' + TEST_X, 'rb') as f:
        test_x = pickle.load(f)
    with open(VGGFACE_DIR + '/' + TEST_Y, 'rb') as f:
        test_y = pickle.load(f)
    with open(VGGFACE_DIR + '/' + TEST_WHO, 'r') as f:
        test_who = [line.strip() for line in f]
    return (test_x, test_y, test_who)


if __name__ == '__main__':
    # Train the VGGFace baseline on the training split, then evaluate it
    # on the test split using the same saved model path.
    baseline.train(data=load_training_data(), model_path=VGGFACE_MODEL)
    baseline.test(data=load_test_data(), model_path=VGGFACE_MODEL)