def main(mode="test"):
    # Fall back to the default mode when no command-line argument is given.
    mode = sys.argv[1] if len(sys.argv) > 1 else mode
    if mode == "train":
        module = net.Mynet()
        train_dataset = data.trainSet("traindata")
        train_loader = torch.utils.data.DataLoader(dataset=train_dataset)
        net.train(10, train_loader, module)
    if mode == "test":
        module = torch.load("module/my_model.pkl")
        test_dataset = data.trainSet("traindata")
        test_loader = torch.utils.data.DataLoader(dataset=test_dataset)
        net.test(test_loader, module)
    if mode == "print":
        module = torch.load("module/my_model.pkl")
        print(module.state_dict())
def main(_):
    pai_constant_init()
    utils.SAVE_MODEL = True
    while True:
        model_num = 0
        save_num = 7
        net.verificate(model_num)
        net.train(model_num, save_num)
        if utils.SAVE_MODEL:
            net.verificate(save_num, model_num)
        else:
            net.verificate(model_num)
        break
def bayes_func(repeat_time, learning_rate, batch_size):
    utils.TRAIN_EPOCH_REPEAT_NUM = int(repeat_time)
    utils.BASE_LEARNING_RATE = learning_rate
    utils.BATCH_SIZE = int(batch_size)
    utils.SAVE_MODEL = False
    model_num = 0
    _net = net.Net(model_num)
    _net.load_model(model_num)
    _net.logger.info(
        'repeat_time: {}, learning_rate: {}, batch_size: {}'.format(
            repeat_time, learning_rate, batch_size))
    net.train(model_num, write_summary=False)
    acc, loss = net.verificate(model_num)
    return -loss
def train(msg):
    comm, ticket = msg.text.split()
    ticker = Ticker(ticket, asynchronous=True)
    df = ticker.history(period='5d', interval='1m')
    if df[ticket] == 'No data found, symbol may be delisted':
        bot.send_message(msg.chat.id, "Couldn't find that one :(")
    else:
        bot.send_message(msg.chat.id, 'This will take a moment')
        net.train(ticket)
        bot.send_message(
            msg.chat.id,
            f'The model for {ticket} has been trained and will be available within an hour.')
        price = net.magic(ticket)
        bot.send_message(
            msg.chat.id,
            f'Estimated closing price of the next candle: {price}')
def upload_files():
    if request.method == 'POST':
        uploaded_files = request.files.getlist('files[]')
        name = request.form['name']
        path = os.path.join(app.config['UPLOAD_FOLDER'], name)
        if not os.path.exists(path):
            os.makedirs(path)
        for f in uploaded_files:
            f.save(os.path.join(path, f.filename))
        des = 'tmp/%s' % util.get_uuid()
        src = 'tmp/%s' % util.get_uuid()
        dirpath = os.path.join(app.config['UPLOAD_FOLDER'], name)
        shutil.copytree(dirpath, os.path.join(src, name))
        net.align_dataset(des, src, 0.25, True, 32, 160)
        shutil.copytree(
            os.path.join(des, name),
            os.path.join(app.config['UPLOAD_TRAIN_ALIGN_FOLDER'], name))
        shutil.rmtree(src)
        shutil.rmtree(des)
        shutil.copytree(
            os.path.join(app.config['UPLOAD_TRAIN_ALIGN_FOLDER'], name),
            os.path.join('tmp', name))
        res = net.train(False, 'TRAIN', 'tmp', 20, 10,
                        '20170512-110547/20170512-110547.pb',
                        'classifiers/%s_classifier.pkl' % name, 1000, 160)
        shutil.rmtree(os.path.join('tmp', name))
        img_list = [os.path.join(path, f.filename) for f in uploaded_files]
        img_align_path = os.path.join(app.config['UPLOAD_TRAIN_ALIGN_FOLDER'], name)
        img_align_list = [
            os.path.join(img_align_path, f.filename.split('.')[0] + '.png')
            for f in uploaded_files
        ]
        return render_template('index.html',
                               img_align_list=img_align_list,
                               name=name)
def test_neural_net():
    # Parameter initialization
    wh, bh, w_out, b_out = initialize_parameters(x_tr[0].shape[0], hidden_sizes, out_size)

    # Train the network
    wh, bh, w_out, b_out = train(x_tr, y_tr, epochs, hidden_sizes, wh, bh, w_out,
                                 b_out, learning_rate, p, alpha, beta1, beta2,
                                 eps, lambda_, batch_size)

    # Save parameters for reuse
    with open('dump.p', 'wb') as dump_file:
        dump((wh, w_out, bh, b_out), dump_file)

    # Quick accuracy
    with open('dump.p', 'rb') as file:
        wh, wo, bh, bo = load(file)
    print('Test accuracy of network 1:', accuracy(x_te, y_te, wh, wo, bh, bo, alpha))
import numpy as np
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before importing pyplot
import matplotlib.pyplot as plt

from utils import getData
from net import Network, train

# Flower
iterations = 1000
X, y = getData(2)
netF = Network(2, 3)
netF, loss = train(netF, X, y, iterations, 100)
preds = netF.predict(X)
plt.imshow(preds.reshape(133, 140, 3))
plt.title('Flower')
plt.savefig('Flower2.png')
plt.clf()
plt.plot(np.arange(iterations), loss)
plt.ylabel('Loss')
plt.xlabel('Iterations')
plt.title('Flower Loss')
plt.savefig('Flowerloss2.png')

# Lincoln
iterations = 1000
X_test = X[:, X.shape[1] * 4 // 5:]
Y_test = y[:, X.shape[1] * 4 // 5:]

num_epochs = 20000  # number of passes through the training set
# layer 0 is the input layer - each value in the list = number of nodes in that layer
layers_units = [X.shape[0], 10, y.shape[0]]
print("Layer 1:", X.shape[0])
learning_rate = 1e-1  # size of our step

print("No. of training examples:", X_train.shape[1])
print("No. of test examples:", X_test.shape[1])
print()

parameters, train_costs = net.train(X_train, Y_train, num_epochs, layers_units,
                                    learning_rate, .1)
# evaluate_model(train_costs, parameters, X_train, Y_train, X_test, Y_test)

xSize, ySize = 600, 450
screen = pygame.display.set_mode((xSize, ySize))
pygame.display.set_caption("Neural Network")
pygame.init()
mouseHold = False
size = 16
scale = 10
content = np.zeros((size, size))
result = [0 for _ in range(classes)]
def main(argv=None):
    with tf.Graph().as_default():
        print('Start.')
        start_time = time.time()
        begin_time = start_time
        print('Loading images.')
        data, label = loadDataLabel(DATADIR, shuffle=True)
        validation_size = len(label) // 20
        validation_data = data[:validation_size, ...]
        validation_labels = label[:validation_size, ...]
        data = data[validation_size:, ...]
        label = label[validation_size:, ...]
        train_size = len(label)
        validation_size = len(validation_labels)
        print('Loaded %d images.' % (train_size + validation_size))
        print('Train size: %d' % train_size)
        print('Valid size: %d' % validation_size)
        elapsed_time = time.time() - start_time
        print('Loading images with label elapsed %.1f s' % elapsed_time)

        print('Building net......')
        start_time = time.time()
        x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 9], name='data')
        y = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 3])
        keep_prob = tf.placeholder(tf.float32, name='prob')
        x_valid = tf.placeholder(tf.float32, shape=[validation_size, 9])
        y_valid = tf.placeholder(tf.float32, shape=[validation_size, 3])

        # Train model.
        train_prediction = inference(x, keep_prob)
        train_prediction_valid = inference(x_valid, keep_prob, reuse=True)
        batch = tf.Variable(0, dtype=tf.float32)
        learning_rate = tf.train.exponential_decay(
            0.1,                 # Base learning rate.
            batch * BATCH_SIZE,  # Current index into the dataset.
            train_size * 100,    # Decay step.
            0.95,                # Decay rate.
            staircase=True)
        tf.summary.scalar('learn', learning_rate)
        loss = total_loss(train_prediction, y)
        loss_valid = total_loss(train_prediction_valid, y_valid)
        loss_ce = cross_entropy_loss(train_prediction, y)
        loss_ce_valid = cross_entropy_loss(train_prediction_valid, y_valid)
        loss_l2 = l2_loss()
        tf.summary.scalar('loss', loss)
        tf.summary.scalar('loss_valid', loss_valid)
        trainer = train(loss, learning_rate, batch)
        elapsed_time = time.time() - start_time
        print('Building net elapsed %.1f s' % elapsed_time)

        start_time = time.time()
        best_validation_loss = 100000.0
        saver = tf.train.Saver()
        with tf.Session() as sess:
            merged = tf.summary.merge_all()
            train_writer = tf.summary.FileWriter('graph/train', sess.graph)
            # Initialize the whole net.
            tf.global_variables_initializer().run()
            print('Initialized!')
            for step in range(int(NUM_EPOCHS * train_size) // BATCH_SIZE):
                offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
                batch_data = data[offset:offset + BATCH_SIZE, ...]
                batch_labels = label[offset:offset + BATCH_SIZE, ...]
                # Train net.
                feed_dict = {
                    x: batch_data,
                    y: batch_labels,
                    keep_prob: KEEP_PROB
                }
                sess.run(trainer, feed_dict=feed_dict)
                # Valid net.
                if step % VALID_GAP == 0:
                    feed_dict = {
                        x: batch_data,
                        y: batch_labels,
                        x_valid: validation_data,
                        y_valid: validation_labels,
                        keep_prob: 1.0
                    }
                    summary, l, lr, l_valid, l_ce, l_ce_valid, l_l2 = sess.run(
                        [merged, loss, learning_rate, loss_valid, loss_ce,
                         loss_ce_valid, loss_l2],
                        feed_dict=feed_dict)
                    train_writer.add_summary(summary, step)
                    if (step * BATCH_SIZE > NUM_EPOCHS * train_size * 0.9) and (
                            l_valid < best_validation_loss):
                        best_validation_loss = l_valid
                        saver.save(sess, NETPATH)
                        print('Saving net at step %d' % step)
                        print('Learning rate: %f' % lr)
                        print('Train Data total loss:%f' % l)
                        print('Valid Data total loss:%f\n' % l_valid)
                        sys.stdout.flush()
                    if step % EVAL_FREQUENCY == 0:
                        elapsed_time = time.time() - start_time
                        start_time = time.time()
                        print('Step %d (epoch %.2f), %.3f ms per step' %
                              (step, step * BATCH_SIZE / train_size,
                               1000 * elapsed_time / EVAL_FREQUENCY))
                        print('Learning rate: %f' % lr)
                        print('L2 loss:%f' % l_l2)
                        print('Train Data cross entropy loss:%f' % l_ce)
                        print('Train Data total loss:%f' % l)
                        print('Valid Data cross entropy loss:%f' % l_ce_valid)
                        print('Valid Data total loss:%f\n' % l_valid)
                        sys.stdout.flush()
            train_writer.close()
            elapsed_time = time.time() - begin_time
            print('Total time: %.1f s' % elapsed_time)
def main():
    '''
    This is the entrypoint of the whole application. From the CMD, the user can
    specify either 'train' or 'test' after 'python app.py' to choose the mode
    which they would like to execute.
    '''
    # handle arguments from CMD
    argument_parser = argparse.ArgumentParser(
        description='Choose whether you want to train network or test network')
    argument_parser.add_argument('config', help='choose whether to train or test network')
    args = argument_parser.parse_args().config
    print('the inputted args is', args)

    if args is None:
        print('This configuration is unsupported. Please enter [train] or [test]')
        return

    # file paths for training data
    faces_file_path = 'face-images-with-marked-landmark-points/face_images.npz'
    csv_file_path = 'face-images-with-marked-landmark-points/facial_keypoints.csv'

    # helper classes
    image_reader = ImageReader(faces_file_path)
    csv_reader = CsvReader(csv_file_path)

    image_data = image_reader.get_array()
    image_data = image_data['face_images']
    image_data = np.moveaxis(image_data, -1, 0)

    # Must map all the images into 3-channels
    image_data = map_images_to_3_channels(image_data)

    # Sample image
    # -----------------------------------------
    # image = image_data[1]
    # image = convert_to_3_channels(image)
    # print(image.shape)
    # plt.imshow((image * 255).astype(np.uint8))
    # plt.show()
    # -----------------------------------------

    # this is a dataframe
    df = csv_reader.get_facial_features()

    # This gets all the coordinates from the dataframe
    coordinates_list = extract_coordinates_list_unzipped(df)
    coordinates_list = np.array(coordinates_list)
    coordinates_list = scale_coordinates(coordinates_list)
    # draw coordinates on image

    # validation to ensure data is clean
    validate_data(image_data, coordinates_list)

    # extracting and separating data as appropriate
    X_train, y_train, X_test, y_test = split(image_data, coordinates_list)

    if args == 'train':
        print('[train] configuration selected. Training.')
        # image_reader = ImageReader('training_data.npz')
        # data = image_reader.get_array()
        # X_train = data['images']
        # y_train = data['coordinates_list']
        loader = Loader()
        X_train = loader.load_from_filepath('images.npy')
        y_train = loader.load_from_filepath('coordinates_list.npy')
        print('images size', len(X_train))
        print('coordinates size', len(y_train))
        assert len(X_train) == len(y_train)
        train(get_custom_model(), X_train, y_train, X_test, y_test)
    elif args == 'test':
        print('[test] configuration selected. Testing.')
        model = get_trained_model('modelMKIII.h5')
        prediction = predict_images(model, X_test)
        assert len(X_test) == len(prediction)
        draw_images(X_test, prediction)
    else:
        print('This configuration is unsupported. Please enter [train] or [test]')
def compute_score(acc, min_thres, max_thres):
    if acc <= min_thres:
        base_score = 0.0
    elif acc >= max_thres:
        base_score = 100.0
    else:
        base_score = float(acc - min_thres) / (max_thres - min_thres) * 100
    return base_score


if __name__ == '__main__':
    TRAIN = True
    if TRAIN:
        train()

    cifar10_test = Cifar10(test=True, shuffle=False, one_hot=False)
    cifar10_test_images, cifar10_test_labels = cifar10_test._images, cifar10_test._labels

    start = timeit.default_timer()
    np.random.seed(0)
    predicted_cifar10_test_labels = test(cifar10_test_images)
    np.random.seed()
    stop = timeit.default_timer()
    run_time = stop - start

    correct_predict = (cifar10_test_labels.flatten() ==
                       predicted_cifar10_test_labels.flatten()).astype(np.int32).sum()
    incorrect_predict = len(cifar10_test_labels) - correct_predict
    accuracy = float(correct_predict) / len(cifar10_test_labels)
    print('Acc: {}. Testing took {}s.'.format(accuracy, stop - start))
def main2():
    # open the file we have to fill
    results = 'results-1-indoors.txt'
    with open(results, 'w') as file:
        file.write("Net name " + " trial ind " + "gtg " + "svm " + "ann" + "\n")

    root = '.'
    current_dataset = 'indoors'
    out_dir = os.path.join(root, 'out', current_dataset)
    feature_dir = os.path.join(out_dir, 'feature_data')
    feature_test_dir = os.path.join(out_dir, 'feature_data_test')
    svm_labels_dir = os.path.join(out_dir, 'svm_labels')
    net_dir = os.path.join(out_dir, 'nets')
    nets_dir_test = os.path.join(out_dir, 'nets_test')
    gtg_labels_dir = os.path.join(out_dir, 'gtg_labels')
    only_labelled = os.path.join(out_dir, 'only_labelled')
    nr_classes = 256
    nets_and_features = create_dict_nets_and_features()

    for pkl_name in os.listdir(feature_dir):
        with open(os.path.join(feature_dir, pkl_name), 'rb') as pkl:
            net_name, labels, features, fnames = pickle.load(pkl)
        W = gtg.sim_mat(features)
        nr_objects = features.shape[0]
        labelled, unlabelled = utils2.create_mapping2(labels, 0.02)
        ps = utils2.gen_init_rand_probability(labels, labelled, unlabelled, nr_classes)
        gtg_accuracy, Ps_new = utils2.get_accuracy(W, ps, labels, labelled,
                                                   unlabelled, len(unlabelled))
        gtg_labels = Ps_new.argmax(axis=1)

        nname, ind = pkl_name.split('_')
        names_folds = os.listdir('Datasets/indoors/train_' + str(ind[0]))
        names_folds.sort()
        gtg_label_file = os.path.join(gtg_labels_dir, nname + '.txt')
        utils2.gen_gtg_label_file(fnames, names_folds, gtg_labels, gtg_label_file)

        # generate the new dataset
        gen_gtg_dataset('indoors/train_' + str(ind[0]), gtg_label_file, ind[0])
        stats = (.485, .456, .406, .229, .224, .225)
        del W

        dataset = 'Datasets/' + current_dataset
        dataset_train = os.path.join(dataset, 'train_labelled_' + ind[0])
        dataset_test = os.path.join(dataset, 'test_' + ind[0])
        max_epochs = 1
        batch_size = 8
        train_loader = prepare_loader_train(dataset_train, stats, batch_size)
        test_loader = prepare_loader_val(dataset_test, stats, batch_size)
        net, feature_size = create_net(nr_classes, nets_and_features, net_type=nname)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(net.parameters(), lr=1e-4)
        trained_net = train(net, nname, train_loader, test_loader, optimizer,
                            criterion, max_epochs, net_dir, ind[0])
        net.load_state_dict(torch.load(trained_net))
        net_accuracy_gtg = evaluate(net, test_loader)
        print('Accuracy: ' + str(net_accuracy_gtg))

        # do the same thing but with a linear SVM
        svm_linear_classifier = svm.LinearSVC()
        svm_linear_classifier.fit(features[labelled, :], labels[labelled])
        labels_svm = svm_linear_classifier.predict(features[unlabelled])
        labels_svm = labels_svm.astype(int)
        gtg_labels[unlabelled] = labels_svm
        svm_label_file = os.path.join(svm_labels_dir, nname + '.txt')
        utils2.gen_gtg_label_file(fnames, names_folds, gtg_labels, svm_label_file)
        gen_gtg_dataset('indoors/train_' + str(ind[0]), svm_label_file, ind[0],
                        'train_labelled_svm')

        dataset_train = os.path.join(dataset, 'train_labelled_svm_' + ind[0])
        train_loader = prepare_loader_train(dataset_train, stats, batch_size)
        test_loader = prepare_loader_val(dataset_test, stats, batch_size)
        net, feature_size = create_net(nr_classes, nets_and_features, net_type=nname)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(net.parameters(), lr=1e-4)
        trained_net = train(net, nname, train_loader, test_loader, optimizer,
                            criterion, max_epochs, net_dir, ind[0])
        net.load_state_dict(torch.load(trained_net))
        net_accuracy_svm = evaluate(net, test_loader)
        print('Accuracy: ' + str(net_accuracy_svm))

        # now check the accuracy of the net trained only on the labelled set
        label_file = os.path.join(only_labelled, nname + '.txt')
        utils2.only_labelled_file(fnames, labelled, label_file)
        gen_labelled_dataset('indoors/train_' + str(ind[0]), label_file, ind[0])
        dataset_train = os.path.join(dataset, 'train_only_labelled_' + ind[0])
        train_loader = prepare_loader_train(dataset_train, stats, batch_size)
        test_loader = prepare_loader_val(dataset_test, stats, batch_size)
        net, feature_size = create_net(nr_classes, nets_and_features, net_type=nname)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(net.parameters(), lr=1e-4)
        trained_net = train(net, nname, train_loader, test_loader, optimizer,
                            criterion, max_epochs, nets_dir_test, ind[0])
        net.load_state_dict(torch.load(trained_net))
        net_accuracy = evaluate(net, test_loader)

        # # finally, do gtg with the testing set
        # with open(os.path.join(feature_test_dir, pkl_name), 'rb') as pkl:
        #     net_name_test, labels_test, features_test, fnames_test = pickle.load(pkl)
        #
        # features_combined = np.vstack((features[labelled, :], features_test))
        # labels_combined = np.vstack((labels[labelled], labels_test))
        # W = gtg.sim_mat(features_combined)
        # labelled = np.arange(features[labelled, :].shape[0])
        # unlabelled = np.arange(features[labelled, :].shape[0], features_combined.shape[0])
        #
        # ps = utils2.gen_init_rand_probability(labels_combined, labelled, unlabelled, nr_classes)
        # gtg_accuracy_test, Ps_new = utils2.get_accuracy(W, ps, labels_combined, labelled, unlabelled, len(unlabelled))

        with open(results, 'a') as file:
            file.write(nname + " " + ind[0] + " " + str(net_accuracy_gtg) + " " +
                       str(net_accuracy_svm) + " " + str(net_accuracy) + "\n")
        print()
if not os.path.exists(checkpoint):
    os.makedirs(checkpoint)
if not os.path.exists(image):
    os.makedirs(image)

trainset = loadmat(data_dir + 'train_32x32.mat')
testset = loadmat(data_dir + 'test_32x32.mat')

net = net.GAN(real_size, z_size, learning_rate)
dataset = data_preprocessor.Dataset(trainset, testset)
train_accuracies, test_accuracies, samples = net.train(net, dataset, epoches,
                                                       batch_size, figsize=(10, 5))

fig, ax = plt.subplots()
plt.plot(train_accuracies, label='Train', alpha=0.5)
plt.plot(test_accuracies, label='Test', alpha=0.5)
plt.title('Accuracy')
plt.legend()

for i in range(len(samples)):
    fig, ax = net.view_samples(i, samples, 5, 10, figsize=(10, 5))
    fig.savefig('image/samples_{:03d}.png'.format(i))
    plt.close()
import net
import config as c
import utils.data_visualization_utils as dv
import utils.preprocessing_utils as pp

c.clear_folders()
c.create_folders()
pp.scale_and_rotate_raw_images()
pp.create_train_samples()
# dv.test_set_info()

net.train()

# test.create_test_data()
# print(test.scoring(prediction_file='test/predictions.pk', target_file='test/example_targets.pkl'))
# gan.train()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 20 01:54:02 2021

@author: fatemeh tahrirchi
"""
import datasets, net
from preprocessing import Preprocessing, CharVectorizer
from net import VDCNN, train, save

import lmdb
import numpy as np
from tqdm import tqdm
import argparse
import torch
from torch.utils.data import DataLoader, Dataset
import os, subprocess

import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)

MODELS_FOLDER = 'models/vdcnn'
DATA_FOLDER = 'datasets'
DATASET = 'yelp_review_full'  # ['yelp_review_full', 'yelp_review_polarity']
PREPROCES_TYPE = 'lower'  # ['lower', 'denoiser', 'add_pos', 'add_hashtag', 'add_NOT']


# get device to calculate on (either CPU or GPU with minimum memory load)
def get_gpu_memory_map():
    result = subprocess.check_output(
        ['nvidia-smi', '--query-gpu=memory.used', '--format=csv,nounits,noheader'],
        encoding='utf-8')
x_tr, x_te, y_tr, y_te = split_data(x, y, .8)

alpha = .0
beta1 = .9
beta2 = .999
eps = 1e-8
lambda_ = 0
epochs = 100
learning_rate = .0005
p = .9
hidden_sizes = [1000, 100]
out_size = np.unique(y_tr).shape[0]

wh, bh, w_out, b_out = initialize_parameters(x_tr[0].shape[0], hidden_sizes, out_size)
wh, bh, w_out, b_out = train(x_tr, y_tr, epochs, hidden_sizes, wh, bh, w_out, b_out,
                             learning_rate, p, alpha, beta1, beta2, eps, lambda_)

with open('dump-iris.p', 'wb') as dump_file:
    dump((wh, w_out, bh, b_out), dump_file)

with open('dump-iris.p', 'rb') as file:
    wh, wo, bh, bo = load(file)

predicted_class_scores = predict(x_te, wh, bh, wo, bo, alpha)
predicted_classes = np.argmax(predicted_class_scores, axis=1)
correct_classes = len(np.where(predicted_classes == y_te)[0])
print('Test accuracy of iris network:', correct_classes / len(x_te))

fixed_data = np.array([[4.7, 3.2, 1.3, .2],
                       [6.6, 2.9, 4.6, 1.3],
                       [5.8, 2.8, 5.1, 2.4]])
print('Sample output:', predict(fixed_data, wh, bh, wo, bo, alpha))
import socket
import time
import pickle

import torch

from common_methods import receive_msg, send_msg
from net import TwoLayerNet, train

HEADERSIZE = 10

N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

model = TwoLayerNet(D_in, H, D_out)
train(model, x, y)

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Socket is created.")
s.bind((socket.gethostname(), 1243))

# Set the queue size for connection
s.listen(5)
print("Listening for incoming connection ...")

connected = False
accept_timeout = 100
s.settimeout(accept_timeout)

d = model.state_dict()
send_msg(s, d, HEADERSIZE)
def main(argv=None):
    with tf.Graph().as_default():
        print('Start.')
        start_time = time.time()
        begin_time = start_time
        print('Loading images.')
        data, label = loadDataLabel(DATADIR, shuffle=True, various=True)
        train_size = len(label)
        print('Loaded %d images.' % train_size)
        elapsed_time = time.time() - start_time
        print('Loading images with label elapsed %.1f s' % elapsed_time)

        print('Building net......')
        start_time = time.time()

        def get_input_x(x, offset=0, length=BATCH_SIZE):
            a = x[offset:(offset + length), ...]
            return np.reshape(a, [length, FRAME_COUNT * 2])

        def get_input_y(y, offset=0, length=BATCH_SIZE):
            b = y[offset:(offset + length)]
            return np.reshape(b, [length, ])

        x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, FRAME_COUNT * 2], name='data')
        y = tf.placeholder(tf.int32, shape=[BATCH_SIZE, ])
        keep_prob = tf.placeholder(tf.float32, name='prob')

        # Train model.
        train_prediction = inference(x, keep_prob)
        batch = tf.Variable(0, dtype=tf.float32)
        learning_rate = tf.train.exponential_decay(
            0.01,                # Base learning rate.
            batch * BATCH_SIZE,  # Current index into the dataset.
            train_size * 80,     # Decay step.
            0.95,                # Decay rate.
            staircase=True)
        tf.summary.scalar('learn', learning_rate)
        loss = total_loss(train_prediction, y)
        tf.summary.scalar('loss', loss)
        trainer = train(loss, learning_rate, batch)
        elapsed_time = time.time() - start_time
        print('Building net elapsed %.1f s' % elapsed_time)

        print('Begin training..., train dataset size:{0}'.format(train_size))
        start_time = time.time()
        best_validation_loss = 100000.0
        saver = tf.train.Saver()
        with tf.Session() as sess:
            merged = tf.summary.merge_all()
            train_writer = tf.summary.FileWriter('graph/train', sess.graph)
            # Initialize the whole net.
            tf.global_variables_initializer().run()
            print('Initialized!')
            for step in range(int(NUM_EPOCHS * train_size) // BATCH_SIZE):
                offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
                batch_data = get_input_x(offset=offset, x=data)
                batch_labels = get_input_y(offset=offset, y=label)
                # Train CNN net.
                feed_dict = {
                    x: batch_data,
                    y: batch_labels,
                    keep_prob: KEEP_PROB
                }
                summary, _, l, lr, predictions = sess.run(
                    [merged, trainer, loss, learning_rate, train_prediction],
                    feed_dict=feed_dict)
                train_writer.add_summary(summary, step)
                if l < best_validation_loss:
                    print('Saving net.')
                    print('Net loss:%.3f, learning rate: %.6f' % (l, lr))
                    best_validation_loss = l
                    saver.save(sess, NETPATH)
                if step % EVAL_FREQUENCY == 0:
                    elapsed_time = time.time() - start_time
                    start_time = time.time()
                    print('Step %d (epoch %.2f), %.1f ms' %
                          (step, np.float32(step) * BATCH_SIZE / train_size,
                           1000 * elapsed_time / EVAL_FREQUENCY))
                    print('Net loss:%.3f, learning rate: %.6f' % (l, lr))
                    sys.stdout.flush()
            train_writer.close()
            elapsed_time = time.time() - begin_time
            print('Total time: %.1f s' % elapsed_time)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

import net

net = net.network()

BSIZE = 128
MAX_ITER = 20000

for i in range(MAX_ITER):
    x_train, y_train = mnist.train.next_batch(BSIZE)
    x_train = x_train.reshape([-1, 28, 28, 1])
    ls, ac = net.train(x_train, y_train)
    print('ITER:\t%d\tLoss:\t%.4f\tAcc:\t%.4f' % (i, ls, ac))
import numpy as np
import timeit

import tensorflow as tf

from net import train, test
from iterator import DatasetIterator
from collections import OrderedDict
from pprint import pformat

if __name__ == '__main__':
    # load CIFAR10 dataset
    cifar10 = tf.keras.datasets.cifar10
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    batch_size = 64

    # change TRAIN to True if you want to create a new model and save it to the ckpt folder
    TRAIN = False
    if TRAIN:
        train(x_train, y_train, batch_size)

    cifar10_test = DatasetIterator(x_test, y_test, batch_size)
    cifar10_test_images, cifar10_test_labels = x_test, y_test

    # start timer
    start = timeit.default_timer()
    np.random.seed(0)

    # get results from test set
    predicted_cifar10_test_labels = test(cifar10_test_images)
    np.random.seed()

    # end timer
    stop = timeit.default_timer()
    run_time = stop - start

    # calculate accuracy
    correct_predict = (cifar10_test_labels.flatten() ==
                       predicted_cifar10_test_labels.flatten()).astype(np.int32).sum()
print("Batchified training data") torch.manual_seed(1) model = net.Net(len(CLASSES)).to(device) print(next(model.parameters()).is_cuda) #model = resnet.ResNet(resnet.BasicBlock,[2,2,2,2],num_classes=11) #model.conv1 = torch.nn.Conv2d(1,64,kernel_size=7,stride=2,padding=3,bias=False) optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=1e-3) lTrainAcc = [] lTestAcc = [] lF1 = [] for epoch in range(1, 100): trainAcc = net.train(model, device, batchTrainingData, optimizer, epoch) lTrainAcc.append(trainAcc) totalCorrect = 0 total = 0 dist = np.zeros((11, 11)) countTargets = np.zeros((11)) for batch in batchValidationData: data = batch[0].to(device) target = batch[1].to(device) output = net.test(model, device, data) pred = output.max( 1, keepdim=True)[1] # get the index of the max log-probability target = target.long().view_as(pred) totalCorrect += pred.eq(target).sum().item() total += len(data) for i in range(len(pred)):
# Test on approximating a sine curve
import numpy as np
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before importing pyplot
import matplotlib.pyplot as plt

from net import Network, train

iterations = 500
X = np.random.rand(1000, 1) * 2 * np.pi
y = (1 + np.sin(X)) / 2

net = Network(1, 1)
net, loss = train(net, X, y, iterations, 10)

plt.scatter(X, net.predict(X))
plt.xlabel('X')
plt.ylabel('Predicted y: sin(x)')
plt.savefig('sin2.png')
plt.clf()

plt.plot(np.arange(iterations), loss)
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.title('Sin Approximation Loss')
plt.savefig('sinloss2.png')
tst_set = [(np.reshape(x, (int(np.sqrt(len(x))), int(np.sqrt(len(x))))), y)
           for x, y in tst_set][:500]
'''
tst_set = np.ones_like(tst_set) - tst_set
'''
vld_set = tst_set[:1000]
print("Loading2")

net, optimizer, num_epochs, batch_size, num_class = CNN()

print("Training network...")
n.train(net, optimizer, num_epochs, batch_size, trn_set, num_class, vld_set=None)

print('Saving network (pkl)...')
pickle.dump(net, open("save1.pkl", "wb"))
# print("Saving network...")
# n.save_net(net, num_spec_layers=[1, 3], name_n='cnn(3_5_2)(0_1_2_8)', active=True)

print("Testing network...")
accuracy = n.test(net, tst_set)
print("Test accuracy: %0.2f%%" % (accuracy * 100 / (len(tst_set))))
'''
net_load = n.Loaded_nn(name='save(math1)')