def main(): trainX, trainY, testX, testY = load_mnist() # print "Shapes: ", trainX.shape, trainY.shape, testX.shape, testY.shape # print "\nDigit sample" #print_digit(trainX[1], trainY[1]) for i in xrange(9995,10000): print testY[i] train_cnn.train(trainX, trainY)
def main(): trainX, trainY, testX, testY = load_mnist() print "Shapes: ", trainX.shape, trainY.shape, testX.shape, testY.shape print "\nDigit sample" print_digit(trainX[1], trainY[1]) train_cnn.train(trainX, trainY) labels = train_cnn.test(testX) accuracy = np.mean((labels == testY)) * 100.0 print "\nCNN Test accuracy: %lf%%" % accuracy
def main():
    """Load MNIST, preview one training digit, and train the CNN end to end."""
    train_x, train_y, test_x, test_y = load_mnist()
    print("Shapes: ", train_x.shape, train_y.shape, test_x.shape, test_y.shape)
    print("\nDigit sample")
    print_digit(train_x[1], train_y[1])
    # The dense-network path was disabled upstream; only the CNN is trained here.
    train_cnn.train(train_x, train_y, test_x, test_y)
model = multichannel_CNN.CNN(args) # model.word_embeddings.weight.data.copy_(torch.from_numpy(pretrained_weight)) if args.multi_cnn2: model = multi_CNN.CNN(args) # model.word_embeddings.weight.data.copy_(torch.from_numpy(pretrained_weight)) if args.cnn_simple: model = CNN.CNN(args) # model.word_embeddings.weight.data.copy_(torch.from_numpy(pretrained_weight)) if args.cnn_char: model = CNN_char.CNN(args) # model.word_embeddings.weight.data.copy_(torch.from_numpy(pretrained_weight)) if args.gru: model = gru.GRU(args) if args.bi_gru: model = bi_gru.BiGRU(args) if args.bnlstm: # model = LSTM_bn(cell_class=BNLSTMCell, input_size=1, hidden_size=args.hidden_size, max_length=1e8) model = model_bnlstm.BNLSTM(args) # train print("Training start") if args.train_cnn: # if args.cnn_char: # train_cnn.train_char(train_iter, dev_iter, test_iter, train_iter_char, dev_iter_char, test_iter_char, model, # text_field, label_field, args) # else: train_cnn.train(train_iter, dev_iter, test_iter, model, text_field, label_field, args) else: train_lstm.train(train_iter, dev_iter, test_iter, model, text_field, label_field, args)
# print(padID) # wv_cat = loader.vector_loader(count_words_reset) # pretrained_weight = wv_cat # args.pretrained_weight = pretrained_weight # update args and print args.embed_num = len(text_field.vocab) args.class_num = len(label_field.vocab) - 1 args.save_dir = os.path.join( args.save_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')) # model if args.bilstm: model = Bi_LSTM_random_emb.BiLSTM(args) if args.use_cuda: model = model.cuda() if os.path.exists("./Test_Result.txt"): os.remove("./Test_Result.txt") # train print("Training start") if args.train_cnn: train_cnn.train(train_iter, dev_iter, test_iter, model, text_field, label_field, args) else: train_lstm.train(train_iter, dev_iter, test_iter, model, text_field, label_field, args)
import train_cnn
from dataset_sorter import DatasetSorter
from feature_database import FeatureDatabase
from caffe_config import CaffeConfig
import sys

# experiment_config lives outside the package root, so extend the path first.
sys.path.insert(0, 'src/grasp_selection/')
import experiment_config as ec

if __name__ == '__main__':
    import argparse

    # Single positional argument: path to the experiment config file.
    parser = argparse.ArgumentParser()
    parser.add_argument('config')
    args = parser.parse_args()
    config = ec.ExperimentConfig(args.config)

    # BUG FIX: the import above binds the class `FeatureDatabase`, not a
    # `feature_database` module, so `feature_database.FeatureDatabase(config)`
    # raised NameError. Call the imported class directly.
    feature_db = FeatureDatabase(config)
    caffe_config = CaffeConfig()

    # Reuse a previously saved sorter if one exists; otherwise build and
    # persist a fresh one so later runs can skip the sorting step.
    dataset_sorter = feature_db.feature_dataset_sorter()
    if dataset_sorter is None:  # idiom fix: identity check, not `== None`
        dataset_sorter = DatasetSorter(feature_db)
        feature_db.save_dataset_sorter(dataset_sorter)

    train_cnn.train(feature_db, caffe_config, dataset_sorter)