Example #1
import pickle

import model  # project-local module: buildModel / train / predict / saveModel


def main(args):

    # Load the training set (test/validation loading is left commented out below)
    with open(f"{args['data_destination']}/X_TRAIN.pickle", "rb") as fh:
        X_TRAIN = pickle.load(fh)
    with open(f"{args['data_destination']}/Y_TRAIN.pickle", "rb") as fh:
        Y_TRAIN = pickle.load(fh)
    #X_TEST = pickle.load(open(f"{args['data_destination']}/X_TEST.pickle", "rb"))
    #Y_TEST = pickle.load(open(f"{args['data_destination']}/Y_TEST.pickle", "rb"))
    #X_VALIDATION = pickle.load(open(f"{args['data_destination']}/X_VALIDATION.pickle", "rb"))
    #Y_VALIDATION = pickle.load(open(f"{args['data_destination']}/Y_VALIDATION.pickle", "rb"))

    # Normalize pixel values to [0, 1]
    X_TRAIN = X_TRAIN / 255.0
    #X_TEST = X_TEST/255.0
    #X_VALIDATION = X_VALIDATION/255.0

    dct_model = model.buildModel(
        tuple(int(dim) for dim in args['shape'].split(',')),
        int(args['classes']))
    model.train(dct_model, X_TRAIN, Y_TRAIN, int(args['batch_size']),
                int(args['epochs']))
    # Keras' summary() prints the summary itself and returns None,
    # so call it directly instead of printing its return value
    dct_model.summary()
    # model.eval(dct_model, X_TEST, Y_TEST, int(args['batch_size']))
    try:
        # X_TEST is never loaded above (its pickle load is commented out),
        # so this fails with NameError unless those lines are re-enabled
        model.predict(dct_model, X_TEST)
    except Exception as exc:
        print(f"model.predict gave an error: {exc}")
    model.saveModel(dct_model)
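
Example #1 leans on a project-local model module that is not shown. A minimal sketch of what those helpers could look like, assuming a tf.keras classifier (the layers, names, and defaults below are assumptions, not the project's code):

import tensorflow as tf

def buildModel(input_shape, n_classes):
    # Small CNN classifier matching the (shape, classes) call in main()
    net = tf.keras.Sequential([
        tf.keras.layers.Input(shape=input_shape),
        tf.keras.layers.Conv2D(32, 3, activation='relu'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(n_classes, activation='softmax'),
    ])
    net.compile(optimizer='adam',
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
    return net

def train(net, x, y, batch_size, epochs):
    net.fit(x, y, batch_size=batch_size, epochs=epochs)

def saveModel(net, path='model.h5'):
    # Keras models are not reliably picklable; use the built-in serializer
    net.save(path)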
Example #2
File: main.py  Project: andrewchen353/SISRD
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--model')
    args = parser.parse_args()

    if args.train and args.model:
        nn = model.generate_model()
        training_input = data_utils.load_data(training_input_dir)
        training_output = data_utils.load_data(training_output_dir)
        nn.fit(training_input, training_output, batch_size=128, epochs=50)
        model.saveModel(nn, args.model)
        test = input("Do you want to test with the test images too? ")
        if test.strip().lower() == 'yes':
            test_input = data_utils.load_data(test_input_dir)
            test_output = nn.predict(test_input)
            print(test_output.shape)
            data_utils.save_images(test_output_dir, test_input_dir,
                                   test_output)
    elif args.test and args.model:
        nn = model.loadModel(args.model)
        test_input = data_utils.load_data(test_input_dir)
        test_output = nn.predict(test_input)
        print(test_output.shape)
        data_utils.save_images(test_output_dir, test_input_dir, test_output)
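
The --test branch calls model.loadModel, which is not shown. Assuming the Keras HDF5 checkpoints that Example #5 from the same project writes, the pair could be as simple as this sketch (signatures inferred from the call sites, not the project's code):

from tensorflow.keras.models import load_model

def saveModel(nn, path):
    nn.save(path)            # writes the full model to a '.h5' path

def loadModel(path):
    return load_model(path)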
Example #3

def main():
    now = datetime.datetime.now()
    logger = Logger(opt.saveDir + '/logs_{}'.format(now.isoformat()))
    model, optimizer = getModel(opt)

    criterion = torch.nn.MSELoss()

    if opt.GPU > -1:
        print('Using GPU', opt.GPU)
        model = model.cuda(opt.GPU)
        criterion = criterion.cuda(opt.GPU)

    val_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'val'),
        batch_size=1,
        shuffle=opt.DEBUG > 1,  # only shuffle validation batches when debugging
        num_workers=1)

    if opt.test:
        _, preds = val(0, opt, val_loader, model, criterion)
        torch.save({
            'opt': opt,
            'preds': preds
        }, os.path.join(opt.saveDir, 'preds.pth'))
        return

    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                               batch_size=opt.trainBatch,
                                               shuffle=True,
                                               num_workers=int(opt.nThreads))

    for epoch in range(1, opt.nEpochs + 1):
        mark = epoch if opt.saveAllModels else 'last'
        log_dict_train, _ = train(epoch, opt, train_loader, model, criterion,
                                  optimizer)
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if epoch % opt.valIntervals == 0:
            log_dict_val, preds = val(epoch, opt, val_loader, model, criterion)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            saveModel(
                os.path.join(opt.saveDir, 'model_{}.checkpoint'.format(mark)),
                model)  # optimizer state is not checkpointed here
        logger.write('\n')
        if epoch % opt.dropLR == 0:
            lr = opt.LR * (0.1**(epoch // opt.dropLR))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
    torch.save(model.cpu(), os.path.join(opt.saveDir, 'model_cpu.pth'))
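
The saveModel helper used above takes the path first and is not shown in the snippet. A plausible minimal version, assuming a plain torch.nn.Module (an assumption, not the project's code):

import torch

def saveModel(path, model):
    # Persist only the learned parameters; rebuild the module and call
    # load_state_dict() on it to restore them later
    torch.save(model.state_dict(), path)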
Example #4
def hmm_train_save():
    # Define an HMM
    m = model.HiddenMarkovModel()
    # Load the dataset
    loader = loadDataset()
    # Learn the model parameters from the dataset
    m.learnDataset(loader)
    # A very simple smoke test ([1:-1] drops the first and last ids)
    sentence = wordsToIds("what is your name")[1:-1]
    print(idsToWords(sentence))
    output = m.getOutput(sentence)
    print(idsToWords(output))

    # Save the model
    model.saveModel(m, "hmm.pkl")
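
Saving to "hmm.pkl" suggests plain pickling. A minimal sketch of a pickle-based saveModel/loadModel pair, assuming the HMM object is picklable (hypothetical; the real helper is not shown):

import pickle

def saveModel(obj, path):
    with open(path, 'wb') as fh:
        pickle.dump(obj, fh)

def loadModel(path):
    with open(path, 'rb') as fh:
        return pickle.load(fh)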
Example #5
File: main.py  Project: andrewchen353/SISRD
def train(modelName, epochs, batch, lr, validation_split):
    key = modelName.split('_')[0]
    if key not in model.lookUp:
        print('Invalid model given')
        exit(1)
    if exists(models_path + modelName + '.h5'):
        print('This model already exists; increase the version number')
        exit(1)
    nn = model.lookUp[key](lr)
    print('Loading training input images from: ' + training_input_dir)
    training_input = data_utils.load_data(training_input_dir)
    print('Loading training output images from: ' + training_output_dir)
    training_output = data_utils.load_data(training_output_dir)
    print('Beginning training...')
    nn.fit(training_input,
           training_output,
           batch_size=batch,
           epochs=epochs,
           validation_split=validation_split)
    print('Saving model to: ' + models_path + modelName + '.h5')
    model.saveModel(nn, models_path + modelName + '.h5')
    return nn
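
A hypothetical invocation of train(); the model name and hyperparameters below are illustrative, not taken from the project:

# 'srcnn_v1' is a made-up model name; valid keys come from model.lookUp
nn = train('srcnn_v1', epochs=50, batch=128, lr=1e-3, validation_split=0.1)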
Example #6
def main():
    data = load_dataCSV()

    look_back = 28
    jump = 4

    train_data, test_data = dp.rescale_data(data)
    trainX, trainY = dp.create_dataset(train_data, look_back)
    trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
    testX, testY = dp.create_dataset(test_data, look_back)

    model = mod.getModel(look_back)
    model.fit(
        trainX,
        trainY,
        batch_size=128,
        epochs=300,  # 'nb_epoch' in the original is the pre-Keras-2 spelling
        validation_split=0.10)

    pred, perfs = mod.testModel(model, testX, testY, jump, look_back)

    # Align the actual series with the predicted window
    actual_test_data = test_data[len(test_data) - len(pred):]

    print("\nAverage covariance between predicted and actual prices on only predicted days:")
    print(np.mean(perfs))

    print("\nCovariance between predicted and actual prices on all days:")
    print(np.cov(actual_test_data, pred)[1][0])

    plt.figure(3)
    plt.plot(actual_test_data)

    plt.figure(4)
    plt.plot(pred)

    mod.saveModel(model, 'lstm3')
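
dp.create_dataset is not shown; for this kind of look-back LSTM setup it is usually a sliding-window builder along these lines (a sketch under that assumption, not the project's code):

import numpy as np

def create_dataset(series, look_back):
    X, y = [], []
    for i in range(len(series) - look_back):
        X.append(series[i:i + look_back])  # window of past values
        y.append(series[i + look_back])    # next value to predict
    return np.array(X), np.array(y)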
Example #7
    #    X_train, Y_train = downsample(X_train, Y_train, 1)
    m.fit(X_train, Y_train)
    chunk_size = len(test_index) // n_chunks
    # Predict in chunks to bound memory use; wrapping the chunks in
    # np.array is unnecessary (and fails for ragged chunks on modern NumPy)
    for ch in chunks(test_index, chunk_size):
        sys.stderr.write(str(datetime.datetime.now().time()) + ": started predicting (predict)\n")
        predicted[ch] = m.predict(data_X[ch])
        sys.stderr.write(str(datetime.datetime.now().time()) + ": stopped predicting (predict)\n")
    #    for inst in test_index.tolist():
    #        sys.stderr.write(str(datetime.datetime.now().time()) + ": started predicting (predict)\n")
    #        print(len(data_X[inst]))
    #        predicted[inst] = m.predict([data_X[inst]])
    #        sys.stderr.write(str(datetime.datetime.now().time()) + ": stopped predicting (predict)\n")
    evaluate(m, Y_test, base, predicted[test_index], targets)
    count += 1

print "Training set Evaluation:"
m.fit(data_X, data_Y)
for ch in chunks(list(range(len(data_X))), 2000):
    sys.stderr.write(str(datetime.datetime.now().time()) + ": started predicting (predict)\n")
    tr_pred[ch] = m.predict(data_X[ch])
    sys.stderr.write(str(datetime.datetime.now().time()) + ": stopped predicting (predict)\n")
evaluate(m, data_Y, baseline, tr_pred, targets)

print "Final Evaluation:"
evaluate(m, data_Y, baseline, predicted, targets)

# Train model and save it
if args.save_model is not None:
    sys.stderr.write("Saving model to: " + args.save_model + "\n")
    model.saveModel(m, args.save_model)
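
The chunks helper is not shown; based on how it is called, the usual generator looks like this sketch:

def chunks(seq, size):
    # Yield successive size-sized slices of seq; the last may be shorter
    for i in range(0, len(seq), size):
        yield seq[i:i + size]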
Example #8
with codecs.open(args.input_file, 'rb', 'utf-8') as input_file:
    for line in input_file:
        # Strip the trailing newline so the last field parses cleanly
        feat_values = line.rstrip("\n").split("\t")
        if num_feat == 0:
            num_feat = len(feat_values)
        elif num_feat != len(feat_values):
            print("Error: Different number of features")
            sys.exit(1)
        data_X.append(feat_values)
        num_instances += 1

with codecs.open(args.target_file, 'rb', 'utf-8') as target_file:
    for line in target_file:
        feat_values = line.rstrip("\n").split("\t")
        data_Y.append(feat_values[0])

if len(data_Y) != num_instances:
    print("Error: Different number of lines")
    sys.exit(1)

Xtr = np.array(data_X, dtype=float)
Ytr = np.array(data_Y, dtype=float)

m = model.Model(args.model_type, args.model_params)

# Train model and save it
if args.save_model is not None:
    m.model.fit(Xtr, Ytr)
    model.saveModel(m, args.save_model)
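
model.Model wraps an estimator chosen by model_type; the real class is not shown, so the mapping below is purely illustrative (hypothetical keys and estimators):

from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Ridge

class Model(object):
    def __init__(self, model_type, model_params=None):
        # Hypothetical lookup; assumes model_params is a dict of keyword args
        estimators = {'ridge': Ridge, 'rf': RandomForestRegressor}
        self.model = estimators[model_type](**(model_params or {}))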
Example #9
# Prepare the data
train_X = []
train_Y = []
while True:
    line = fh.readline().rstrip("\n")
    if not line:
        break
    feat_values = line.split("\t")
    feat_row = dict()
    target_row = dict()
    
    for i in range(len(feature_names)):
        # Collect target values for the other classifiers
        if feature_names[i] in targets:
            target_row[feature_names[i]] = feat_values[i]
        elif ("new" not in feature_names[i]
              and feature_names[i] not in features_ignore
              and not ignored_field(feature_names[i])
              and feat_values[i] != ""):
            feat_row[feature_names[i]] = feat_values[i]
            registered_feat_names[feature_names[i]] = 1
    train_X.append(feat_row)
    train_Y.append(target_row)

# Train and save model
n_features = int(feat_ratio * len(registered_feat_names))
if n_features > len(feature_names):
    raise ValueError("n_features (%d) exceeds the %d available features"
                     % (n_features, len(feature_names)))

m = model.Model(model_type, args.model_params, sparse, n_features)
m.fit(train_X, train_Y)
model.saveModel(m, args.output)
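
Here train_X is a list of feature dicts, so the wrapper presumably vectorizes them before fitting. A common pattern for that step (an assumption, not the project's code) is scikit-learn's DictVectorizer:

from sklearn.feature_extraction import DictVectorizer

# Hypothetical vectorization step for dict features like train_X above
vec = DictVectorizer(sparse=True)
X = vec.fit_transform(train_X)  # maps dicts to a sparse feature matrix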