Example #1
import torch
import torch.nn as nn
import torch.optim as optim


def main():

    device = (torch.device('cuda') if torch.cuda.is_available()
              else torch.device('cpu'))
     
    model = Net(1).to(device=device)

    data_path = "../Mnist/"

    mnist = instantiate_training_data(data_path)
    mnist_val = instantiate_val_data(data_path)
    
    train_loader = torch.utils.data.DataLoader(mnist, batch_size=64)
    val_loader = torch.utils.data.DataLoader(mnist_val, batch_size=64)
    
    optimizer = optim.SGD(model.parameters(), lr=1e-2)
    
    loss_fn = nn.CrossEntropyLoss()

    training_string = "Training"
    val_string = "Val"
    
    print(f"Training on device {device}.")
    
    training_loop(
        n_epochs=100,
        optimizer=optimizer,
        model=model,
        loss_fn=loss_fn,
        train_loader=train_loader,
        device=device,
    )
    
    evaluate_training(model, train_loader, training_string)
    evaluate_validation(model, val_loader, val_string)
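
The call above fixes the signature training_loop(n_epochs, optimizer, model, loss_fn, train_loader, device). The actual implementation is not part of the snippet; a minimal sketch of a compatible loop (purely illustrative, not the original code) could be:

def training_loop(n_epochs, optimizer, model, loss_fn, train_loader, device):
    # Illustrative sketch matching the call site above.
    for epoch in range(1, n_epochs + 1):
        epoch_loss = 0.0
        for imgs, labels in train_loader:
            imgs = imgs.to(device=device)
            labels = labels.to(device=device)
            loss = loss_fn(model(imgs), labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        print(f"Epoch {epoch}, mean loss {epoch_loss / len(train_loader):.4f}")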
Example #2
import torch
from torch.utils.data import DataLoader


def main():
    """
    Entry point for training

    Load the dataset specified by args and train the model.
    """
    args = Argparser().args
    torch.backends.cudnn.benchmark = True

    data_path = f'./{args.input_dir}/{args.data_dir}/'
    dataset = ShapeNetDataset(data_path)
    data_loader = DataLoader(dataset=dataset,
                             batch_size=args.batch_size,
                             num_workers=(torch.cuda.device_count() * 4
                                          if args.device.upper() == 'CUDA'
                                          else 4),
                             shuffle=True,
                             drop_last=True)
    d_path = f'./{args.models_path}/{args.obj}_d.tar'
    g_path = f'./{args.models_path}/{args.obj}_g.tar'
    d_model, g_model, d_optim, g_optim = initialize_model(args, d_path, g_path)

    # Always save the model, even if training crashes or the session disconnects
    try:
        gan = '' if args.unpac else 'Pac'
        two = '' if args.unpac else '2'
        print(
            f'Training {gan}{args.gan_type.upper()}{two} on {args.device.upper()}'
        )
        training_loop(data_loader, d_model, g_model, d_optim, g_optim, args)
    finally:
        save_model(args.models_path, d_path, g_path, d_model, g_model, d_optim,
                   g_optim, args)
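
initialize_model and save_model are project-local; judging from the .tar paths and the try/finally, they wrap the standard torch.save/torch.load of state dicts. A minimal sketch with hypothetical names:

import torch

def save_checkpoint(path, model, optimizer):
    # Persist weights and optimizer state so an interrupted run can resume.
    torch.save({'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict()}, path)

def load_checkpoint(path, model, optimizer):
    # Restore what save_checkpoint persisted.
    checkpoint = torch.load(path)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])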
Example #3
import torch
from torch.optim import SGD
from torch.utils.data import DataLoader


def main_test():
    # dummy data
    print("Testing functionality of Similarity Regression")
    learned_embs = torch.randn((777, 10))
    poss_matches = torch.randn((2555, 30))
    known_matches = torch.randn((777, 30))

    test_model = SimilarityRegression(emb_dim=10, rna_dim=2555)
    optimizer = SGD(test_model.parameters(), lr=0.001)
    test_model.init_weights()
    # input_data = (learned_embs, known_matches)
    batch_size = 5
    num_epochs = 5
    input_data = DataLoader(LowDimData(learned_embs, known_matches),
                            batch_size=batch_size)
    training_loop(batch_size, num_epochs, test_model, optimizer, input_data,
                  poss_matches)
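
LowDimData is not shown; for the DataLoader call to work it must be a Dataset that pairs each embedding row with its matches row, much like torch.utils.data.TensorDataset. A hypothetical minimal version:

from torch.utils.data import Dataset

class LowDimData(Dataset):
    # Hypothetical stand-in: pairs each embedding row with its target row.
    def __init__(self, embs, matches):
        assert len(embs) == len(matches)
        self.embs, self.matches = embs, matches

    def __len__(self):
        return len(self.embs)

    def __getitem__(self, idx):
        return self.embs[idx], self.matches[idx]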
Example #4
import os

import numpy as np
import torch
import torch.nn as nn

DATAPATH = './data_cnn'
vocabulary = np.load(os.path.join(DATAPATH, 'voc_100.npy'))
index = range(len(vocabulary))
voca_dict = dict(zip(vocabulary, index))
train_data, val_data, test_data = data_formatting(path=DATAPATH,
                                                  time_name=config['time_name'])

logger.info('train size # sent ' + str(len(train_data)))
logger.info('dev size # sent ' + str(len(val_data)))
logger.info('test size # sent ' + str(len(test_data)))


logger.info(str(config))

model = AttentionRNN(config)
if config['cuda']:
    model.cuda()

# Loss and Optimizer
loss = nn.CrossEntropyLoss()
#optimizer = torch.optim.SGD(model.parameters(), lr=config['learning_rate'])
optimizer = torch.optim.Adam(model.parameters())
print(model.parameters())
# Train the model
training_iter = data_iter(
    train_data[:int(config['data_portion'] * len(train_data))],
    config['batch_size'])

dev_iter = eval_iter(
    val_data[:int(config['data_portion'] * len(val_data))],
    config['batch_size'])
logger.info('Start to train...')
#os.mkdir(config['savepath'])
training_loop(config, model, loss, optimizer,
              train_data[:int(config['data_portion'] * len(train_data))],
              training_iter, dev_iter, logger, config['savepath'])
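
data_iter and eval_iter are project-local helpers. From how they are consumed, data_iter appears to yield training batches and eval_iter one pass over the validation set; a hypothetical sketch, not the original code:

import random

def data_iter(data, batch_size):
    # Hypothetical: an endless stream of randomly sampled training batches.
    while True:
        yield random.sample(data, batch_size)

def eval_iter(data, batch_size):
    # Hypothetical: one sequential pass in fixed-size chunks.
    for i in range(0, len(data), batch_size):
        yield data[i:i + batch_size]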

Example #5
def main(args):

    config_dict = {
        'cnn_rnn': config_AttentionRNN.config_loading,
        'bigru_max': config_Hierachical_BiGRU_max.config_loading
    }

    config = config_dict[args.model]()
    config['model'] = args.model

    DATAPATH = config['DATAPATH']
    vocabulary = np.load(os.path.join(DATAPATH, 'voc_100.npy'))
    index = range(len(vocabulary))
    voca_dict = dict(zip(vocabulary, index))
    config['vocab_size'] = len(index)

    if args.time_name != 'default':
        #config['time_name'] = args.time_name
        config['savepath'] = config['savepath'].replace(
            config['time_name'], args.time_name)
        config['time_name'] = args.time_name

    logger_name = "mortality_prediction"
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)
    # file handler
    if not os.path.exists(config['savepath']):
        os.mkdir(config['savepath'])

    fh = logging.FileHandler(config['savepath'] + 'output.log')
    fh.setLevel(logging.INFO)
    logger.addHandler(fh)

    # stream handler
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    logger.addHandler(console)

    train_data, val_data, test_data, max_length = data_formatting(
        config=config,
        path=DATAPATH,
        time_name=config['time_name'],
        concat=config['concat'])

    logger.info('loading data...')

    logger.info('train size # sent ' + str(len(train_data)))
    logger.info('dev size # sent ' + str(len(val_data)))
    logger.info('test size # sent ' + str(len(test_data)))

    logger.info(str(config))

    if config['model'] == 'cnn_rnn':
        model = AttentionRNN(config)
    elif config['model'] == 'bigru_max':
        model = Hierachical_BiGRU_max(config)

    print(model.parameters())

    if config['cuda']:
        model.cuda()

    # Loss and Optimizer
    loss = nn.CrossEntropyLoss()
    time_loss = nn.CrossEntropyLoss(ignore_index=-1, reduction='mean')

    if config['optimizer'] == 'SGD':
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=config['learning_rate'])
    elif config['optimizer'] == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=config['learning_rate'])

    # Train the model
    training_iter = data_iter(
        train_data[:int(config['data_portion'] * len(train_data))],
        config['batch_size'])

    dev_iter = eval_iter(
        val_data[:int(config['data_portion'] * len(val_data))],
        config['batch_size'])

    testing_iter = test_iter(
        test_data[:int(config['data_portion'] * len(test_data))], 1)

    logger.info('Start to train...')
    #os.mkdir(config['savepath'])
    training_loop(config, model, loss, time_loss, optimizer,
                  train_data[:int(config['data_portion'] * len(train_data))],
                  training_iter, dev_iter, testing_iter, logger,
                  config['savepath'])
Example #6
import argparse
from time import sleep

from unityagents import UnityEnvironment

from maddpg import MADDPGAgent
from config import config
from train import training_loop
from play import play_loop


parser = argparse.ArgumentParser()
parser.add_argument('--train', action='store_true', dest='train', help='Set the train mode')
parser.add_argument('--file_prefix', default=None, help='Prefix of the weight files the agent should load')
parser.add_argument('--playthroughs', default=10, type=int, help='Number of playthroughs played in a play mode')
parser.add_argument('--sleep', default=0, type=int, help='Time before environment starts in a play mode [seconds]')
arguments = parser.parse_args()

env = UnityEnvironment(file_name='./Tennis.app', seed=config.general.seed)
brain_name = env.brain_names[0]
agent = MADDPGAgent(config=config, file_prefix=arguments.file_prefix)

if arguments.train:
    print('Train mode \n')
    training_loop(env, brain_name, agent, config)
else:
    print('Play mode \n')
    sleep(arguments.sleep)
    play_loop(env, brain_name, agent, playthrougs=arguments.playthroughs)
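
Assuming the script is saved as, say, main.py (the filename is not given in the snippet), python main.py --train starts a training run, while python main.py --file_prefix best_ --playthroughs 5 --sleep 3 loads saved weights and plays five episodes after a three-second delay.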
Example #7
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
from torch.utils.data import DataLoader


def main():
    args = create_parser()

    # naming variables for convenience with legacy code
    dtype, profiles, probefeatures, proteinfeatures = (
        args.dtype, args.profiles, args.probefeatures, args.proteinfeatures)

    #### get feature matrices and names
    pearson_file, emb_file = args.file_2, args.emb_file

    Y = np.genfromtxt(profiles, skip_header=1)[:, 1:]
    Y = np.nan_to_num(Y).T  # (407, 16382)

    with open(profiles) as f:
        ynames = np.array(f.readline().strip().split()[1:])
    #sevenmers = np.genfromtxt(profiles, skip_header=1)[:, 0]
    Y_train, Y_test, trainprots, testprots = original_script_dataset_processing(
        ynames, Y, arg1="0", pearson_file=pearson_file)

    learned_embs_df = pd.read_csv(emb_file, sep='\t')
    learned_embs_df.rename(columns={learned_embs_df.columns[0]: "name"},
                           inplace=True)
    learned_embs_df.columns = [
        "{0}_le_col".format(x) for x in learned_embs_df.columns
    ]

    learned_renamed = split_labeled_protein_names(learned_embs_df,
                                                  "name_le_col")
    learned_renamed = average_overlapping_embs(learned_renamed, "name_le_col")

    Y_train_final, embs_train, trainprots_final = filter_embs(
        Y_train, trainprots, learned_renamed)
    Y_test_final, embs_test, testprots_final = filter_embs(
        Y_test, testprots, learned_renamed)

    #Y_train_final = low_rank_approx(SVD=None, A=Y_train_final, r=False)
    #yyt = Y_train_final

    yyt = np.dot(Y_train_final, Y_train_final.T)

    # project Y_test_final onto Y_train_final to approximate projection onto
    # Y_train's singular vectors (see the note after this example)
    yyt_dev = np.dot(Y_test_final, Y_train_final.T)
    #print(yyt_dev.shape)
    #yyt_dev = low_rank_approx(SVD=None, A = Y_test_final, r = 24 )
    #print(yyt_dev.shape)

    print("embs shape", embs_train.shape)
    learned_embs = torch.FloatTensor(embs_train)  # torch.randn((213,10))
    # replacing YYT on LHS with transposed embeddings
    poss_matches = torch.FloatTensor(embs_train.T)

    known_matches = torch.FloatTensor(yyt)

    learned_embs_dev = torch.FloatTensor(embs_test)  # torch.randn((213,10))
    poss_matches_dev = torch.FloatTensor(yyt_dev)
    known_matches_dev = torch.FloatTensor(yyt_dev)

    (args.rna_dim, args.emb_dim) = embs_train.shape
    test_model = SimilarityRegression(emb_dim=args.emb_dim,
                                      rna_dim=args.rna_dim)
    #for x in test_model.parameters():
    #    x.data = x.data.normal_(0.0, 0.5)

    if args.use_cuda:
        learned_embs = learned_embs.cuda()
        poss_matches = poss_matches.cuda()
        known_matches = known_matches.cuda()
        learned_embs_dev = learned_embs_dev.cuda()
        poss_matches_dev = poss_matches_dev.cuda()
        known_matches_dev = known_matches_dev.cuda()
        test_model.cuda()

    optimizer = optim.Adam(test_model.parameters(),
                           lr=args.lr)  # optionally betas=(0.5, 0.999)

    #test_model.init_weights()

    input_data = DataLoader(LowDimData(learned_embs, known_matches),
                            batch_size=args.batch_size)
    dev_input_data = DataLoader(LowDimData(learned_embs_dev,
                                           known_matches_dev),
                                batch_size=args.batch_size)
    training_loop(
        args.batch_size,
        args.num_epochs,
        test_model,
        optimizer,
        input_data,
        poss_matches,
        dev_input_data,
        embed_file=args.emb_file,
        print_every=args.print_every,
        eval_every=args.eval_every)
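
A note on the projection trick above: if Y_train = U S Vt is the SVD, then Y_train.T = Vt.T S U.T, so Y_test @ Y_train.T equals (Y_test @ Vt.T) @ S @ U.T. Multiplying by Y_train.T therefore carries the same information as projecting Y_test onto Y_train's right singular vectors, scaled by the singular values. A small numpy check of the identity:

import numpy as np

Y_train = np.random.randn(8, 20)
Y_test = np.random.randn(3, 20)
U, S, Vt = np.linalg.svd(Y_train, full_matrices=False)
assert np.allclose(Y_test @ Y_train.T,
                   (Y_test @ Vt.T) @ np.diag(S) @ U.T)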
Example #8
    # resume training from a saved checkpoint
    net.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    epochs = checkpoint['epoch']
    criterion = checkpoint['loss']
    epochs = global_settings.NEW_EPOCH  # override the saved epoch count with the new budget


    ### Training
    best_acc = 0.0
    train_loss, train_accuracy = [], []
    for epoch in range(1, global_settings.NEW_EPOCH):
        if epoch > args.warm:
            # note: passing an epoch to scheduler.step() is deprecated in recent PyTorch
            train_scheduler.step(epoch)

        train_epoch_loss, train_epoch_accuracy = train.training_loop(epoch)
        train_loss.append(train_epoch_loss)
        train_accuracy.append(train_epoch_accuracy)
        val_loss, val_accuracy = train.eval_training()

        # start saving the best-performing model once the learning rate has decayed to 0.01
        if best_acc < val_accuracy:
            # torch.save(net.state_dict(), 
            #     checkpoint_path.format(net=args.net, epoch=epoch, type='best'))
            torch.save({
                'epoch': global_settings.EPOCH,
                'model_state_dict': net.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': loss_function
            }, checkpoint_path.format(net=args.net, epoch=epoch, type='best'))
            best_acc = val_accuracy
Example #9
import json
import os
import sys


def strip_quotes(args):
    # strip stray quotation marks around the JSON string values
    for k in args:
        args[k] = args[k].strip('"')
    return args


if __name__ == "__main__":
    HYPERPARAMETERS_PATH = sys.argv[1]
    with open(HYPERPARAMETERS_PATH) as json_file:
        args = json.load(json_file)

    args = strip_quotes(args)

    os.environ["CUDA_VISIBLE_DEVICES"] = args["gpu_id"]
    output_dir = args["output_dir"]
    input_dir = args["input_dir"]

    training_loop(input_dir=input_dir,
                  output_dir=output_dir,
                  img_size_x=int(args["img_size_x"]),
                  img_size_y=int(args["img_size_y"]),
                  batch_size=int(args["batch_size"]),
                  num_epochs_1=int(args["num_epochs_1"]),
                  num_epochs_2=int(args["num_epochs_2"]),
                  lr_1=float(args["lr_1"]),
                  lr_2=float(args["lr_2"]),
                  gradient_accumulation=int(args["gradient_accumulation"]),
                  cv_fold=int(args["cv_fold"]),
                  num_workers=8,
                  model_type=args["model_type"],
                  model_fname=args["model_fname"])
Example #10
import matplotlib.pyplot as plt
import numpy as np
import torch.optim as optim

# Create optimizers for the generators and discriminators
g_optimizer = optim.Adam(g_params, lr, [beta1, beta2])
d_x_optimizer = optim.Adam(D_X.parameters(), lr, [beta1, beta2])
d_y_optimizer = optim.Adam(D_Y.parameters(), lr, [beta1, beta2])

# training the network
from train import training_loop

n_epochs = 1000
G_XtoY, G_YtoX, D_X, D_Y, d_x_optimizer, d_y_optimizer, g_optimizer, losses = training_loop(
    dataloader_X,
    dataloader_Y,
    test_dataloader_X,
    test_dataloader_Y,
    G_XtoY,
    G_YtoX,
    D_X,
    D_Y,
    g_optimizer,
    d_x_optimizer,
    d_y_optimizer,
    n_epochs=n_epochs)

# visualize the losses; each entry of losses holds
# (D_X loss, D_Y loss, generator loss), matching the labels below
fig, ax = plt.subplots(figsize=(12, 8))
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator, X', alpha=0.5)
plt.plot(losses.T[1], label='Discriminator, Y', alpha=0.5)
plt.plot(losses.T[2], label='Generators', alpha=0.5)
plt.title("Training Losses")
plt.legend()