Example #1
 def __init__(self):
     self.num_games = 0  # number of games played
     self.epsilon = 0  # randomness
     self.gamma = 0.9  # discount rate
     self.memory = deque(
         maxlen=MAX_MEMORY)  # pops from left if memory limit is exceeded
     self.model = Linear(11, 256, 3)
     self.trainer = Trainer(self.model, lr=LEARNING_RATE, gamma=self.gamma)
Example #2
    def __init__(self, load_path=''):
        self.n_games = 0
        self.epsilon = 0
        self.gamma = 0.9
        self.load_path = load_path
        self.memory = deque(maxlen=MAX_MEMORY)
        self.model = Net(11, 256, 3)

        if load_path:
            self.model.load_state_dict(torch.load(load_path))
        self.trainer = Trainer(self.model, LR, self.gamma)
Example #3
    def __model(self, tf_mix, tf_target, tf_lr):
        # define model flow
        # stft
        stft_module = STFT_Module(
            frame_length=self.stft_params["frame_length"],
            frame_step=self.stft_params["frame_step"],
            fft_length=self.stft_params["fft_length"],
            epsilon=self.epsilon,
            pad_end=self.stft_params["pad_end"])

        mr_stft_module = STFT_Module(
            frame_length=self.mr_stft_params["frame_length"],
            frame_step=self.mr_stft_params["frame_step"],
            fft_length=self.mr_stft_params["fft_length"],
            epsilon=self.epsilon,
            pad_end=self.mr_stft_params["pad_end"])

        # mix data transform
        tf_spec_mix = stft_module.STFT(tf_mix)
        tf_amp_spec_mix = stft_module.to_amp_spec(tf_spec_mix, normalize=False)
        tf_mag_spec_mix = tf.log(tf_amp_spec_mix + self.epsilon)
        tf_mag_spec_mix = tf.expand_dims(tf_mag_spec_mix,
                                         -1)  # (Batch, Time, Freq, Channel)
        tf_amp_spec_mix = tf.expand_dims(tf_amp_spec_mix, -1)
        tf_f_512_mag_spec_mix = stft_module.to_F_512(tf_mag_spec_mix)

        #mr mix data transform
        tf_mr_spec_mix = mr_stft_module.STFT(tf_mix)
        tf_mr_spec_mix = tf_mr_spec_mix[:, 1:513, :]
        tf_mr_amp_spec_mix = stft_module.to_amp_spec(tf_mr_spec_mix,
                                                     normalize=False)
        tf_mr_mag_spec_mix = tf.log(tf_mr_amp_spec_mix + self.epsilon)
        tf_mr_mag_spec_mix = tf.expand_dims(
            tf_mr_mag_spec_mix, -1)  # (Batch, Time, Freq, Channel)
        tf_mr_f_256_mag_spec_mix = tf_mr_mag_spec_mix[:, :, :256]

        # target data transform
        tf_spec_target = stft_module.STFT(tf_target)
        tf_amp_spec_target = stft_module.to_amp_spec(tf_spec_target,
                                                     normalize=False)
        tf_amp_spec_target = tf.expand_dims(tf_amp_spec_target, -1)

        mr_u_net_ver2 = MRUNet_ver2(
            input_shape=(tf_f_512_mag_spec_mix.shape[1:]),
            mr_input_shape=(tf_mr_f_256_mag_spec_mix.shape[1:]))

        tf_est_masks = mr_u_net_ver2(tf_f_512_mag_spec_mix,
                                     tf_mr_f_256_mag_spec_mix)

        #F: 512  → 513
        zero_pad = tf.zeros_like(tf_mag_spec_mix)
        zero_pad = tf.expand_dims(zero_pad[:, :, 1, :], -1)
        tf_est_masks = tf.concat([tf_est_masks, zero_pad], 2)

        tf_ora_masks = Masks.iaf(tf_amp_spec_mix, tf_amp_spec_target,
                                 self.epsilon)
        tf_loss = 10 * Loss.mean_square_error(tf_est_masks, tf_ora_masks)
        tf_train_step = Trainer.Adam(tf_loss, tf_lr)

        return tf_train_step, tf_loss, tf_amp_spec_target, tf_mag_spec_mix, tf_spec_mix, tf_est_masks, tf_ora_masks
Example #4
    def __model(self, tf_mix, tf_target, tf_lr):
        # define model flow
        # stft
        stft_module = STFT_Module(
            frame_length=self.stft_params["frame_length"],
            frame_step=self.stft_params["frame_step"],
            fft_length=self.stft_params["fft_length"],
            epsilon=self.epsilon,
            pad_end=self.stft_params["pad_end"])

        mr2_stft_module = STFT_Module(
            frame_length=self.mr2_stft_params["frame_length"],
            frame_step=self.mr2_stft_params["frame_step"],
            fft_length=self.mr2_stft_params["fft_length"],
            epsilon=self.epsilon,
            pad_end=self.mr2_stft_params["pad_end"])
        # mix data transform
        tf_spec_mix = stft_module.STFT(tf_mix)
        tf_amp_spec_mix = stft_module.to_amp_spec(tf_spec_mix, normalize=False)
        tf_mag_spec_mix = tf.log(tf_amp_spec_mix + self.epsilon)
        tf_mag_spec_mix = tf.expand_dims(tf_mag_spec_mix,
                                         -1)  # (Batch, Time, Freq, Channel)
        tf_amp_spec_mix = tf.expand_dims(tf_amp_spec_mix, -1)
        tf_f_512_mag_spec_mix = stft_module.to_F_512(tf_mag_spec_mix)

        #mr2 mix data transform
        #zero pad to fit stft time length 128
        mr2_zero_pad = tf.zeros_like(tf_mix)
        tf_mr2_mix = tf.concat(
            [mr2_zero_pad[:, :384], tf_mix, mr2_zero_pad[:, :384]], axis=1)
        tf_mr2_spec_mix = mr2_stft_module.STFT(tf_mr2_mix)
        tf_mr2_amp_spec_mix = stft_module.to_amp_spec(tf_mr2_spec_mix,
                                                      normalize=False)
        tf_mr2_mag_spec_mix = tf.log(tf_mr2_amp_spec_mix + self.epsilon)
        tf_mr2_mag_spec_mix = tf.expand_dims(tf_mr2_mag_spec_mix, -1)
        tf_mr2_mag_spec_mix = tf_mr2_mag_spec_mix[:, :, :1024, :]

        # target data transform
        tf_spec_target = stft_module.STFT(tf_target)
        tf_amp_spec_target = stft_module.to_amp_spec(tf_spec_target,
                                                     normalize=False)
        tf_amp_spec_target = tf.expand_dims(tf_amp_spec_target, -1)

        mini_u_net_ver4 = mini_UNet_ver4(
            input_shape=(tf_f_512_mag_spec_mix.shape[1:]),
            mr2_input_shape=(tf_mr2_mag_spec_mix.shape[1:]))

        tf_est_masks, _, _, _, _, _ = mini_u_net_ver4(tf_f_512_mag_spec_mix,
                                                      tf_mr2_mag_spec_mix)

        #F: 512  → 513
        zero_pad = tf.zeros_like(tf_mag_spec_mix)
        zero_pad = tf.expand_dims(zero_pad[:, :, 1, :], -1)
        tf_est_masks = tf.concat([tf_est_masks, zero_pad], 2)
        tf_est_spec = tf.math.multiply(tf_est_masks, tf_amp_spec_mix)
        tf_loss = 10 * Loss.mean_square_error(tf_est_spec, tf_amp_spec_target)
        tf_train_step = Trainer.Adam(tf_loss, tf_lr)

        return tf_train_step, tf_loss, tf_amp_spec_target, tf_mag_spec_mix, tf_spec_mix, tf_est_masks, tf_est_spec
Example #5
def load_training_model_from_factory(configs, ngpu):
    if configs['model']['type'] == 'AutoEncoder':
        net, optimizer = load_training_net_from_factory(configs)
        loss = load_loss_from_factory(configs)
        from model import AE_trainer as Trainer
        trainer = Trainer(net, loss, configs['op']['loss'], optimizer, ngpu)

    else:
        raise Exception("Wrong model type!")

    return trainer
Example #6
def main():
    with tf.Session() as sess:
        trainer = Trainer(input_channels=256, output_channels=256)
        coord = tf.train.Coordinator()
        reader = MultipleAudioReader('./data',
                                     coord,
                                     sample_rate=44100,
                                     sample_size=2**13)
        batch_num = 1

        input_batch_f = reader.dequeue_many(batch_num)
        trainer.create_network(input_batch_f)
        op_summary = tf.summary.merge_all()
        writer = tf.summary.FileWriter('result/log',
                                       graph=tf.get_default_graph())
        tf.global_variables_initializer().run()

        reader.start_threads(sess, 8)

        for iteration in range(10000000):
            loss_future, loss_past, _, summary = sess.run([
                trainer.loss_future,
                trainer.loss_past,
                trainer.op_train,
                op_summary,
            ])
            print('iter:{iter:08d} loss future:{future:04f} past:{past:04f}'.
                  format(
                      iter=iteration,
                      future=loss_future,
                      past=loss_past,
                  ))
            writer.add_summary(summary, iteration)
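            # save a checkpoint snapshot every 10000 iterations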
            if iteration % 10000 == 0:
                saver = tf.train.Saver()
                if not os.path.exists('result/snapshot/'):
                    os.makedirs('result/snapshot/', exist_ok=True)
                saver.save(sess,
                           'result/snapshot/{:08d}.data'.format(iteration))
        pass
Example #7
    def __model(self, tf_mix, tf_target, tf_lr):
        # define model flow
        # stft
        stft_module = STFT_Module(
            frame_length=self.stft_params["frame_length"],
            frame_step=self.stft_params["frame_step"],
            fft_length=self.stft_params["fft_length"],
            epsilon=self.epsilon,
            pad_end=self.stft_params["pad_end"])

        # mix data transform
        tf_spec_mix = stft_module.STFT(tf_mix)

        #             tf_mag_spec_mix = stft_module.to_magnitude_spec(tf_spec_mix, normalize=False)
        tf_amp_spec_mix = stft_module.to_amp_spec(tf_spec_mix, normalize=False)
        tf_mag_spec_mix = tf.log(tf_amp_spec_mix + self.epsilon)
        tf_mag_spec_mix = tf.expand_dims(tf_mag_spec_mix,
                                         -1)  # (Batch, Time, Freq, Channel)
        tf_amp_spec_mix = tf.expand_dims(tf_amp_spec_mix, -1)
        tf_f_512_mag_spec_mix = stft_module.to_F_512(tf_mag_spec_mix)

        # target data transform
        tf_spec_target = stft_module.STFT(tf_target)
        tf_amp_spec_target = stft_module.to_amp_spec(tf_spec_target,
                                                     normalize=False)
        tf_amp_spec_target = tf.expand_dims(tf_amp_spec_target, -1)

        conv_ffn = Conv_FFN(
            input_shape=(tf_f_512_mag_spec_mix.shape[1:]),
            out_dim=512,
            h_dim=512,
        )

        tf_est_masks = conv_ffn(tf_f_512_mag_spec_mix)

        #F: 512  → 513
        zero_pad = tf.zeros_like(tf_mag_spec_mix)
        zero_pad = tf.expand_dims(zero_pad[:, :, 1, :], -1)
        tf_est_masks = tf.concat([tf_est_masks, zero_pad], 2)
        tf_est_spec = tf.math.multiply(tf_est_masks, tf_amp_spec_mix)
        tf_loss = 10 * Loss.mean_square_error(tf_est_spec, tf_amp_spec_target)
        tf_train_step = Trainer.Adam(tf_loss, tf_lr)

        return tf_train_step, tf_loss, tf_amp_spec_target, tf_mag_spec_mix, tf_spec_mix, tf_est_masks, tf_est_spec
Example #8
def main():
    model = Model(cf.segment_class, cf.level_class, cf.image_scale)
    if torch.cuda.is_available():
        model.cuda()
    else:
        print("No cuda QAQ")
    trainer = Trainer(model,
                      torch.optim.Adam(model.parameters(), cf.lr),
                      epoch=cf.epoch,
                      use_cuda=torch.cuda.is_available(),
                      loss_weight=cf.loss_weight,
                      loss_func=3)
    trainer.train(init_from_exist=cf.import_model)
    trainer.test()
Example #9
def main(alpha=None, gamma=None):
    config = Config(args.config_path)
    if args.mode:
        config.mode = args.mode
    if args.train_id:
        config.train_id = args.train_id
    if args.num_epochs:
        config.num_epochs = args.num_epochs
    if args.base_dir:
        config.base_dir = args.base_dir

    config.use_bayes_opt = args.use_bayes_opt
    config.use_preprocess = args.use_preprocess
    config.use_swa = args.use_swa

    train_path = os.path.join(config.base_dir, config.train_dir, config.train_id)
    result_path = os.path.join(config.base_dir, config.result_dir, config.train_id)
    data_path = os.path.join(config.base_dir, config.data_dir)

    if not os.path.isdir(train_path):
        os.mkdir(train_path)

    if not os.path.isdir(result_path):
        os.mkdir(result_path)

    init_logger(os.path.join(result_path, 'log.txt'))
    set_seed(config)

    # get data loader
    tokenizer = AutoTokenizer.from_pretrained(config.bert_model_name)

    param = {"root": data_path, "batch_size": config.batch_size, "tokenizer": tokenizer, "config": config}
    train_dataloader = data_loader(**param, phase='train')
    validate_dataloader = data_loader(**param, phase='validate')
    test_dataloader = data_loader(**param, phase='test')

    # create model (check config)
    model = Trainer(config, train_dataloader, validate_dataloader, test_dataloader)

    if config.mode == 'train':
        result = model.train(alpha=alpha, gamma=gamma)
    elif config.mode == 'test':
        model.load_model(config.model_weight_file)
        result = model.evaluate('test')

    del model
    return result
Example #10
                                        pin_memory=True,
                                        num_workers=8)

blocks = get_blocks(cifar10=True)
model = FBNet(
    num_classes=config.num_cls_used if config.num_cls_used > 0 else 10,
    blocks=blocks,
    init_theta=config.init_theta,
    alpha=config.alpha,
    beta=config.beta,
    speed_f=config.speed_f)

trainer = Trainer(network=model,
                  w_lr=config.w_lr,
                  w_mom=config.w_mom,
                  w_wd=config.w_wd,
                  t_lr=config.t_lr,
                  t_wd=config.t_wd,
                  t_beta=config.t_beta,
                  init_temperature=config.init_temperature,
                  temperature_decay=config.temperature_decay,
                  logger=_logger,
                  lr_scheduler=lr_scheduler_params,
                  gpus=args.gpus)

trainer.search(train_queue,
               val_queue,
               total_epoch=config.total_epoch,
               start_w_epoch=config.start_w_epoch,
               log_frequence=args.log_frequence)
Example #11
    valid_ground_truth_path = './data/valid/ground_truth.txt'

    test_input_path = "./data/test/test.json"
    test_ground_truth_path = "./data/test/ground_truth.txt"

    config = {
        "max_length": 512,
        "epochs": 6,
        "batch_size": 3,
        "learning_rate": 2e-5,
        "fp16": True,
        "fp16_opt_level": "O1",
        "max_grad_norm": 1.0,
        "warmup_steps": 0.1,
    }
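    # populate the HyperParameters instance directly from the config dict via __dict__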
    hyper_parameter = HyperParameters()
    hyper_parameter.__dict__ = config
    algorithm = "LFESM"

    trainer = Trainer(
        training_dataset,
        bert_pretrained_model,
        hyper_parameter,
        algorithm,
        valid_input_path,
        valid_ground_truth_path,
        test_input_path,
        test_ground_truth_path,
    )
    trainer.train(MODEL_DIR)
Example #12
def main():

    # get args
    parser = argparse.ArgumentParser(description="Im2Latex Training Program")
    # parser.add_argument('--path', required=True, help='root of the model')

    # model args
    parser.add_argument("--emb_dim",
                        type=int,
                        default=80,
                        help="Embedding size")
    parser.add_argument("--dec_rnn_h",
                        type=int,
                        default=512,
                        help="The hidden state of the decoder RNN")
    parser.add_argument("--data_path",
                        type=str,
                        default="/root/private/im2latex/data/",
                        help="The dataset's dir")
    parser.add_argument("--add_position_features",
                        action='store_true',
                        default=False,
                        help="Use position embeddings or not")
    # training args
    parser.add_argument("--max_len",
                        type=int,
                        default=150,
                        help="Max size of formula")
    parser.add_argument("--dropout",
                        type=float,
                        default=0.,
                        help="Dropout probility")
    parser.add_argument("--cuda",
                        action='store_true',
                        default=True,
                        help="Use cuda or not")
    parser.add_argument("--batch_size", type=int, default=8)
    parser.add_argument("--epoches", type=int, default=200)
    parser.add_argument("--lr", type=float, default=3e-4, help="Learning Rate")
    parser.add_argument("--min_lr",
                        type=float,
                        default=3e-5,
                        help="Learning Rate")
    parser.add_argument("--sample_method",
                        type=str,
                        default="teacher_forcing",
                        choices=('teacher_forcing', 'exp', 'inv_sigmoid'),
                        help="The method to schedule sampling")
    parser.add_argument("--decay_k", type=float, default=1.)

    parser.add_argument("--lr_decay",
                        type=float,
                        default=0.5,
                        help="Learning Rate Decay Rate")
    parser.add_argument("--lr_patience",
                        type=int,
                        default=3,
                        help="Learning Rate Decay Patience")
    parser.add_argument("--clip",
                        type=float,
                        default=2.0,
                        help="The max gradient norm")
    parser.add_argument("--save_dir",
                        type=str,
                        default="./ckpts",
                        help="The dir to save checkpoints")
    parser.add_argument("--print_freq",
                        type=int,
                        default=100,
                        help="The frequency to print message")
    parser.add_argument("--seed",
                        type=int,
                        default=2020,
                        help="The random seed for reproducing ")
    parser.add_argument("--from_check_point",
                        action='store_true',
                        default=False,
                        help="Training from checkpoint or not")
    parser.add_argument("--batch_size_per_gpu", type=int, default=16)
    parser.add_argument("--gpu_num", type=int, default=4)
    device_ids = [0, 1, 2, 3]

    args = parser.parse_args()
    max_epoch = args.epoches
    from_check_point = args.from_check_point
    if from_check_point:
        checkpoint_path = get_checkpoint(args.save_dir)
        checkpoint = torch.load(checkpoint_path)
        args = checkpoint['args']
    print("Training args:", args)

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # Building vocab
    print("Load vocab...")
    vocab = load_vocab(args.data_path)

    use_cuda = True if args.cuda and torch.cuda.is_available() else False
    print(use_cuda)
    device = torch.device("cuda" if use_cuda else "cpu")

    # data loader
    print("Construct data loader...")
    # train_loader = DataLoader(
    #     Im2LatexDataset(args.data_path, 'train', args.max_len),
    #     batch_size=args.batch_size,
    #     collate_fn=partial(collate_fn, vocab.token2idx),
    #     pin_memory=True if use_cuda else False,
    #     num_workers=4)
    train_loader = DataLoader(
        Im2LatexDataset(args.data_path, 'train', args.max_len),
        batch_size=args.batch_size_per_gpu * args.gpu_num,
        collate_fn=partial(collate_fn, vocab.token2idx),
        pin_memory=True if use_cuda else False,
        num_workers=2)
    # val_loader = DataLoader(
    #     Im2LatexDataset(args.data_path, 'validate', args.max_len),
    #     batch_size=args.batch_size,
    #     collate_fn=partial(collate_fn, vocab.token2idx),
    #     pin_memory=True if use_cuda else False,
    #     num_workers=4)
    val_loader = DataLoader(Im2LatexDataset(args.data_path, 'validate',
                                            args.max_len),
                            batch_size=args.batch_size_per_gpu * args.gpu_num,
                            collate_fn=partial(collate_fn, vocab.token2idx),
                            pin_memory=True if use_cuda else False,
                            num_workers=2)

    # construct model
    print("Construct model")
    vocab_size = len(vocab)
    model = Im2LatexModel(vocab_size,
                          args.emb_dim,
                          args.dec_rnn_h,
                          add_pos_feat=args.add_position_features,
                          dropout=args.dropout)
    model = nn.DataParallel(model, device_ids=device_ids)
    # model = model.
    model = model.cuda()
    print("Model Settings:")
    print(model)

    # construct optimizer
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    lr_scheduler = ReduceLROnPlateau(optimizer,
                                     "min",
                                     factor=args.lr_decay,
                                     patience=args.lr_patience,
                                     verbose=True,
                                     min_lr=args.min_lr)

    if from_check_point:
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        epoch = checkpoint['epoch']
        lr_scheduler.load_state_dict(checkpoint['lr_sche'])
        # init trainer from checkpoint
        trainer = Trainer(optimizer,
                          model,
                          lr_scheduler,
                          train_loader,
                          val_loader,
                          args,
                          use_cuda=use_cuda,
                          init_epoch=epoch,
                          last_epoch=max_epoch)
    else:
        trainer = Trainer(optimizer,
                          model,
                          lr_scheduler,
                          train_loader,
                          val_loader,
                          args,
                          use_cuda=use_cuda,
                          init_epoch=1,
                          last_epoch=args.epoches)
    # begin training
    trainer.train()
Example #13
results_dir = "./results/" + args.dataset + "/split_" + args.split

if not os.path.exists(model_dir):
    os.makedirs(model_dir)
if not os.path.exists(results_dir):
    os.makedirs(results_dir)

file_ptr = open(mapping_file, 'r')
actions = file_ptr.read().split('\n')[:-1]
file_ptr.close()
actions_dict = dict()
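# each mapping-file line is "<class id> <action name>"; build a name -> id lookup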
for a in actions:
    actions_dict[a.split()[1]] = int(a.split()[0])

num_classes = len(actions_dict)
trainer = Trainer(num_layers_PG, num_layers_R, num_R, num_f_maps, features_dim,
                  num_classes, args.dataset, args.split)
if args.action == "train":
    batch_gen = BatchGenerator(num_classes, actions_dict, gt_path,
                               features_path, sample_rate)
    batch_gen.read_data(vid_list_file)
    trainer.train(model_dir,
                  batch_gen,
                  num_epochs=num_epochs,
                  batch_size=bz,
                  learning_rate=lr,
                  device=device)

if args.action == "predict":
    trainer.predict(model_dir, results_dir, features_path, vid_list_file_tst,
                    num_epochs, actions_dict, device, sample_rate)
Example #14
from model import Trainer

PATH = '../CICFlowMeter-4.0/bin/data/daily/'

target_file = sorted(glob.glob(PATH + '*.csv'))[-1]


separator = ','
reader = open(target_file, 'r')
header = reader.readline().split(separator)
count = 0

print('Number of columns: %d' % len(header))
print('Reading %s\n' % target_file)

model = Trainer()
model.load_model('./SVM_classifier.sav')

while True:
    row = reader.readline()
    if not row:
        time.sleep(0.1)
        continue
    count += 1
    # sys.stdout.write('\r' + str(count))
    # sys.stdout.flush()

    # Preprocess
    row = row.split(separator)[:-1]
    row = np.delete(np.array(row), [0, 1, 2, 3, 5, 6], 0)
    row = row.astype(np.float32)
Example #15
    def __model(self, tf_mix, tf_target, tf_lr):
        stft_module = STFT_Module(
            frame_length=self.stft_params["frame_length"],
            frame_step=self.stft_params["frame_step"],
            fft_length=self.stft_params["fft_length"],
            epsilon=self.epsilon,
            pad_end=self.stft_params["pad_end"])

        mr1_stft_module = STFT_Module(
            frame_length=self.mr1_stft_params["frame_length"],
            frame_step=self.mr1_stft_params["frame_step"],
            fft_length=self.mr1_stft_params["fft_length"],
            epsilon=self.epsilon,
            pad_end=self.mr1_stft_params["pad_end"])

        mr2_stft_module = STFT_Module(
            frame_length=self.mr2_stft_params["frame_length"],
            frame_step=self.mr2_stft_params["frame_step"],
            fft_length=self.mr2_stft_params["fft_length"],
            epsilon=self.epsilon,
            pad_end=self.mr2_stft_params["pad_end"])

        # mix data transform
        tf_spec_mix = stft_module.STFT(tf_mix)
        tf_amp_spec_mix = stft_module.to_amp_spec(tf_spec_mix, normalize=False)
        tf_mag_spec_mix = tf.log(tf_amp_spec_mix + self.epsilon)
        #                 tf_mag_spec_mix = tf.expand_dims(tf_mag_spec_mix, -1)# (Batch, Time, Freq, Channel))
        #                 tf_amp_spec_mix = tf.expand_dims(tf_amp_spec_mix, -1)
        tf_f_512_mag_spec_mix = stft_module.to_F_512(tf_mag_spec_mix)

        #mr1 mix data transform
        tf_mr1_spec_mix = mr1_stft_module.STFT(tf_mix)
        tf_mr1_spec_mix = tf_mr1_spec_mix[:, 1:513, :]
        tf_mr1_amp_spec_mix = stft_module.to_amp_spec(tf_mr1_spec_mix,
                                                      normalize=False)
        tf_mr1_mag_spec_mix = tf.log(tf_mr1_amp_spec_mix + self.epsilon)
        #                 tf_mr1_mag_spec_mix = tf.expand_dims(tf_mr1_mag_spec_mix, -1)# (Batch, Time, Freq, Channel))
        tf_mr1_f_256_mag_spec_mix = tf_mr1_mag_spec_mix[:, :, :256]

        #mr2 mix data transform
        #zero pad to fit stft time length 128
        mr2_zero_pad = tf.zeros_like(tf_mix)
        tf_mr2_mix = tf.concat(
            [mr2_zero_pad[:, :384], tf_mix, mr2_zero_pad[:, :384]], axis=1)
        tf_mr2_spec_mix = mr2_stft_module.STFT(tf_mr2_mix)
        tf_mr2_amp_spec_mix = stft_module.to_amp_spec(tf_mr2_spec_mix,
                                                      normalize=False)
        tf_mr2_mag_spec_mix = tf.log(tf_mr2_amp_spec_mix + self.epsilon)
        #                 tf_mr2_mag_spec_mix = tf.expand_dims(tf_mr2_mag_spec_mix, -1)
        tf_mr2_mag_spec_mix = tf_mr2_mag_spec_mix[:, :, :1024]

        # target data transform
        tf_spec_target = stft_module.STFT(tf_target)
        tf_amp_spec_target = stft_module.to_amp_spec(tf_spec_target,
                                                     normalize=False)
        #                 tf_amp_spec_target = tf.expand_dims(tf_amp_spec_target, -1)

        ffn_ver2 = FFN_ver2(
            out_dim=512,
            h_dim=512,
        )

        tf_est_masks = ffn_ver2(tf_f_512_mag_spec_mix,
                                tf_mr1_f_256_mag_spec_mix, tf_mr2_mag_spec_mix)

        #F: 512  → 513
        zero_pad = tf.zeros_like(tf_mag_spec_mix)
        zero_pad = tf.expand_dims(zero_pad[:, :, 1], -1)
        tf_est_masks = tf.concat([tf_est_masks, zero_pad], 2)
        print("est_mask", tf_est_masks.shape)
        print("amp_spec_mix", tf_amp_spec_mix.shape)
        tf_est_spec = tf.math.multiply(tf_est_masks, tf_amp_spec_mix)
        tf_loss = 10 * Loss.mean_square_error(tf_est_spec, tf_amp_spec_target)
        tf_train_step = Trainer.Adam(tf_loss, tf_lr)

        return tf_train_step, tf_loss, tf_amp_spec_target, tf_mag_spec_mix, tf_spec_mix, tf_est_masks, tf_est_spec
Example #16
    maxlen=args.maxlen,
    buffer= args.buffer,
)
if args.ds == "full":
    dstrain = ChessDataInMemory(dataConfig)
else:
    dstrain = ChessData(dataConfig)

modelConfig = ModelConfig(
    vocab_size = len(dstrain.m2id),
    n_positions=args.maxlen,
    n_ctx=args.maxlen,
    n_embd=args.n_embd,
    n_layer=args.n_layer,
    n_head=args.n_head
)
model = BaseHFGPT(modelConfig)
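# parameter count; keying by data_ptr() avoids double-counting shared/tied weights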
print(f"Model Size: {sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values())}")

trainerConf = TrainerConfig(
    max_epochs = args.num_epochs,
    batch_size = args.batch_size,
    lr = args.lr,
    betas = (args.beta1, args.beta2),
    tb_path = model_folder,
    save_every = args.save_every,
    ckpt_path = model_path
)
trainer = Trainer(model, dstrain, trainerConf)
trainer.train()
Example #17
    test_loader = DataLoader(test_dataset,
                             batch_size=batch,
                             shuffle=False,
                             drop_last=True)

    setting = 'dataset:' + str(dataset_name) + '\n' + \
            'dir:' + str(dir) + '\n' +  \
            'batch:' + str(batch) + '\n' +  \
            'C-SGEN layers:' + str(C_SGEN_layers) + '\n' +  \
            'epochs:' + str(iteration)

    print(setting, '\n')

    model = C_SGEN().to(torch.device('cuda'))

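    # .train() / .eval() only toggle the module's mode flag and return the same model instance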
    trainer = Trainer(model.train(), C_SGEN_layers)
    tester = T(model.eval(), C_SGEN_layers)

    Best_MSE = 100

    for epoch in range(1, (iteration + 1)):
        train_loss = trainer.train(train_loader)
        test_loss, RMSE_test, predicted_test, true_test = tester.test(
            test_loader)
        print('Epoch:', epoch, 'MSE:', test_loss)

        if test_loss < Best_MSE:
            Best_MSE = test_loss
            Best_epoch = epoch
            T_val, P_val = np.array(true_test), np.array(predicted_test)
            pear = pearson(T_val, P_val)
Example #18
def train(option, data_x, data_y, data_z):
    n_iter = option.n_iter
    setattr(tf.GraphKeys, "VARIABLES", "variables")
    DG_xz = DG()
    DG_xz.initial()
    DG_yz = DG()
    DG_yz.initial()
    DG_xy = DG()
    DG_xy.initial()

    tf.reset_default_graph()

    model1 = Model()
    model1.initial(option)

    # decoder and encoder
    qvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "inference_x")
    pvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "generative_x")
    dvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                              "discriminator_x")

    qvars_y = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                "inference_y")
    pvars_y = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                "generative_y")
    dvars_y = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                "discriminator_y")

    opt = tf.train.AdamOptimizer(1e-3, beta1=0.5)

    train_op = Trainer()
    train_op.initial(model1, opt, qvars, pvars, dvars, qvars_y, pvars_y,
                     dvars_y)

    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.1
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())
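    # finalize() makes the graph read-only so no new ops can be added during training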
    sess.graph.finalize()

    DG_xz = DG()
    DG_xz.initial()
    DG_yz = DG()
    DG_yz.initial()
    DG_xy = DG()
    DG_xy.initial()
    """ training """
    X_dataset = data_x.get_dataset()
    Y_dataset = data_y.get_dataset()
    Z_dataset = data_z.get_dataset()

    #[sess, DG_xz, DG_yz, model1] = train_model1(sess, train_op, option.n_epoch, opt, model1, X_dataset, Y_dataset, Z_dataset, qvars, pvars, dvars, qvars_y, pvars_y, dvars_y)

    #global summary_writer
    #summary_writer = tf.train.SummaryWriter('/tmp/mnist_logs', sess.graph)

    [sess, DG_xy,
     model1] = train_model2(train_op, option.n_epoch_2, opt, model1, sess,
                            X_dataset, Y_dataset, Z_dataset, qvars, pvars,
                            dvars, qvars_y, pvars_y, dvars_y)
    '''
    
    for i in range(0,n_iter):
        #if(i!=0):
            
            #saver = tf.train.Saver()
            #saver.restore(sess=sess,save_path="model/my-model"+str(i-1)+".ckpt")
            
            
        time_start=time.time()
        print("#####################")
        print("iteration:")
        print(i)
        print("#####################")
        print("#####################")
        print("#####################")
        #[sess, DG_xz, DG_yz, model1] = train_model1_3(train_op, option.n_epoch_3, opt, model1, sess, X_dataset, Y_dataset, Z_dataset, qvars, pvars, dvars, qvars_y, pvars_y, dvars_y)
        [sess, DG_xz, DG_yz, model1] = train_model1(train_op, option.n_epoch_3, opt, model1, sess, X_dataset, Y_dataset, Z_dataset, qvars, pvars, dvars, qvars_y, pvars_y, dvars_y)
        print("#####################")
        
        
        print("#####################")
        [sess, DG_xy, model1] = train_model2(train_op, option.n_epoch_4,opt, model1, sess, X_dataset, Y_dataset, Z_dataset, qvars, pvars, dvars, qvars_y, pvars_y, dvars_y)
        print("#####################")
        #saver = tf.train.Saver()
        #saver.save(sess=sess,save_path="model/my-model"+str(i)+".ckpt")
        #summary_writer.add_summary(summary_str, total_step)
        time_end=time.time()
        print('Model1_2: totally cost',time_end-time_start)
    
    #
    [sess, DG_xz, DG_yz, model1] =  train_model1(train_op, option.n_epoch_5, opt, model1, sess, X_dataset, Y_dataset, Z_dataset, qvars, pvars, dvars, qvars_y, pvars_y, dvars_y)
    [sess, DG_xy, model1] =         train_model2(train_op, option.n_epoch_6, opt, model1, sess, X_dataset, Y_dataset, Z_dataset, qvars, pvars, dvars, qvars_y, pvars_y, dvars_y)
    '''
    return sess, DG_xy, DG_xz, DG_yz, model1
Example #19
def train(hps, device, batch_size, test_batch_size, epochs, learning_rate,
          num_gpus, hosts, backend, current_host, model_dir, output_dir, seed,
          log_interval, beta1, nz, nc, ngf, ndf, dataloader):

    trainer = Trainer(nz,
                      nc,
                      ngf,
                      ndf,
                      weights_init,
                      device=device,
                      num_gpus=num_gpus)
    trainer.fixed_noise = torch.randn(batch_size, nz, 1, 1, device=device)

    # setup optimizer
    trainer.optimizerD = optim.Adam(trainer.netD.parameters(),
                                    lr=learning_rate,
                                    betas=(beta1, 0.999))
    trainer.optimizerG = optim.Adam(trainer.netG.parameters(),
                                    lr=learning_rate,
                                    betas=(beta1, 0.999))

    for epoch in range(epochs):
        trainer.train(epoch=epoch,
                      epochs=epochs,
                      log_batch=log_batch,
                      sample_batch=sample_batch,
                      dataloader=dataloader,
                      log_interval=log_interval,
                      output_dir=output_dir)

        # do checkpointing
        checkpoint_epoch(trainer, epoch, output_dir)

    trainer.save_model(model_dir)

    return
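    # NOTE: the code below this early return is never executed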

    is_distributed = len(hosts) > 1 and backend is not None
    logger.debug("Distributed training - {}".format(is_distributed))

    if is_distributed:
        # Initialize the distributed environment.
        world_size = len(hosts)
        os.environ['WORLD_SIZE'] = str(world_size)
        host_rank = hosts.index(current_host)
        os.environ['RANK'] = str(host_rank)
        dist.init_process_group(backend=backend,
                                rank=host_rank,
                                world_size=world_size)
        logger.info(
            'Initialized the distributed environment: \'{}\' backend on {} nodes. '
            .format(backend, dist.get_world_size()) +
            'Current host rank is {}. Number of gpus: {}'.format(
                dist.get_rank(), num_gpus))

    # set the seed for generating random numbers
    torch.manual_seed(seed)
    if device_name == "cuda":
        torch.cuda.manual_seed(seed)

    logging.getLogger().setLevel(logging.DEBUG)

    logger.debug("Processes {}/{} ({:.0f}%) of train data".format(
        len(train_loader.sampler), len(train_loader.dataset),
        100. * len(train_loader.sampler) / len(train_loader.dataset)))

    logger.debug("Processes {}/{} ({:.0f}%) of test data".format(
        len(test_loader.sampler), len(test_loader.dataset),
        100. * len(test_loader.sampler) / len(test_loader.dataset)))

    model = Net().to(device)
    if is_distributed and use_cuda:
        # multi-machine multi-gpu case
        model = torch.nn.parallel.DistributedDataParallel(model)
    else:
        # single-machine multi-gpu case or single-machine or multi-machine cpu case
        model = torch.nn.DataParallel(model)

    optimizer = optim.SGD(model.parameters(), lr=learning_rate)

    for epoch in range(1, epochs + 1):
        model.train()
        for batch_idx, (data, target) in enumerate(train_loader, 1):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = F.nll_loss(output, target)
            loss.backward()
            if is_distributed and not device == "cuda":
                # average gradients manually for multi-machine cpu case only
                _average_gradients(model)
            optimizer.step()
            if batch_idx % log_interval == 0:
                logger.info(
                    'Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}'.format(
                        epoch, batch_idx * len(data),
                        len(train_loader.sampler),
                        100. * batch_idx / len(train_loader), loss.item()))
        test(model, test_loader, device)
    save_model(model_dir, model)
Example #20
def main(args, device, model_load_dir, model_save_dir, results_save_dir):

    if args.action == 'train' and args.extract_save_pseudo_labels == 0:
        # load train dataset and test dataset
        print(f'Load train data: {args.train_data}')
        train_loader = DataLoader(args, args.train_data, 'train')
        print(f'Load test data: {args.test_data}')
        test_loader = DataLoader(args, args.test_data, 'test')

        print(f'Start training.')
        trainer = Trainer(
                    args.num_stages,
                    args.num_layers,
                    args.num_f_maps,
                    args.features_dim,
                    train_loader.num_classes,
                    device,
                    train_loader.weights,
                    model_save_dir
                    )

        eval_args = [
            args,
            model_save_dir,
            results_save_dir,
            test_loader.features_dict,
            test_loader.gt_dict,
            test_loader.eval_gt_dict,
            test_loader.vid_list,
            args.num_epochs,
            device,
            'eval',
            args.classification_threshold,
        ]

        batch_gen = BatchGenerator(
            train_loader.num_classes,
            train_loader.gt_dict,
            train_loader.features_dict,
            train_loader.eval_gt_dict
            )

        batch_gen.read_data(train_loader.vid_list)
        trainer.train(
            model_save_dir,
            batch_gen,
            args.num_epochs,
            args.bz,
            args.lr,
            device,
            eval_args,
            pretrained=model_load_dir)

    elif args.extract_save_pseudo_labels and args.pseudo_label_type != 'PL':
        # extract/ generate pseudo labels and save in "data/pseudo_labels"
        print(f'Load test data: {args.test_data}')
        test_loader = DataLoader(args, args.test_data, args.extract_set, results_dir=results_save_dir)
        print(f'Extract {args.pseudo_label_type}')
        
        if args.pseudo_label_type == 'local':
            get_save_local_fusion(args, test_loader.features_dict, test_loader.gt_dict)
        elif args.pseudo_label_type == 'merge':
            merge_PL_CP(args, test_loader.features_dict, test_loader.gt_dict)
        elif args.pseudo_label_type == 'CMPL':
            CMPL(args, test_loader.features_dict, test_loader.gt_dict)
        elif args.pseudo_label_type == 'CP':
            extract_CP(args, test_loader.features_dict)
        
        print('Self labelling process finished')


    else:
        print(f'Load test data: {args.test_data}')
        test_loader = DataLoader(args, args.test_data, args.extract_set, results_dir=results_save_dir)

        if args.extract_save_pseudo_labels and args.pseudo_label_type == 'PL':
            print(f'Extract {args.pseudo_label_type}')
            extract_save_PL = 1
        else:
            print(f'Start inference.')
            extract_save_PL = 0

        trainer = Trainer(
            args.num_stages,
            args.num_layers,
            args.num_f_maps,
            args.features_dim,
            test_loader.num_classes,
            device,
            test_loader.weights,
            results_save_dir)

        trainer.predict(
            args,
            model_load_dir,
            results_save_dir,
            test_loader.features_dict,
            test_loader.gt_dict,
            test_loader.eval_gt_dict,
            test_loader.vid_list,
            args.num_epochs,
            device,
            'test',
            args.classification_threshold,
            uniform=args.uniform,
            save_pslabels=extract_save_PL,
            CP_dict=test_loader.CP_dict,
            )
Example #21
    div2k_valid = DIV2K(crop_size=args.crop_size,
                        subset='valid',
                        images_dir=Image_dir,
                        caches_dir=Cache_dir)

    train_ds = div2k_train.dataset_hr(batch_size=args.batch_size,
                                      random_transform=True,
                                      normalize_dataset=False)
    valid_ds = div2k_valid.dataset_hr(batch_size=args.batch_size,
                                      random_transform=True,
                                      normalize_dataset=False)

    valid_lr, valid_hr = div2k_valid.get_single(818)

    trainer = GANTrainer(util,
                         args.crop_size,
                         log_dir=log_dir,
                         num_resblock=args.num_resblock)
    trainer.summary()

    try:
        if weights_path is not None:
            print('loading weights')
            trainer.load_checkpoint(weights_path)
        else:
            print('no weights for initialization are available')
    except Exception as e:
        print(e)

    if args.train_generator:
        trainer.fit(train_dataset=train_ds,
                    valid_dataset=valid_ds,
Example #22
def main(args):

    train_dir = args.train_dir
    train_csv = args.train_csv
    test_dir = args.test_dir
    test_csv = args.test_csv

    ratio = args.train_valid_ratio
    batch_size = args.batch_size
    epochs = args.epochs

    train_flag = args.train
    pretrain_weight = args.pretrain_weight
    verbose = args.verbose

    if (train_flag == 0):
        if (verbose == 2):
            print("Reading Training Data...")

        train_csv = pd.read_csv(train_csv)
        train_csv, valid_csv = train_valid_split(train_csv, ratio)

        train = RetinopathyDataset(train_csv, train_dir)
        valid = RetinopathyDataset(valid_csv, train_dir)

        if (verbose == 2):
            print("Creating DataLoader...")

        train_dataloader = DataLoader(train,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      num_workers=4)
        valid_dataloader = DataLoader(valid,
                                      batch_size=batch_size * 4,
                                      shuffle=False,
                                      num_workers=4)

        if (verbose == 2):
            print("Creating EfficientNet Model...")

        model = EfficientNetFinetune(
            level="efficientnet-b5",
            finetune=False,
            pretrain_weight="./weights/pretrained/aptos2018.pth")

        trainer = Trainer(model,
                          train_dataloader,
                          valid_dataloader,
                          epochs,
                          early_stop="QK",
                          verbose=verbose)

        if (verbose == 2):
            print("Strat Training...")
        trainer.train()

    if (train_flag == 1):
        if (verbose == 2):
            print("Strat Predicting...")

        test_csv = pd.read_csv(test_csv)
        test = RetinopathyDataset(test_csv, test_dir, test=True)
        test_dataloader = DataLoader(test,
                                     batch_size=batch_size * 4,
                                     shuffle=False,
                                     num_workers=4)
        model = EfficientNetFinetune(level="efficientnet-b5",
                                     finetune=False,
                                     test=True,
                                     pretrain_weight=pretrain_weight)
        tester(model, test_dataloader, verbose)
Example #23
print("LOADING DATA")
print("*********************************")

dataset = Dataset(name=hyperparams_dict["dataset"],
                  horizon=hyperparams_dict["horizon"],
                  history_length=hyperparams_dict["history_length"],
                  path=DATADIR)

hyperparams_dict["num_nodes"] = dataset.num_nodes
hyperparams = Parameters(**hyperparams_dict)

print("*********************************")
print("TRAINING MODELS")
print("*********************************")

trainer = Trainer(hyperparams=hyperparams, logdir=LOGDIR)
trainer.fit(dataset=dataset)

print("*********************************")
print("COMPUTING METRICS")
print("*********************************")

early_stop_mae_h_repeats = dict()
early_stop_mape_h_repeats = dict()
early_stop_rmse_h_repeats = dict()
early_stop_mae_h_ave = dict()
early_stop_mape_h_ave = dict()
early_stop_rmse_h_ave = dict()
for i, h in enumerate(trainer.history):
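    # early-stopping epoch = epoch with the lowest validation MAE; test metrics are taken at that epoch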
    early_stop_idx = np.argmin(h['mae_val'])
    early_stop_mae = np.round(h['mae_test'][early_stop_idx], decimals=3)
Example #24
    drop_last=True,  # drop the last batch that cannot be divided by batch_size
    pin_memory=True)
print('BUILDING MODEL')
criterion = nn.MSELoss()
encoder = resnet_encoder(7, kernel_size=31).cuda()
decoder = dresnet_decoder(7, kernel_size=31).cuda()
model = FullyConnectedConv1d_SpeechEnhancement(encoder=encoder,
                                               decoder=decoder).cuda()
resynthesizer = data.AudioResynthesizer(
    model=model,
    data_folder_path=serialized_testing_data_folder,
    saving_folder=saving_folder,
    transform=quick_transforms)
# model = WaveNet(layers=3,in_channels=1,output_length=32,kernel_size=3,bias=False,residual_channels=16).cuda()
# optimizer = torch.optim.Adam(model.parameters(),weight_decay=10.e-5)
optimizer = torch.optim.SGD(model.parameters(),
                            lr=0.1,
                            momentum=0.9,
                            weight_decay=10.e-5)
trainer = Trainer(model,
                  training_loader,
                  optimizer,
                  criterion,
                  test_loader=testing_loader,
                  verbose=True,
                  saving_folder=saving_folder,
                  resynthesizer=resynthesizer,
                  device_ids=[0, 1],
                  checkpoint=True)
trainer.train(70, drop_learning_rate=[10, 40, 50])
Example #25
val_data = data[int(len(data) * 0.8):]

device = torch.device("cpu")
if torch.cuda.is_available():
    device = torch.device("cuda")

encoder = Encoder_RNN(channel_num_list=[1, 16],
                      dense_num_list=[(16) * nodes_nums, encode_dim],
                      nodes_nums=nodes_nums,
                      dropout=0.).to(device)
decoder = Decoder_RNN(channel_num_list=[16, 1],
                      dense_num_list=[encode_dim, (16) * nodes_nums],
                      seq_len=seq_len,
                      nodes_nums=nodes_nums,
                      dropout=0.).to(device)
trainer = Trainer(encoder, decoder).to(device)
optimizer = optim.Adam(trainer.parameters())
loss_fn = VAELoss()

adj = torch.FloatTensor(adj).to(device)
adj.requires_grad = False


def evaluate(model, val_data, batch_size, seq_len):
    model.eval()
    loss_all = []
    for i, xs_seq in enumerate(yield_data_time(val_data, batch_size, seq_len)):
        xs_seq = torch.FloatTensor(xs_seq).to(device)
        out, mu, logvar = model(xs_seq, adj)
        loss = loss_fn(xs_seq, out, mu, logvar, [0, 1])
        loss_all.append(loss.cpu().data.numpy())
Example #26
class Agent:
    def __init__(self, load_path=''):
        self.n_games = 0
        self.epsilon = 0
        self.gamma = 0.9
        self.load_path = load_path
        self.memory = deque(maxlen=MAX_MEMORY)
        self.model = Net(11, 256, 3)

        if load_path:
            self.model.load_state_dict(torch.load(load_path))
        self.trainer = Trainer(self.model, LR, self.gamma)

    def get_state(self, game):
        # 0          1         2        3
        # U          L         R        D
        # [[1, -10], [0, -10], [0, 10], [1, 10]]
        head = game.snake_pos
        near_head = [
            [head[0], head[1] - 10],
            [head[0] - 10, head[1]],
            [head[0] + 10, head[1]],
            [head[0], head[1] + 10],
        ]

        directions = [
            game.direction == 0,
            game.direction == 1,
            game.direction == 2,
            game.direction == 3,
        ]

        state = [
            (directions[0] and game.is_colision(near_head[0])) or
            (directions[1] and game.is_colision(near_head[1])) or
            (directions[2] and game.is_colision(near_head[2])) or
            (directions[3] and game.is_colision(near_head[3])),

            (directions[0] and game.is_colision(near_head[1])) or
            (directions[1] and game.is_colision(near_head[3])) or
            (directions[2] and game.is_colision(near_head[0])) or
            (directions[3] and game.is_colision(near_head[2])),

            (directions[0] and game.is_colision(near_head[2])) or
            (directions[1] and game.is_colision(near_head[0])) or
            (directions[2] and game.is_colision(near_head[3])) or
            (directions[3] and game.is_colision(near_head[1])),

            game.food_pos[0] < head[0],
            game.food_pos[0] > head[0],
            game.food_pos[1] < head[1],
            game.food_pos[1] > head[1],
        ] + directions
        return np.array(state, dtype=int)

    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))

    def train_long_memory(self):
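        # experience replay: train on a random mini-batch sampled from the stored transitions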
        if len(self.memory) > BATCH_SIZE:
            mini_sample = random.sample(self.memory, BATCH_SIZE)
        else:
            mini_sample = self.memory

        states, actions, rewards, next_states, dones = zip(*mini_sample)
        self.trainer.train_step(states, actions, rewards, next_states, dones)


    def train_short_memory(self, state, action, reward, next_state, done):
        self.trainer.train_step(state, action, reward, next_state, done)


    def get_action(self, state):
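        # epsilon-greedy: when training from scratch, exploration decays as more games are played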
        if not self.load_path:
            self.epsilon = 80 - self.n_games

        final_move = [0, 0, 0]
        if random.randint(0, 200) < self.epsilon:
            move = random.randint(0, 2)
            final_move[move] = 1
        else:
            state0 = torch.tensor(state, dtype=torch.float)
            prediction = self.model(state0)
            move = torch.argmax(prediction).item()
            final_move[move] = 1

        return final_move
Example #27
if not os.path.exists(results_dir):
    os.makedirs(results_dir)

file_ptr = open(mapping_file, 'r')
actions = file_ptr.read().split('\n')[:-1]
file_ptr.close()
actions_dict = dict()
for a in actions:
    actions_dict[a.split()[1]] = int(a.split()[0])

num_classes = len(actions_dict)

trainer = Trainer(num_stages,
                  num_layers,
                  num_f_maps,
                  features_dim,
                  num_classes,
                  pooling_type=pooling_type,
                  dropout=dropout)
if args.action == "train":
    batch_gen = BatchGenerator(num_classes, actions_dict, gt_path,
                               features_path, sample_rate)
    batch_gen.read_data(vid_list_file)
    trainer.train(model_dir,
                  batch_gen,
                  num_epochs=num_epochs,
                  batch_size=bz,
                  learning_rate=lr,
                  device=device)

if args.action == "predict":
Example #28
if not os.path.exists(model_dir):
    os.makedirs(model_dir)
if not os.path.exists(results_dir):
    os.makedirs(results_dir)

file_ptr = open(mapping_file, 'r')
actions = file_ptr.read().split('\n')[:-1]
file_ptr.close()
actions_dict = dict()
for a in actions:
    actions_dict[a.split()[1]] = int(a.split()[0])

num_classes = len(actions_dict)

# train
trainer = Trainer(num_stages, num_layers, num_f_maps, features_dim,
                  num_classes)
no_change = 1
if args.action == "train":
    batch_gen = BatchGenerator(num_classes, actions_dict, segmentation_path,
                               features_path, sample_rate)
    batch_gen.read_data(vid_list_file)
    weights = batch_gen.set_class_weights()
    trainer.ce(weight=weights)
    while (no_change):
        trainer.train(model_dir,
                      batch_gen,
                      num_epochs=num_epochs,
                      batch_size=bz,
                      learning_rate=lr,
                      device=device)
        trainer.predict(model_dir, temp_results_dir, features_path,
Example #29
def main():
    # get args
    parser = argparse.ArgumentParser(description="Im2Latex Training Program")
    # parser.add_argument('--path', required=True, help='root of the model')

    # model args
    parser.add_argument("--emb_dim",
                        type=int,
                        default=80,
                        help="Embedding size")
    parser.add_argument("--dec_rnn_h",
                        type=int,
                        default=512,
                        help="The hidden state of the decoder RNN")
    parser.add_argument("--data_path",
                        type=str,
                        default="./data/",
                        help="The dataset's dir")
    parser.add_argument("--add_position_features",
                        action='store_true',
                        default=False,
                        help="Use position embeddings or not")
    # training args
    parser.add_argument("--max_len",
                        type=int,
                        default=150,
                        help="Max size of formula")
    parser.add_argument("--dropout",
                        type=float,
                        default=0.4,
                        help="Dropout probility")
    parser.add_argument("--cuda",
                        action='store_true',
                        default=True,
                        help="Use cuda or not")
    parser.add_argument("--batch_size", type=int, default=16)  # 指定batch_size
    parser.add_argument("--epoches", type=int, default=15)
    parser.add_argument("--lr", type=float, default=3e-4, help="Learning Rate")
    parser.add_argument("--min_lr",
                        type=float,
                        default=3e-5,
                        help="Learning Rate")
    parser.add_argument("--sample_method",
                        type=str,
                        default="teacher_forcing",
                        choices=('teacher_forcing', 'exp', 'inv_sigmoid'),
                        help="The method to schedule sampling")
    parser.add_argument(
        "--decay_k",
        type=float,
        default=1.,
        help="Base of Exponential decay for Schedule Sampling. "
        "When sample method is Exponential deca;"
        "Or a constant in Inverse sigmoid decay Equation. "
        "See details in https://arxiv.org/pdf/1506.03099.pdf")

    parser.add_argument("--lr_decay",
                        type=float,
                        default=0.5,
                        help="Learning Rate Decay Rate")
    parser.add_argument("--lr_patience",
                        type=int,
                        default=3,
                        help="Learning Rate Decay Patience")
    parser.add_argument("--clip",
                        type=float,
                        default=2.0,
                        help="The max gradient norm")
    parser.add_argument("--save_dir",
                        type=str,
                        default="./ckpts",
                        help="The dir to save checkpoints")
    parser.add_argument("--print_freq",
                        type=int,
                        default=100,
                        help="The frequency to print message")
    parser.add_argument("--seed",
                        type=int,
                        default=2020,
                        help="The random seed for reproducing ")
    parser.add_argument("--from_check_point",
                        action='store_true',
                        default=False,
                        help="Training from checkpoint or not")  # 是否finetune
    parser.add_argument("--exp", default="")  # 实验名称,ckpt的名称

    args = parser.parse_args()
    max_epoch = args.epoches
    from_check_point = args.from_check_point
    if from_check_point:
        checkpoint_path = get_checkpoint(args.save_dir)
        checkpoint = torch.load(checkpoint_path)
        args = checkpoint['args']
    print("Training args:", args)

    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    # Building vocab
    print("Load vocab...")
    vocab = load_vocab(args.data_path)

    use_cuda = True if args.cuda and torch.cuda.is_available() else False
    device = torch.device("cuda" if use_cuda else "cpu")

    # data loader
    print("Construct data loader...")
    train_loader = DataLoader(
        Im2LatexDataset(args.data_path, 'train', args.max_len),  # occasionally use the 'test' split for quick tests
        # Im2LatexDataset(args.data_path, 'test', args.max_len),
        batch_size=args.batch_size,
        collate_fn=partial(collate_fn, vocab.sign2id),
        pin_memory=True
        if use_cuda else False,  # pinned (page-locked) memory keeps data in RAM for faster transfers, but demands more of the machine
        num_workers=4)
    val_loader = DataLoader(Im2LatexDataset(args.data_path, 'validate',
                                            args.max_len),
                            batch_size=args.batch_size,
                            collate_fn=partial(collate_fn, vocab.sign2id),
                            pin_memory=True if use_cuda else False,
                            num_workers=4)

    # construct model
    print("Construct model")
    vocab_size = len(vocab)
    model = Im2LatexModel(vocab_size,
                          args.emb_dim,
                          args.dec_rnn_h,
                          add_pos_feat=args.add_position_features,
                          dropout=args.dropout)
    model = model.to(device)
    print("Model Settings:")
    print(model)

    # construct optimizer
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    lr_scheduler = ReduceLROnPlateau(optimizer,
                                     "min",
                                     factor=args.lr_decay,
                                     patience=args.lr_patience,
                                     verbose=True,
                                     min_lr=args.min_lr)

    if from_check_point:
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        epoch = checkpoint['epoch']
        lr_scheduler.load_state_dict(checkpoint['lr_sche'])
        # init trainer from checkpoint
        max_epoch = epoch + max_epoch  # fix a bug
        print('From %s To %s...' % (epoch, max_epoch))
        trainer = Trainer(optimizer,
                          model,
                          lr_scheduler,
                          train_loader,
                          val_loader,
                          args,
                          use_cuda=use_cuda,
                          init_epoch=epoch,
                          last_epoch=max_epoch)
    else:
        trainer = Trainer(optimizer,
                          model,
                          lr_scheduler,
                          train_loader,
                          val_loader,
                          args,
                          use_cuda=use_cuda,
                          init_epoch=1,
                          last_epoch=args.epoches,
                          exp=args.exp)
    # begin training
    trainer.train()
Example #30
import torch

torch.manual_seed(1)
from model import Trainer

if __name__ == '__main__':
    FEATURES = 4
    WIDTH = 4
    DEVICE = torch.device('cpu')
    dataset_inputs = torch.randn((2 ** 20, 16, FEATURES)) * WIDTH
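    # synthetic regression target: mean cosine of the inputs scaled by their RMS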
    dataset_outputs = dataset_inputs.cos().mean(-1) * dataset_inputs.square().mean(-1).sqrt()
    model = Trainer(FEATURES, (dataset_inputs, dataset_outputs), DEVICE, max_loss=(WIDTH*FEATURES)**2)
    model.fit(100)