Example #1
File: train.py Project: komeme/basic_nmt
def trainIters(translator, pairs, n_iters, print_every=1000, plot_every=100):
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every

    training_pairs = [tensorsFromPair(translator.input_lang, translator.output_lang, random.choice(pairs)) for _ in range(n_iters)]

    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_tensor = training_pair[0]
        target_tensor = training_pair[1]

        loss = translator.train(input_tensor, target_tensor)
        print_loss_total += loss
        plot_loss_total += loss

        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
                                         iter, iter / n_iters * 100, print_loss_avg))

        # if iter % plot_every == 0:
        #     plot_loss_avg = plot_loss_total / plot_every
        #     plot_losses.append(plot_loss_avg)
        #     plot_loss_total = 0

    showPlot(plot_losses)
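
Every snippet on this page calls a timeSince helper that the excerpts never define. In most examples the two-argument call timeSince(start, fraction_done) matches the helper from the PyTorch seq2seq tutorial, which reports the elapsed time and an estimate of the time remaining; a few later examples use a one-argument variant that only reports elapsed time. A minimal sketch of the two-argument form, under that assumption:

import math
import time


def asMinutes(s):
    # Format a duration given in seconds as "Xm Ys".
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)


def timeSince(since, percent):
    # Elapsed time since `since`, plus an estimate of the remaining time,
    # based on the fraction of work already completed (`percent` in (0, 1]).
    now = time.time()
    s = now - since
    es = s / percent
    rs = es - s
    return '%s (- %s)' % (asMinutes(s), asMinutes(rs))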
Example #2
def train(epoch):
    global tb_writer_flag
    model.train()
    total_loss = 0
    batches = int(len(train_data.dataset) / args.batch_size)
    batch_estimate_time = time.time()
    train_start_time = time.time()
    for batch, sample in enumerate(train_data):
        inputs, targets, input_lens, target_lens = sample
        inputs = inputs.transpose(0, 1)
        targets = targets.transpose(0, 1)
        if args.cuda:
            inputs = inputs.cuda()
            targets = targets.cuda()
        # write the first data to tensorboard
        if not tb_writer_flag:
            # tb_writer.add_graph(model, (inputs, targets))
            tb_writer_flag = True
        optimizer.zero_grad()
        output, hidden = model(inputs, targets, input_lens, target_lens,
                               return_decoder_all_h=False,
                               use_teacher_forcing=np.random.rand()>0.5,
                               SOS_index=corpus.dictionary.word2idx['<SOS>'])
        output = output.permute(1, 2, 0)
        targets = targets.permute(1, 0)
        #  output shape: (N, ntok, S), targets shape: (N, S)
        loss = criterion(output, targets)

        # TODO: Activation Regularization
        # if args.alpha:
        #     loss = loss + args.alpha * output.pow(2).mean()
        # TODO: Temporal Activation Regularization (slowness)
        # if args.beta:
        #     loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
        loss.backward()

        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
        if args.clip:
            torch.nn.utils.clip_grad_norm_(params, args.clip)
        optimizer.step()

        total_loss += loss.data.item()
        if batch % args.log_interval == 0 and batch > 0:
            elapsed = time.time() - batch_estimate_time
            batch_estimate_time = time.time()
            cur_loss = total_loss / args.log_interval
            tb_writer.add_scalar('training loss',
                                 cur_loss,
                                 batch+epoch*batches)
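            # ppl = exp(mean loss); bpc = mean loss (in nats) / ln(2).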
            print('| {:5d}/{:5d} batches | lr {:05.5f} '
                  '| {:5.2f} ms/batch  | loss {:5.2f} | ppl {:8.2f} '
                  '| bpc {:8.3f} | Time: {}'.
                  format(batch, batches, optimizer.param_groups[0]['lr'],
                         elapsed * 1000 / args.log_interval,
                         cur_loss, np.exp(cur_loss), cur_loss / np.log(2),
                         timeSince(train_start_time, batch / batches)
                         )
                  )
            total_loss = 0
Example #3
File: main.py Project: PengLU1101/openie
def train(discriminator, train_src, train_tgt, num_epoch, print_every, lr, L2,
          patience):
    """
    datapairslist = batchize(inputseqs_dev,
                             realtags_dev,
                             batch_size=16,
                             volatile=True)
    """

    try:
        best_val_loss = 0
        best_epoch = 0
        print_loss_total = 0
        start = time.time()
        print("Begin training...")

        for epoch_index in range(1, num_epoch + 1):
            train_loss = train_epoch(encoder, vqcrf, train_src, train_tgt,
                                     epoch_index, lr)
            prec, rec, val_loss = inference(encoder, vqcrf, inputseqs_dev,
                                            realtags_dev)

            print_loss_total += train_loss
            if not best_val_loss or val_loss > best_val_loss:

                if not os.path.isfile(args.model_path):
                    model_path = os.path.join(args.model_path,
                                              'discriminator.pkl')
                    torch.save(discriminator.state_dict(), model_path)
                    best_val_loss = val_loss
                    best_epoch = epoch_index
            else:
                lr /= 2
                if epoch_index - best_epoch > 5:
                    optimizer = getattr(optim,
                                        args.optim)(discriminator.parameters(),
                                                    weight_decay=L2)

                if epoch_index - best_epoch > patience:
                    print("Stop early at the %d epoch, the best epoch: %d" %
                          (epoch_index, best_epoch))
                    break
            if epoch_index % print_every == 0:
                print_loss_avg = print_loss_total / min(
                    print_every, num_epoch - epoch_index)
                print_loss_total = 0

                print('-' * 100)
                print('%s (%d %d%%)| best_f1:%.4f| F1:%.10f| prec:%.4f| '
                      'rec:%.4f| best_epoch:%d' %
                      (utils.timeSince(start, epoch_index / (num_epoch + 1)),
                       epoch_index, epoch_index / (num_epoch + 1) * 100,
                       best_val_loss, val_loss, prec, rec, best_epoch))
                print('-' * 100)

    except KeyboardInterrupt:
        print('-' * 100)
        print('Stop early... the best epoch is %d' % best_epoch)
Example #4
def trainEpochs(encoder, decoder, encoder_optimizer, decoder_optimizer, 
        encoder_scheduler, decoder_scheduler, criterion, dataiter, args):
    n_epochs = args.n_epochs
    print_every = args.print_every
    plot_every = args.plot_every

    start = time.time()
    batch_i = 0
    n_batches = n_epochs * len(dataiter)
    plot_losses = []
    epoch_loss = 0 # Reset every epoch
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every

    for epoch in range(args.n_epochs):

        for input_tensor, input_lengths, target_tensor, target_lengths in dataiter:
            batch_i += 1

            loss = train(input_tensor, input_lengths, target_tensor, target_lengths, 
                encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, args)
            epoch_loss += loss
            print_loss_total += loss
            plot_loss_total += loss

            if batch_i % args.print_every == 0:
                print_loss_avg = print_loss_total / print_every
                print_loss_total = 0
                print('%s (%d %d%%) %.4f' % (timeSince(start, batch_i / n_batches),
                                            batch_i, batch_i / n_batches * 100, print_loss_avg))

            if batch_i % args.plot_every == 0:
                plot_loss_avg = plot_loss_total / plot_every
                plot_losses.append(plot_loss_avg)
                plot_loss_total = 0

            if (epoch + 1) % args.save_every == 0:
                checkpoint = {
                    'epoch': epoch,
                    'encoder_state_dict': encoder.state_dict(),
                    'decoder_state_dict': decoder.state_dict(),
                    'encoder_optim_state': encoder_optimizer.state_dict(),
                    'decoder_optim_state': decoder_optimizer.state_dict(),
                }
                torch.save(checkpoint, args.save_data_path + "/epoch{}_checkpoint.pt".format(epoch))

            # for testing only
            if args.n_batches > 0 and batch_i == args.n_batches:
                break

        encoder_scheduler.step(epoch_loss)
        decoder_scheduler.step(epoch_loss)
        epoch_loss = 0
        dataiter.reset()
        
        print("Epoch {}/{} finished".format(epoch, args.n_epochs - 1))

    showPlot(plot_losses, args)
Example #5
def train(train_env, encoder, decoder, n_iters, log_every=100, val_envs={}):
    ''' Train on training set, validating on both seen and unseen. '''

    agent = Seq2SeqAgent(train_env, "", encoder, decoder, max_episode_len)
    print 'Training with %s feedback' % feedback_method
    encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate, weight_decay=weight_decay)
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate, weight_decay=weight_decay) 

    data_log = defaultdict(list)
    start = time.time()
   
    for idx in range(0, n_iters, log_every):

        interval = min(log_every,n_iters-idx)
        iter = idx + interval
        data_log['iteration'].append(iter)

        # Train for log_every interval
        agent.train(encoder_optimizer, decoder_optimizer, interval, feedback=feedback_method)
        train_losses = np.array(agent.losses)
        assert len(train_losses) == interval
        train_loss_avg = np.average(train_losses)
        data_log['train loss'].append(train_loss_avg)
        loss_str = 'train loss: %.4f' % train_loss_avg

        # Run validation
        for env_name, (env, evaluator) in val_envs.iteritems():
            agent.env = env
            agent.results_path = '%s%s_%s_iter_%d.json' % (RESULT_DIR, model_prefix, env_name, iter)
            # Get validation loss under the same conditions as training
            agent.test(use_dropout=True, feedback=feedback_method, allow_cheat=True)
            val_losses = np.array(agent.losses)
            val_loss_avg = np.average(val_losses)
            data_log['%s loss' % env_name].append(val_loss_avg)
            # Get validation distance from goal under test evaluation conditions
            agent.test(use_dropout=False, feedback='argmax')
            agent.write_results()
            score_summary, _ = evaluator.score(agent.results_path)
            loss_str += ', %s loss: %.4f' % (env_name, val_loss_avg)
            for metric,val in score_summary.iteritems():
                data_log['%s %s' % (env_name,metric)].append(val)
                if metric in ['success_rate']:
                    loss_str += ', %s: %.3f' % (metric, val)

        agent.env = train_env

        print('%s (%d %d%%) %s' % (timeSince(start, float(iter)/n_iters),
                                             iter, float(iter)/n_iters*100, loss_str))

        df = pd.DataFrame(data_log)
        df.set_index('iteration')
        df_path = '%s%s_log.csv' % (PLOT_DIR, model_prefix)
        df.to_csv(df_path)
        
        split_string = "-".join(train_env.splits)
        enc_path = '%s%s_%s_enc_iter_%d' % (SNAPSHOT_DIR, model_prefix, split_string, iter)
        dec_path = '%s%s_%s_dec_iter_%d' % (SNAPSHOT_DIR, model_prefix, split_string, iter)
        agent.save(enc_path, dec_path)
Example #6
def eval_db_agent(env, params):
    if params['use_preproc']:
        preprocessor = Preprocessor(params['state_dim'], params['history'], params['use_luminance'],
                                    params['resize_shape'])
        params['state_dim'] = preprocessor.state_shape
    else:
        preprocessor = None

    agent = VAE(params['state_dim'], params['action_dim'])
    if params['use_cuda']:
        agent = agent.cuda()
        agent.load_state_dict(torch.load('./agents/{0}_{1}'.format(params['arch'], params['env_name'])))
    else:
        agent.load_state_dict(
            torch.load('./agents/{0}_{1}'.format(params['arch'], params['env_name']), map_location='cpu'))
    agent.eval()

    agent_steps = 0
    episode_rewards = []
    start = time.time()
    for episode in xrange(1, params['num_episodes'] + 1):
        env_state = env.reset()
        episode_reward = 0.0
        for t in xrange(1, params['max_steps'] + 1):
            if params['env_render']:
                env.render()

            if preprocessor:
                state = preprocessor.process_state(env_state)
            else:
                state = env_state

            var_state = createVariable(state, use_cuda=params['use_cuda'])
            action, state_val = agent.sample_action_eval(var_state)

            reward = 0.0
            for _ in range(1):
                env_state, r, terminal, _ = env.step(action)
                reward += r
                if terminal:
                    break

            episode_reward += reward

            if terminal:
                break

        episode_rewards.append(episode_reward)
        agent_steps += t

        if preprocessor:
            preprocessor.reset()

        print 'Episode {0} | Total Steps {1} | Total Reward {2} | Mean Reward {3} | Total Time {4}' \
            .format(episode, agent_steps, episode_reward, sum(episode_rewards[-100:]) / 100,
                    timeSince(start, episode / params['num_episodes']))
Example #7
def train(args, train_env, agent, log_every=log_every, val_envs=None):
    ''' Train on training set, validating on both seen and unseen. '''

    if val_envs is None:
        val_envs = {}

    print('Training with %s feedback' % feedback_method)
    visEncoder_optimizer = optim.Adam(filter_param(
        agent.visEncoder.parameters()),
                                      lr=learning_rate,
                                      weight_decay=weight_decay)
    lanEncoder_optimizer = optim.Adam(filter_param(
        agent.lanEncoder.parameters()),
                                      lr=learning_rate,
                                      weight_decay=weight_decay)
    dotSim_optimizer = optim.Adam(filter_param(agent.dotSim.parameters()),
                                  lr=learning_rate,
                                  weight_decay=weight_decay)

    data_log = defaultdict(list)
    start = time.time()

    split_string = "-".join(train_env.splits)

    def make_path(n_iter):
        return os.path.join(
            args.snapshot_dir, '%s_%s_iter_%d' % (get_model_prefix(
                args, train_env.image_features_list), split_string, n_iter))

    best_metrics = {}
    last_model_saved = {}
    for idx in range(0, args.n_iters, log_every):
        agent.env = train_env

        interval = min(log_every, args.n_iters - idx)
        iter = idx + interval
        data_log['iteration'].append(iter)

        # Train for log_every interval
        agent.train(visEncoder_optimizer, lanEncoder_optimizer,
                    dotSim_optimizer, interval)
        train_losses = np.array(agent.losses)
        assert len(train_losses) == interval
        train_loss_avg = np.average(train_losses)
        data_log['train loss'].append(train_loss_avg)
        loss_str = 'train loss: %.4f' % train_loss_avg

        save_log = []

        print(
            ('%s (%d %d%%) %s' % (timeSince(start,
                                            float(iter) / args.n_iters), iter,
                                  float(iter) / args.n_iters * 100, loss_str)))
        if not args.no_save:
            if save_every and iter % save_every == 0:
                agent.save(make_path(iter))
Example #8
File: main.py Project: arpit9295/ce7455
def trainIters(encoder,
               decoder,
               n_iters=parameters['train_iters'],
               print_every=parameters['print_every'],
               plot_every=parameters['plot_every'],
               learning_rate=parameters['learning_rate']):
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    random_pairs = [random.choice(pairs) for i in range(n_iters)]
    training_word_pairs = [
        lang_word_indexer.pairToTensors(pair) for pair in random_pairs
    ]
    if use_char_embedding:
        input_char_indexes = [
            lang_char_indexer.sentenceToTensor(input_lang, pair[0])
            for pair in random_pairs
        ]
    criterion = nn.NLLLoss()

    for iter in range(1, n_iters + 1):
        training_word_pair = training_word_pairs[iter - 1]
        input_word_tensor = training_word_pair[0]
        target_word_tensor = training_word_pair[1]
        if use_char_embedding:
            input_char_tensor = input_char_indexes[iter - 1]
        else:
            input_char_tensor = None

        loss = train(input_word_tensor,
                     target_word_tensor,
                     input_char_tensor,
                     encoder,
                     decoder,
                     encoder_optimizer,
                     decoder_optimizer,
                     criterion,
                     max_length=MAX_LENGTH)
        print_loss_total += loss
        plot_loss_total += loss

        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' %
                  (timeSince(start, iter / n_iters), iter,
                   iter / n_iters * 100, print_loss_avg))

        if iter % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0

    savePlot(file_path, plot_losses)
Example #9
def trainIters(ds,
               encoder,
               decoder,
               n_iters,
               max_length,
               input_lang,
               output_lang,
               print_every=1000,
               plot_every=100,
               learning_rate=0.01):
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every
    device = encoder.embedding.weight.device

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)

    training_pairs = []
    for i in range(n_iters):
        report_id = random.choice(range(len(ds)))
        report_pair = ds.__getitem__(report_id)[:2]
        report_pair = (report_pair[0], normalizeString(report_pair[1]))
        #print(report_pair)
        training_pairs.append(
            tensorsFromPair(report_pair, input_lang, output_lang, device,
                            max_length))

    criterion = nn.NLLLoss()

    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_tensor = training_pair[0]
        target_tensor = training_pair[1]

        loss = train(input_tensor, target_tensor, encoder, decoder,
                     encoder_optimizer, decoder_optimizer, criterion,
                     max_length)
        print_loss_total += loss
        plot_loss_total += loss

        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' %
                  (timeSince(start, iter / n_iters), iter,
                   iter / n_iters * 100, print_loss_avg))

        if iter % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0

    return plot_losses
Example #10
def trainIters(batch_size, cnn, encoder, decoder, data_loader, learning_rate,
               n_iters, print_every, use_cuda):
    start = time.time()
    print_losses = []
    print_loss_total = 0

    # Loss and optimizer
    cnn_optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)
    encoder_optimizer = torch.optim.Adam(encoder.parameters(),
                                         lr=learning_rate)
    decoder_optimizer = torch.optim.Adam(decoder.parameters(),
                                         lr=learning_rate)

    criterion = nn.NLLLoss()

    data_generator = data_loader.create_data_generator(args.batch_size,
                                                       args.train_path)

    best_loss = None

    for iter in range(1, n_iters + 1):
        (images, targets, targets_eval, num_nonzer,
         img_paths) = data_generator.next()
        loss, predicted_index, actual_index = train(
            images, targets, targets_eval, cnn, encoder, decoder,
            cnn_optimizer, encoder_optimizer, decoder_optimizer, criterion,
            args.max_lenth_encoder, use_cuda)

        print_loss_total += loss

        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_losses.append(print_loss_avg)
            print_loss_total = 0

            print('%s (%d %d%%) %.4f' %
                  (timeSince(start, iter / float(n_iters)), iter,
                   iter / float(n_iters) * 100, print_loss_avg))

            print("Predicted Tokens")
            print([data_loader.tokenizer.id2vocab[i] for i in predicted_index])
            print("Actual Tokens")
            print([data_loader.tokenizer.id2vocab[i] for i in actual_index])

            if not best_loss or print_loss_avg < best_loss:
                best_loss = print_loss_avg
                with open(args.save_cnn, 'wb') as f:
                    torch.save(cnn, f)
                with open(args.save_encoder, 'wb') as f:
                    torch.save(encoder, f)
                with open(args.save_decoder, 'wb') as f:
                    torch.save(decoder, f)

    return print_losses
Example #11
def train_epochs(train_dataloader, val_dataloader, model, loss_fn, num_epochs,
                 save_path):

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    optimizer = torch.optim.Adam(model.parameters())
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           'max',
                                                           verbose=True)
    stopper = EarlyStopping(verbose=True,
                            path=os.path.join(save_path,
                                              'unet_model_best.pth'),
                            patience=15,
                            mode='max')
    steps = len(train_dataloader.dataset) // train_dataloader.batch_size
    best_model = model = model.to(device)

    start = time.time()
    train_losses = []
    train_ious = []
    val_losses = []
    val_ious = []
    for epoch in range(1, num_epochs + 1):
        print('-' * 10)
        print('Epoch {}/{}'.format(epoch, num_epochs))
        running_iou = []
        running_loss = []
        for step, (x, y) in enumerate(train_dataloader):
            loss, iou = train(model, x, y, loss_fn, optimizer, device)
            running_iou.append(iou)
            running_loss.append(loss)
            print('\r{:6.1f} %\tloss {:8.4f}\tIoU {:8.4f}'.format(
                100 * (step + 1) / steps, loss, iou),
                  end="")
        print('\r{:6.1f} %\tloss {:8.4f}\tIoU {:8.4f}\t{}'.format(
            100 * (step + 1) / steps, np.mean(running_loss),
            np.mean(running_iou), timeSince(start)))
        print('running validation...', end='\r')
        val_loss, val_iou = validate(val_dataloader, model, loss_fn, device)
        print('Validation: \tloss {:8.4f} \tIoU {:8.4f}'.format(
            val_loss, val_iou))
        scheduler.step(np.mean(running_iou))

        train_losses.append(loss)
        train_ious.append(iou)
        val_losses.append(val_loss)
        val_ious.append(val_iou)

        stopper(val_iou, model)
        if stopper.early_stop:
            break

    return (train_losses, val_losses), (train_ious, val_ious), best_model
Example #12
 def test_report_times(self):
     eps = 0.01
     ##
     start = time.time()
     time.sleep(0.5)
     msg, h = timeSince(start)
     ##
     start = time.time()
     time.sleep(0.5)
     msg, seconds, _, _ = report_times(start)
     ##
     diff = abs(seconds - h*60*60)
     self.assertTrue(diff <= eps)
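
The test above exercises a one-argument timeSince that returns a message together with the elapsed hours, and a report_times helper that also returns the elapsed seconds. A minimal sketch consistent with that assertion; the message format and extra return values are assumptions, not the project's actual implementation:

import time


def timeSince(start):
    # One-argument variant: return a readable message and the elapsed hours.
    seconds = time.time() - start
    hours = seconds / 3600.0
    return 'time: %.2f hours' % hours, hours


def report_times(start):
    # Return a message plus the elapsed seconds, minutes, and hours.
    seconds = time.time() - start
    minutes = seconds / 60.0
    hours = minutes / 60.0
    msg = 'time: %.2f s (%.2f min, %.2f h)' % (seconds, minutes, hours)
    return msg, seconds, minutes, hours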
Example #13
def trainIters(pairs,
               input_lang,
               output_lang,
               encoder,
               decoder,
               n_iters,
               print_every=100,
               plot_every=1000,
               learning_rate=0.01):
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # reset every print_every
    plot_loss_total = 0  # reset every print_every

    # define criterion and optimization algorithm
    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    training_pairs = [
        variablesFromPair(random.choice(pairs), input_lang, output_lang)
        for i in range(n_iters)
    ]
    criterion = nn.NLLLoss()

    # now proceed one iteration at a time
    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_variable = training_pair[0]
        target_variable = training_pair[1]

        # train on one example
        loss = train(input_variable, target_variable, encoder, decoder,
                     encoder_optimizer, decoder_optimizer, criterion)
        print_loss_total += loss
        plot_loss_total += loss

        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' %
                  (utils.timeSince(start,
                                   float(iter) / float(n_iters)), iter,
                   float(iter) / float(n_iters) * 100, print_loss_avg))

        if iter % plot_every == 0:
            plot_loss_avg = plot_loss_total / float(plot_every)
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0

    # plot the learning curve
    utils.showPlot(plot_losses)
Example #14
def trainIters(lang,
               dataSet,
               pairs,
               encoder,
               decoder,
               n_iters,
               print_every=1000,
               plot_every=100,
               learning_rate=0.01):
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)

    # Randomly sample the training pairs
    training_pairs = [random.choice(pairs) for i in range(n_iters)]
    criterion = nn.NLLLoss()

    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_variable = training_pair[0]
        target_variable = training_pair[1]

        loss = train(input_variable, target_variable, encoder, decoder,
                     encoder_optimizer, decoder_optimizer, criterion)
        print_loss_total += loss
        plot_loss_total += loss
        # if print_loss_total / print_every <= 0.0003:
        #    break

        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' % (timeSince(start, float(
                iter / n_iters)), iter, iter / n_iters * 100, print_loss_avg))

        if iter % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0

    torch.save(encoder,
               setting.MODEL_HOME + "/%s.%s.encoder.pkl" % (dataSet, lang))
    torch.save(decoder,
               setting.MODEL_HOME + "/%s.%s.decoder.pkl" % (dataSet, lang))

    showPlot(plot_losses)
Example #15
    def train(self,
              pairs,
              n_iters,
              max_length=1000,
              teacher_forcing_ratio=0.5,
              print_every=1000,
              plot_every=100,
              learning_rate=0.01):
        start = time.time()
        plot_losses = []
        print_loss_total = 0  # Reset every print_every
        plot_loss_total = 0  # Reset every plot_every

        encoder_optimizer = optim.SGD(self.encoder.parameters(),
                                      lr=learning_rate)
        decoder_optimizer = optim.SGD(self.decoder.parameters(),
                                      lr=learning_rate)
        training_pairs = [
            tensorsFromPair(self.input_lang, self.output_lang,
                            random.choice(pairs), self.device)
            for i in range(n_iters)
        ]
        criterion = nn.NLLLoss()

        for iter in range(1, n_iters + 1):
            training_pair = training_pairs[iter - 1]
            input_tensor = training_pair[0]
            target_tensor = training_pair[1]

            loss = self.step(input_tensor, target_tensor, encoder_optimizer,
                             decoder_optimizer, criterion, max_length,
                             teacher_forcing_ratio)
            print_loss_total += loss
            plot_loss_total += loss

            if iter % print_every == 0:
                print_loss_avg = print_loss_total / print_every
                print_loss_total = 0
                print('%s (%d %d%%) %.4f' %
                      (timeSince(start, iter / n_iters), iter,
                       iter / n_iters * 100, print_loss_avg))

            if iter % plot_every == 0:
                plot_loss_avg = plot_loss_total / plot_every
                plot_losses.append(plot_loss_avg)
                plot_loss_total = 0

        showPlot(plot_losses)
Example #16
def train(train_env, vocab_size, n_iters, log_every=1000, val_envs={}):
    ''' Train on training set, validating on both seen and unseen. '''

    agent = ActorCriticAgent(train_env, vocab_size, "", batch_size,
                             max_episode_len)

    data_log = defaultdict(list)
    start = time.time()
    guide_prob = 0.7
    for idx in range(0, n_iters, log_every):
        interval = min(log_every, n_iters - idx)
        iter = idx + interval

        agent.train(interval, guide_prob)

        train_losses = np.array(agent.losses)
        train_loss_avg = np.average(train_losses)
        data_log['train loss'].append(train_loss_avg)
        loss_str = ''  #'guide prob: %.2f' % guide_prob
        #loss_str += ', train loss: %.4f' % train_loss_avg
        # Run validation
        for env_name, (env, evaluator) in val_envs.iteritems():
            agent.env = env
            agent.results_path = '%s%s_%s_iter_%d.json' % (
                RESULT_DIR, model_prefix, env_name, iter)
            agent.test(0.0)  #guide_prob)

            #val_losses = np.array(agent.losses)
            #val_loss_avg = np.average(val_losses)
            #data_log['%s loss' % env_name].append(val_loss_avg)

            agent.write_results()

            score_summary, _ = evaluator.score(agent.results_path)
            #loss_str += ', %s loss: %.4f' % (env_name, val_loss_avg)
            loss_str += ', %s' % (env_name)
            for metric, val in score_summary.iteritems():
                data_log['%s %s' % (env_name, metric)].append(val)
                if metric in ['success_rate']:
                    loss_str += ' success: %.2f' % (val)

        agent.env = train_env

        print('%s (%d %d%%) %s' % (timeSince(start,
                                             float(iter) / n_iters), iter,
                                   float(iter) / n_iters * 100, loss_str))
        guide_prob -= 0.01
        guide_prob = max(guide_prob, 0.0)
Example #17
def trainIters(encoder,
               decoder,
               n_iters,
               print_every=1000,
               plot_every=100,
               learning_rate=0.01,
               lang_pack=None):

    assert not (lang_pack == None), "None shall pass"
    input_lang, output_lang, pairs = lang_pack

    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    training_pairs = [
        tensorsFromPair(random.choice(pairs), langs=[input_lang, output_lang])
        for i in range(n_iters)
    ]
    criterion = nn.NLLLoss()

    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_tensor = training_pair[0]
        target_tensor = training_pair[1]

        loss = train(input_tensor, target_tensor, encoder, decoder,
                     encoder_optimizer, decoder_optimizer, criterion)
        print_loss_total += loss
        plot_loss_total += loss

        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' %
                  (timeSince(start, iter / n_iters), iter,
                   iter / n_iters * 100, print_loss_avg))

        if iter % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0

    showPlot(plot_losses)
Example #18
    def trainIters(self,
                   pairs,
                   input_lang,
                   output_lang,
                   n_iters,
                   print_every=1000,
                   plot_every=100,
                   char=False):
        start = time.time()
        plot_losses = []
        print_loss_total = 0  # Reset every print_every
        plot_loss_total = 0  # Reset every plot_every

        self.input_lang = input_lang
        self.output_lang = output_lang
        self.encoder_optimizer = optim.SGD(self.encoder.parameters(),
                                           lr=self.learning_rate)
        self.decoder_optimizer = optim.SGD(self.decoder.parameters(),
                                           lr=self.learning_rate)
        selected_pairs = [random.choice(pairs) for i in range(n_iters)]
        training_pairs = [
            self.tensorsFromPair(pair, char) for pair in selected_pairs
        ]
        self.criterion = nn.NLLLoss()

        for iter in range(1, n_iters + 1):
            training_pair = training_pairs[iter - 1]
            input_tensor = training_pair[0]
            target_tensor = training_pair[1]
            loss = self.train(input_tensor, target_tensor)
            print_loss_total += loss
            plot_loss_total += loss

            if iter % print_every == 0:
                print_loss_avg = print_loss_total / print_every
                print_loss_total = 0
                print('%s (%d %d%%) %.4f' %
                      (timeSince(start, iter / n_iters), iter,
                       iter / n_iters * 100, print_loss_avg))

            if iter % plot_every == 0:
                plot_loss_avg = plot_loss_total / plot_every
                plot_losses.append(plot_loss_avg)
                plot_loss_total = 0

        showPlot(plot_losses)
Example #19
    def train(self, epoch):
        """
        Return: the average training loss value of this epoch
        """
        self.model.train()

        self.set_transform("train")

        # to avoid loading dataset repeatedly
        if self.train_loader == None:
            self.train_loader = get_loader(image_dir = self.image_dir, attr_path = self.attr_path, 
                                            selected_attrs = self.selected_attrs, mode="train", 
                                            batch_size=self.batch_size, transform=self.transform)
            print("train_dataset size: {}".format(len(self.train_loader.dataset)))

        temp_loss = 0.0
            
        for batch_idx, samples in enumerate(self.train_loader):
            self.scheduler.step()

            images, labels = samples
            labels = torch.stack(labels).t() 
            images= images.to(self.device)
            outputs = self.model(images)
            self.optim_.zero_grad()
            if self.loss_type == "BCE_loss":
                total_loss = self.BCE_loss(outputs, labels)  

            elif self.loss_type == "focal_loss":
                total_loss = self.focal_loss(outputs, labels)

            total_loss.backward()
            self.optim_.step()
            temp_loss += total_loss.item()
            
            if batch_idx % 50 == 0:
                print("Epoch: {}/{}, training batch_idx : {}/{}, time: {}, loss: {}".format(epoch, self.epoches, 
                                batch_idx, int(len(self.train_loader.dataset)/self.batch_size), 
                                utils.timeSince(self.start_time), total_loss.item()))

        return temp_loss/(batch_idx + 1)
Example #20
def trainIters(encoder,
               decoder,
               n_iters,
               print_every=1000,
               plot_every=100,
               learning_rate=0.01):
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    training_pairs = [
        variablesFromPair(random.choice(pairs)) for i in range(n_iters)
    ]
    criterion = nn.NLLLoss()

    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_variable = training_pair[0]
        target_variable = training_pair[1]

        loss = train(input_variable, target_variable, encoder, decoder,
                     encoder_optimizer, decoder_optimizer, criterion)
        print_loss_total += loss
        plot_loss_total += loss

        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' %
                  (timeSince(start, iter / n_iters), iter,
                   iter / n_iters * 100, print_loss_avg))

        if iter % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0

    showPlot(plot_losses)
Example #21
def trainIters(input_data,
               encoder,
               decoder,
               encoder_optimizer,
               decoder_optimizer,
               n_iters=20,
               print_every=10):
    """Applies training loop to train models on data"""
    if encoder != None:
        encoder.train()
    decoder.train()
    start = time.time()

    print_loss_total = 0  # Reset every print_every
    criterion = nn.NLLLoss()

    # Sample n random pairs
    selected_indices = np.random.choice(len(input_data),
                                        n_iters,
                                        replace=False)

    # For EACH pair train model to decrease loss
    for idx, selected_idx in enumerate(selected_indices):
        loss = train_supervised(input_data[selected_idx][0],
                                input_data[selected_idx][1],
                                input_data[selected_idx][2], encoder, decoder,
                                encoder_optimizer, decoder_optimizer,
                                criterion)
        print_loss_total += loss

        iter = idx + 1
        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' %
                  (utils.timeSince(start, iter / n_iters), iter,
                   iter / n_iters * 100, print_loss_avg))
Example #22
def train(train_env, encoder, decoder, n_iters, path_type, history, feedback_method, max_episode_len, MAX_INPUT_LENGTH, model_prefix,
    log_every=100, val_envs=None):
    ''' Train on training set, validating on both seen and unseen. '''
    if val_envs is None:
        val_envs = {}

    if agent_type == 'seq2seq':
        agent = Seq2SeqAgent(train_env, "", encoder, decoder, max_episode_len)
    else:
        sys.exit("Unrecognized agent_type '%s'" % agent_type)
    print 'Training a %s agent with %s feedback' % (agent_type, feedback_method)
    encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate, weight_decay=weight_decay)
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate, weight_decay=weight_decay) 

    data_log = defaultdict(list)
    start = time.time()
    print 'Start training'
    for idx in range(0, n_iters, log_every):

        interval = min(log_every,n_iters-idx)
        iter = idx + interval
        data_log['iteration'].append(iter)

        # Train for log_every interval
        agent.train(encoder_optimizer, decoder_optimizer, interval, feedback=feedback_method)
        train_losses = np.array(agent.losses)
        assert len(train_losses) == interval
        train_loss_avg = np.average(train_losses)
        data_log['train loss'].append(train_loss_avg)
        loss_str = 'train loss: %.4f' % train_loss_avg

        # Run validation
        for env_name, (env, evaluator) in val_envs.iteritems():
            agent.env = env
            agent.results_path = '%s%s_%s_iter_%d.json' % (RESULT_DIR, model_prefix, env_name, iter)
            # Get validation loss under the same conditions as training
            agent.test(use_dropout=True, feedback=feedback_method, allow_cheat=True)
            val_losses = np.array(agent.losses)
            val_loss_avg = np.average(val_losses)
            data_log['%s loss' % env_name].append(val_loss_avg)
            # Get validation distance from goal under test evaluation conditions
            agent.test(use_dropout=False, feedback='argmax')
            agent.write_results()
            score_summary, _ = evaluator.score(agent.results_path)
            loss_str += ', %s loss: %.4f' % (env_name, val_loss_avg)
            for metric, val in score_summary.iteritems():
                data_log['%s %s' % (env_name, metric)].append(val)
                if metric in ['success_rate', 'oracle success_rate', 'oracle path_success_rate', 'dist_to_end_reduction']:
                    loss_str += ', %s: %.3f' % (metric, val)

        agent.env = train_env

        print('%s (%d %d%%) %s' % (timeSince(start, float(iter)/n_iters),
                                             iter, float(iter)/n_iters*100, loss_str))
        df = pd.DataFrame(data_log)
        df.set_index('iteration')
        df_path = '%s%s-log.csv' % (PLOT_DIR, model_prefix)
        df.to_csv(df_path)
        
        split_string = "-".join(train_env.splits)
        enc_path = '%s%s_%s_enc_iter_%d' % (SNAPSHOT_DIR, model_prefix, split_string, iter)
        dec_path = '%s%s_%s_dec_iter_%d' % (SNAPSHOT_DIR, model_prefix, split_string, iter)
        agent.save(enc_path, dec_path)

    print 'Finish training'
Example #23
                if (ind + 1) % 10 == 0:
                    g_list[j].flush()
                    os.fsync(g_list[j].fileno())
        else:
            ssa = sortsummary(beam, beta=beta)
            g.write('-' * 5 + f'<{ind+1}>' + '-' * 5 + '\n')
            g.write('\n')
            if ssa == []:
                g.write('\n')
            else:
                for m in range(len(ssa)):
                    g.write(' '.join(ssa[m][1][1:]) + '\n')
                    g.write('{:.3f}'.format(ssa[m][0]) + '   ' + '{:.3f}'.format(ssa[m][3]) + '   ' + '{:.3f}'.format(ssa[m][4]) + '\n')
                    g.writelines(['%d,   ' % loc for loc in ssa[m][2]])
                    g.write('\n')
                g.write('\n')
            
            if (ind + 1) % 10 == 0:
                g.flush()
                os.fsync(g.fileno())
                
    print('time elapsed %s' % timeSince(start))
    if fixedlen:
        for gg in g_list:
            gg.close()
        print('results saved to: %s' % (("\n" + " " * 18).join(smrypath_list)))        
    else:
        g.close()
        print(f'results saved to: {smrypath}')

Example #24
        loss = criterion(output, y_batch)

        optimizer.zero_grad()
        loss.backward(retain_graph=True)
        optimizer.step()

        if loss.item() < min_loss:
            min_loss = loss.item()
            torch.save(model, model_fn)

        idx += 1

        if idx % 100 == 0:
            losses.append(loss.item())
            print('%s, %d epoch, %d index, %f loss' %
                  (timeSince(start), epoch, idx, loss.item()))

plt.figure()
plt.plot(np.arange(len(losses)), losses)
plt.show()


def category_from_output(output, all_categories):
    top_n, top_i = output.topk(1)
    category_i = top_i[0].item()
    return all_categories[category_i]


def test(X, y, y_vec):
    model = torch.load(model_fn)
Example #25
all_losses = []
total_loss = 0  # Reset every plot_every iters

start = time.time()

save_model = "advacnced_start_optim.pth"

for it in range(1, epoch + 1):
    output, loss = train(*utils.randomTrainingExample_new(
        all_categories, category_lines, n_letters, all_letters),
                         teacher_forcing=True)
    total_loss += loss

    if it % print_every == 0:
        print('%s (%d %d%%) %.4f' %
              (utils.timeSince(start), it, it / epoch * 100, loss))
        torch.save(rnn.state_dict(), save_model)
        utils.evaluation(it,
                         all_categories,
                         n_letters,
                         all_letters,
                         rnn,
                         start_token=True)

        #utils.samples('Russian', all_categories, n_letters, all_letters, rnn, 'RUS', start_token=True)
        #utils.samples('German', all_categories, n_letters, all_letters, rnn, 'GER', start_token=True)
        #utils.samples('Spanish', all_categories, n_letters, all_letters, rnn, 'SPA', start_token=True)
        #utils.samples('Chinese', all_categories, n_letters, all_letters, rnn, 'CHI', start_token=True)

    if it % plot_every == 0:
        all_losses.append(total_loss / plot_every)
Example #26
def train_main(args, topo, trainset, validset, model, optimizer):
    print("-----Training Start-----")
    open_log(args.save_dir, dir=True, message="result")
    cost_log = open_log(args.cost_path, message="cost")
    valid_log = open_log(args.valid_path, message="valid")

    cost_log.write("Iter\ttrain_loss\ttrain_acc\ttime\n")

    start = time.time()
    valid_eval = 0
    best_eval = 99999.0
    LR = args.learning_rate

    # learning decay, early stop
    n_decay = 0
    patience = 0
    early_stop = 0

    # Performances
    total_loss = 0
    total_acc = 0
    best_mean = 0
    best_var = 0
    best_fail_ratio = 0
    best_eval = 99999.0

    torch.autograd.set_detect_anomaly(True)
    for epoch in range(args.epoch):
        print("-----{} Epoch-----".format(epoch))
        if early_stop == 1:
            break
        for i in range(trainset.n_data()):
            train_topo = trainset.getitem(i, topo)
            model.train()

            for reqid in train_topo.reqs.keys():
                label = train_topo.label[reqid]
                gen_nodes, loss = model(label, reqid, train_topo)
                gen_nodes = gen_nodes.cpu().numpy()

                model.zero_grad()
                loss.backward()
                optimizer.step()

                train_topo.update_topology(label, reqid)

            total_acc += tmp_total_acc / float(n_req)

            if i % args.print_iter == 0 and i > 1:
                avg_acc = (total_acc / args.print_iter) * 100
                avg_loss = total_loss / args.print_iter
                total_loss = 0
                total_acc = 0
                print("- {} epoch, {} gpu, {} model, {} lr_decay, {} patience -".format(\
                        epoch, args.device, args.model_name, n_decay, patience))
                print("{} iters - {:.3f} train_loss - {:.3f} train_acc - {} time".format\
                    (i, avg_loss, avg_acc, timeSince(start)))
                print("Current Best Valid {:.3f}".format(best_eval))
                print("Label Sequence : ", label)
                print("Gen.  Sequence : ", gen_nodes)

            if i % args.valid_iter == 0 and i > 1:
                train_log.write("{}\t{}\t{}\t".format(i, avg_loss, avg_acc))
                print("path : ", args.save_model_path)
                torch.save(model, args.save_model_path)
                print("Save the model : ", args.save_model_path)
                print("-----------Validation Results----------")
                valid_mean, valid_var, valid_time, valid_fail_ratio, fail0, fail1, fail2, _, _, _ = \
                                generation(args, valid_loader, model, use='test')
                print("Valid Ratio Mean      : {:.3f}".format(valid_mean))
                print("Valid Ratio Var.      : {:.3f}".format(valid_var))
                print(
                    "Ratio of Fail (%)     : {:.3f}".format(valid_fail_ratio))
                print("Ratio of fail type(%) : \nFAIL0={:.3f} FAIL1={:.3f} FAIL2={:.3f}"\
                                .format(fail0,fail1,fail2))
                print("Spent Avg. Time : {:.3f}".format(valid_time))
                train_log.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(\
                    valid_mean,valid_var, valid_fail_ratio, fail0, fail1, fail2))

                valid_eval = valid_mean + (valid_fail_ratio / 25.0)
                if valid_eval < best_eval:
                    print("Find Best Model! Saving : ",
                          args.save_model_path + '.best.pth')
                    torch.save(model, args.save_model_path + '.best.pth')
                    best_mean,best_var,best_fail_ratio,best_fail_type,best_time,best_eval=\
                    valid_mean,valid_var,valid_fail_ratio,(fail0,fail1,fail2),valid_time,\
                    valid_eval

                    patience = 0
                else:
                    patience += 1
                    if patience >= args.patience:
                        if n_decay >= args.lr_decay:
                            print("Early Stopping...")
                            print("Best Valid Mean : {}".format(best_mean))
                            print("Best Valid Var. : {}".format(best_var))
                            print(
                                "Best Fail Ratio : {}".format(best_fail_ratio))
                            print("Best Fail0 : {}, Fail1 : {}, Fail2 : {}".format(\
                                    best_fail_type[0],best_fail_type[1],best_fail_type[2]))
                            print("The time for one packet : {}".format(
                                best_time))
                            print("It too {} packet, {} epoch, {} Time"\
                                .format(i, epoch, timeSince(start)))
                            early_stop = 1
                            break
                        else:
                            n_decay += 1
                            patience = 0
                            print("Learning Rate Decaying...")
                            for param_group in optimizer.param_groups:
                                param_group['lr'] = LR * 0.1
                                LR = LR * 0.1
                            print("Current LR : ", LR)
            if early_stop == 1:
                break
    print("Training End..")
    return train_log
Example #27
File: run_search.py Project: hyzcn/FAST
def sweep_gamma(args, agent, val_envs, gamma_space):
    ''' Train on training set, validating on both seen and unseen. '''

    print('Sweeping gamma, loss under %s feedback' % args.feedback_method)

    data_log = defaultdict(list)
    start = time.time()

    split_string = "-".join(list(val_envs.keys()))

    def make_path(gamma):
        return os.path.join(
            args.SNAPSHOT_DIR, '%s_gamma_%.3f' % (
                get_model_prefix(args, val_env.image_features_list),
                gamma))

    best_metrics = {}
    for idx,gamma in enumerate(gamma_space):
        agent.scorer.gamma = gamma
        data_log['gamma'].append(gamma)
        loss_str = ''

        save_log = []
        # Run validation
        for env_name, (val_env, evaluator) in sorted(val_envs.items()):
            print("evaluating on {}".format(env_name))
            agent.env = val_env
            if hasattr(agent, 'speaker') and agent.speaker:
                agent.speaker.env = val_env
            agent.results_path = '%s%s_%s_gamma_%.3f.json' % (
                args.RESULT_DIR, get_model_prefix(
                    args, val_env.image_features_list), env_name, gamma)

            # Get validation distance from goal under evaluation conditions
            agent.test(use_dropout=False, feedback='argmax')
            if not args.no_save:
                agent.write_results()
            score_summary, _ = evaluator.score_results(agent.results)
            pp.pprint(score_summary)

            for metric, val in sorted(score_summary.items()):
                data_log['%s %s' % (env_name, metric)].append(val)
                if metric in ['success_rate']:
                    loss_str += ', %s: %.3f' % (metric, val)

                    key = (env_name, metric)
                    if key not in best_metrics or best_metrics[key] < val:
                        save_log.append("new best _%s-%s=%.3f" % (
                                env_name, metric, val))

        idx = idx+1
        print(('%s (%.3f %d%%) %s' % (
            timeSince(start, float(idx)/len(gamma_space)),
            gamma, float(idx)/len(gamma_space)*100, loss_str)))
        for s in save_log:
            print(s)

        df = pd.DataFrame(data_log)
        df.set_index('gamma')
        df_path = '%s%s_%s_log.csv' % (
            args.PLOT_DIR, get_model_prefix(
            args, val_env.image_features_list), split_string)
        df.to_csv(df_path)
Example #28
def main():
    """Runs simulation
    """
    SIZE_X, SIZE_Y = 128, 128
    FLUID_DENSITY = 0.1
    # Section 3.2 discusses how to set this in some detail. Related by the CFL condition.
    # Here it is set empirically; as long as it's small enough, it is OK.
    TIMESTEP = 0.005
    MAX_TIME = 8
    N_STEPS = (int)(MAX_TIME / TIMESTEP)
    PRINT_EVERY = 4

    # A buffer that stores a RGB PNG image to be outputted
    pixels = np.zeros((SIZE_X, SIZE_Y, 3), dtype=np.uint8)

    fluid_quantities = {
        'particle_density':
        FluidQuantity(size=(SIZE_X, SIZE_Y),
                      offset=(0.5, 0.5),
                      dx=1 / min(SIZE_X, SIZE_Y))
    }

    def unsigned_set_func_wrapper(target_val):
        """Returns a function f: (cur_val) -> target_val"""
        def wrapped(cur_val):
            """Returns target_val is target_val is larger"""
            if abs(cur_val) < abs(target_val):
                return target_val
            else:
                return cur_val

        return wrapped

    conditions = {
        'particle_density': {
            'block': (0.45, 0.2, 0.55, 0.21),
            'func': unsigned_set_func_wrapper(1.0)
        },
        'u': {
            'block': (0.45, 0.2, 0.55, 0.21),
            'func': unsigned_set_func_wrapper(0.0)
        },
        'v': {
            'block': (0.45, 0.2, 0.55, 0.21),
            'func': unsigned_set_func_wrapper(3.0)
        }
    }

    fluid_solver = BaselineFluidSolver(size=(SIZE_X, SIZE_Y),
                                       fluid_density=FLUID_DENSITY,
                                       fluid_quantities=fluid_quantities)

    def print_image(particle_density, image, filename):
        """Outputs a PNG using particle_density FluidQuantity"""

        size = image.shape[0:2]
        shade = ((1 - particle_density._val.reshape(size)) *
                 255.0).astype('uint8')
        image[:, :, 0] = shade
        image[:, :, 1] = shade
        image[:, :, 2] = shade

        im = Image.fromarray(image)
        im.save(filename, 'PNG', quality=100)

    img_index = 0
    start_time = time.time()
    for step in range(1, N_STEPS + 1):
        fluid_solver.set_condition(conditions)
        fluid_solver.step(TIMESTEP)

        if step % PRINT_EVERY == 0:
            print_image(fluid_quantities['particle_density'], pixels,
                        'output/frame{:05d}.png'.format(img_index))
            img_index += 1

        print('%s (%d %d%%)' % (timeSince(
            start_time, step / N_STEPS), step, step / N_STEPS * 100))
Example #29
        output, hidden = model(X_batch, hidden)
        loss = criterion(output, y_batch)

        optimizer.zero_grad()
        loss.backward(retain_graph=True)        
        optimizer.step()        

        if loss.item() < min_loss:
            min_loss = loss.item()
            torch.save(model, model_fn)

        idx += 1

        if idx % 100 == 0:
            losses.append(loss.item())
            print('%s, %d epoch, %d index, %f loss' % (timeSince(start), epoch, idx, loss.item()))


plt.figure()
plt.plot(np.arange(len(losses))/10.0, losses)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()


def category_from_output(output, all_categories):
    top_n, top_i = output.topk(1)
    category_i = top_i[0].item()
    return all_categories[category_i]

y_preds = []
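
A small usage sketch for category_from_output, with made-up scores and labels:

import torch

all_categories = ['negative', 'neutral', 'positive']  # illustrative label set
output = torch.tensor([[0.1, 0.2, 1.5]])               # fake (1, n_categories) scores
print(category_from_output(output, all_categories))    # prints 'positive'
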
Example #30
File: pcg.py Project: belinghy/cpsc542G
def main():
    """Runs simulation
    """
    SIZE_X, SIZE_Z = 128, 128
    FLUID_DENSITY = 1
    # Section 3.2 discusses how to set this in some detail; the choice is related to the CFL condition.
    # Here it is chosen empirically: as long as it is small enough, it is OK.
    TIMESTEP = 0.005
    MAX_TIME = 8
    N_STEPS = int(MAX_TIME / TIMESTEP)
    PRINT_EVERY = 4
    UPDATE_EVERY = 4

    # A buffer that stores an RGB image to be written out as a PNG
    pixels = np.zeros((SIZE_X, SIZE_Z, 3), dtype=np.uint8)

    fluid_solver = BaselineFluidSolver(
        size=(SIZE_X, SIZE_Z), rho=FLUID_DENSITY)

    def update_image(particle_density, image):
        """Update image using particle_density"""

        size = image.shape[0:2]
        shade = ((1 - particle_density._val.reshape(size))
                 * 255.0).astype('uint8')
        image[:, :, 0] = shade
        image[:, :, 1] = shade
        image[:, :, 2] = shade

    def save_image(image, filename):
        """Save image"""
        im = Image.fromarray(image)
        im.save(filename, 'PNG', quality=100)

    def update_frame(im, image):
        """Update animation frame"""
        im.set_array(image)
        plt.draw()
        plt.pause(0.01)

    fig = plt.figure()
    im = plt.imshow(pixels, animated=True)

    img_index = 0
    start_time = time.time()
    fluid_solver.gen_sparse_poisson(TIMESTEP)
    fluid_solver.modified_incomplete_cholesky()
    for step in range(1, N_STEPS+1):
        print(step)
        fluid_solver.set_condition()
        fluid_solver.step(TIMESTEP)

        if step % UPDATE_EVERY == 0:
            update_image(fluid_solver._p, pixels)
            # Realtime plotting
            update_frame(im, pixels)

        if step % PRINT_EVERY == 0:
            # Uncomment to output PNG
            # save_image(pixels, 'output/frame{:05d}.png'.format(img_index))
            img_index += 1

        if step == 230:
            save_image(pixels, 'pcg.png')

        print('%s (%d %d%%)' % (timeSince(start_time, step / N_STEPS),
                                step, step / N_STEPS * 100))
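
Example #30 builds a sparse Poisson matrix and a modified incomplete Cholesky factor before stepping, which points to a preconditioned conjugate gradient (PCG) pressure solve. A minimal, self-contained PCG sketch follows; it is not the repo's implementation, and precond_solve stands in for whatever applies the MIC(0) preconditioner:

import numpy as np

def pcg(A, b, precond_solve, tol=1e-8, max_iter=200):
    """Minimal preconditioned conjugate gradient for a symmetric positive-definite A."""
    x = np.zeros_like(b, dtype=float)
    r = b - A @ x
    z = precond_solve(r)          # apply M^{-1} to the residual
    p = z.copy()
    rz = r @ z
    for _ in range(max_iter):
        Ap = A @ p
        alpha = rz / (p @ Ap)
        x += alpha * p
        r -= alpha * Ap
        if np.linalg.norm(r) < tol:
            break
        z = precond_solve(r)
        rz_new = r @ z
        p = z + (rz_new / rz) * p
        rz = rz_new
    return x

# Tiny check with a Jacobi (diagonal) preconditioner standing in for MIC(0):
A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
x = pcg(A, b, lambda r: r / np.diag(A))
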
Example #31
File: run_search.py Project: hyzcn/FAST
def _train(args, train_env, agent, optimizers,
           n_iters, log_every=train.log_every, val_envs=None):
    ''' Train on training set, validating on both seen and unseen. '''

    if val_envs is None: val_envs = {}

    print('Training with %s feedback' % args.feedback_method)

    data_log = defaultdict(list)
    start = time.time()

    split_string = "-".join(train_env.splits)

    def make_path(n_iter):
        return os.path.join(
            args.SNAPSHOT_DIR, '%s_%s_iter_%d' % (
                get_model_prefix(args, train_env.image_features_list),
                split_string, n_iter))

    best_metrics = {}
    last_model_saved = {}
    for idx in range(0, n_iters, log_every):
        agent.env = train_env
        if hasattr(agent, 'speaker') and agent.speaker:
            agent.speaker.env = train_env

        interval = min(log_every, n_iters-idx)
        iter = idx + interval
        data_log['iteration'].append(iter)

        # Train for log_every interval
        agent.train(optimizers, interval, feedback=args.feedback_method)
        train_losses = np.array(agent.losses)
        train_loss_avg = np.average(train_losses)
        data_log['train loss'].append(train_loss_avg)
        loss_str = 'train loss: %.4f' % train_loss_avg

        ce_loss = np.average(agent.ce_losses)
        pm_loss = np.average(agent.pm_losses)
        loss_str += ' ce {:.3f}|pm {:.3f}'.format(ce_loss, pm_loss)

        save_log = []
        # Run validation
        for env_name, (val_env, evaluator) in sorted(val_envs.items()):
            agent.env = val_env
            if hasattr(agent, 'speaker') and agent.speaker:
                agent.speaker.env = val_env

            agent.results_path = '%s%s_%s_iter_%d.json' % (
                args.RESULT_DIR, get_model_prefix(
                    args, train_env.image_features_list),
                env_name, iter)

            # Get validation loss under the same conditions as training
            agent.test(use_dropout=True, feedback=args.feedback_method,
                       allow_cheat=True)
            val_losses = np.array(agent.losses)
            val_loss_avg = np.average(val_losses)
            data_log['%s loss' % env_name].append(val_loss_avg)
            loss_str += ', %s loss: %.4f' % (env_name, val_loss_avg)

            ce_loss = np.average(agent.ce_losses)
            pm_loss = np.average(agent.pm_losses)
            data_log['%s ce' % env_name].append(ce_loss)
            data_log['%s pm' % env_name].append(pm_loss)
            loss_str += ' ce {:.3f}|pm {:.3f}'.format(ce_loss, pm_loss)

            # Get validation distance from goal under evaluation conditions
            agent.test(use_dropout=False, feedback='argmax')
            if not args.no_save:
                agent.write_results()
            score_summary, _ = evaluator.score_results(agent.results)

            for metric, val in sorted(score_summary.items()):
                data_log['%s %s' % (env_name, metric)].append(val)
                if metric in ['success_rate']:
                    loss_str += ', %s: %.3f' % (metric, val)

                    key = (env_name, metric)
                    if key not in best_metrics or best_metrics[key] < val:
                        best_metrics[key] = val
                        if not args.no_save:
                            model_path = make_path(iter) + "_%s-%s=%.3f" % (
                                env_name, metric, val)
                            save_log.append(
                                "new best, saved model to %s" % model_path)
                            agent.save(model_path)
                            if key in last_model_saved:
                                for old_model_path in last_model_saved[key]:
                                    if os.path.isfile(old_model_path):
                                        os.remove(old_model_path)
                            #last_model_saved[key] = [agent.results_path] +\
                            last_model_saved[key] = [] +\
                                list(agent.modules_paths(model_path))

        print(('%s (%d %d%%) %s' % (
            timeSince(start, float(iter)/n_iters),
            iter, float(iter)/n_iters*100, loss_str)))
        for s in save_log:
            print(colorize(s))

        if not args.no_save:
            if save_every and iter % save_every == 0:
                agent.save(make_path(iter))

            df = pd.DataFrame(data_log)
            df.set_index('iteration', inplace=True)  # set_index returns a new frame unless inplace=True
            df_path = '%s/trainsearch_%s_%s_log.csv' % (
                args.PLOT_DIR, get_model_prefix(
                    args, train_env.image_features_list), split_string)
            df.to_csv(df_path)
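
The best-checkpoint bookkeeping in _train (save when a tracked metric improves, then delete the files saved for the previous best) can be distilled into a small helper; the name and signature below are illustrative, not part of the repo:

import os

def save_if_best(key, val, best_metrics, last_saved, path, save_fn):
    """Call save_fn(path) when val beats best_metrics[key]; remove the old best files."""
    if key in best_metrics and best_metrics[key] >= val:
        return False
    best_metrics[key] = val
    save_fn(path)
    for old_path in last_saved.get(key, []):
        if os.path.isfile(old_path):
            os.remove(old_path)
    last_saved[key] = [path]
    return True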