Example #1
def recv_data(s_socket):
    global termserver
    # mode
    # 0 : recv CID
    # 1 : recv rm CID
    # 2 : recv chat
    # 3 : recv keep alive
    # 4 : recv exit
    # dispatch table: mode number -> handler
    mode2cmd = {0: saveCID,
                3: reset_time,
                4: rm_timer}

    while True:
        sleep(0.01)
        data, addr = s_socket.return_data()

        # if server is terminated
        if termserver:
            return
        # skip if nothing was received
        if len(data) == 0:
            continue

        # unpack data
        mode, unpacked = utils.unpack_data(data.decode())
        # response
        mode2cmd[mode](s_socket, addr, unpacked)
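Examples #1 and #12 decode each UDP datagram and pass it to utils.unpack_data, which yields a mode number plus a payload that is then dispatched through mode2cmd. The utils module itself is not part of these snippets; the pair below is only a minimal sketch of what such helpers could look like, and the "mode|message" wire format is an assumption made for illustration.

def pack_data(mode, msg):
    # Hypothetical counterpart used by the sender: prefix the mode, then encode to bytes.
    return "{}|{}".format(mode, msg).encode()

def unpack_data(raw):
    # Hypothetical receiver-side helper: split a decoded "mode|message" string
    # into an integer mode and the remaining payload.
    mode, _, msg = raw.partition("|")
    return int(mode), msg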
Example #2
def _construct_examples_batch(batch_size, split, num_classes,
                              num_tr_examples_per_class,
                              num_val_examples_per_class):
  data_provider = data.DataProvider(split, config.get_data_config())
  examples_batch = data_provider.get_batch(batch_size, num_classes,
                                           num_tr_examples_per_class,
                                           num_val_examples_per_class)
  return utils.unpack_data(examples_batch)
Example #3
def joint_elbo(K):
    model.eval()
    llik = 0
    # look up and call 'm_iwae' when the model has sub-VAEs (hasattr(model, 'vaes')), else 'iwae'
    obj = locals()[('m_' if hasattr(model, 'vaes') else '') + 'iwae']()
    for dataT in test_loader:
        data = unpack_data(dataT, device=device)
        llik += obj(model, data, K).item()
    print('Marginal Log Likelihood of joint {} (IWAE, K = {}): {:.4f}'.format(
        model.modelName, K, llik / N))
Example #4
def llik_eval(K):
    model.eval()
    llik_joint = 0
    for dataT in test_loader:
        data = unpack_data(dataT, device=device)
        qz_xs, px_zs, zss = model(data, K)
        llik_joint += iwae(qz_xs, px_zs, zss, data)
    print('Marginal Log Likelihood of joint {} (IWAE, K = {}): {:.4f}'.format(
        model.modelName, K, llik_joint / N))
Example #5
    def train(epoch):
        model.train()
        train_loss_meter = AverageMeter()

        # NOTE: is_paired is 1 if the example is paired
        for batch_idx, dataT in enumerate(train_loader):
            mnist, svhn = unpack_data(dataT, device=device)

            if epoch < args.annealing_epochs:
                # compute the KL annealing factor for the current mini-batch in the current epoch
                annealing_factor = (
                    float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
                    float(args.annealing_epochs * N_mini_batches))
            else:
                # by default the KL annealing factor is unity
                annealing_factor = 1.0

            batch_size = len(mnist)
            optimizer.zero_grad()
            recon_mnist_1, recon_svhn_1, mu_1, logvar_1 = model(mnist, svhn)
            # recon_mnist_2, recon_svhn_2, mu_2, logvar_2 = model(mnist)
            # recon_mnist_3, recon_svhn_3, mu_3, logvar_3 = model(text=svhn)

            # compute ELBO for each data combo
            joint_loss = elbo_loss(recon_mnist_1,
                                   mnist,
                                   recon_svhn_1,
                                   svhn,
                                   mu_1,
                                   logvar_1,
                                   lambda_mnist=args.lambda_mnist,
                                   lambda_svhn=args.lambda_svhn,
                                   annealing_factor=annealing_factor)
            # mnist_loss = elbo_loss(recon_mnist_2, mnist, None, None, mu_2, logvar_2,
            #                        lambda_mnist=args.lambda_mnist, lambda_svhn=args.lambda_svhn,
            #                        annealing_factor=annealing_factor)
            # svhn_loss  = elbo_loss(None, None, recon_svhn_3, svhn, mu_3, logvar_3,
            #                        lambda_mnist=args.lambda_mnist, lambda_svhn=args.lambda_svhn,
            #                        annealing_factor=annealing_factor)
            # train_loss = joint_loss + mnist_loss + svhn_loss
            train_loss = joint_loss
            train_loss_meter.update(train_loss.data.item(), batch_size)

            # compute gradients and take step
            train_loss.backward()
            optimizer.step()

            if batch_idx % args.log_interval == 0:
                print(
                    'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'
                    .format(epoch, batch_idx * len(mnist),
                            len(train_loader.dataset),
                            100. * batch_idx / len(train_loader),
                            train_loss_meter.avg, annealing_factor))

        print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch,
                                                     train_loss_meter.avg))
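The annealing branch in Example #5 warms up the KL term linearly across mini-batches and then holds it at 1.0 from epoch args.annealing_epochs onward. Restated as a standalone helper (the default annealing_epochs and n_mini_batches values are assumptions for illustration, not the original configuration):

def annealing_factor(epoch, batch_idx, annealing_epochs=10, n_mini_batches=100):
    # Linear KL warm-up: ~0.001 at the first batch of epoch 1, climbing each
    # mini-batch, and pinned to exactly 1.0 once epoch reaches annealing_epochs.
    if epoch < annealing_epochs:
        return (float(batch_idx + (epoch - 1) * n_mini_batches + 1)
                / float(annealing_epochs * n_mini_batches))
    return 1.0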
Example #6
def _construct_examples_batch(batch_size, split, num_classes,
                              num_tr_examples_per_class,
                              num_val_examples_per_class,
                              use_cross=False):
  data_provider = data.DataProvider(split, config.get_data_config(),
                                    feat_dim=FLAGS.feat_dim, use_cross=use_cross)
  examples_batch = data_provider.get_batch(batch_size, num_classes,
                                           num_tr_examples_per_class,
                                           num_val_examples_per_class,
                                           num_pretrain_classes=FLAGS.num_pretrain_classes)
  return utils.unpack_data(examples_batch)
Example #7
def estimate_log_marginal(K):
    """Compute an IWAE estimate of the log-marginal likelihood of test data."""
    model.eval()
    marginal_loglik = 0
    with torch.no_grad():
        for dataT in test_loader:
            data = unpack_data(dataT, device=device)
            marginal_loglik += -t_objective(model, data, K).item()

    marginal_loglik /= len(test_loader.dataset)
    print('Marginal Log Likelihood (IWAE, K = {}): {:.4f}'.format(
        K, marginal_loglik))
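Examples #3, #4, #7, #8, and #9 all report an importance-weighted (IWAE) estimate of the marginal log-likelihood, averaged over the test set. The exact iwae / m_iwae / t_objective functions are not shown in these snippets; the helper below is only a sketch of the underlying estimator, assuming the per-sample log-joint and log-posterior terms are already available.

import math
import torch

def iwae_log_marginal(log_joint, log_q):
    # log_joint, log_q: [K, batch] tensors holding log p(x, z_k) and log q(z_k | x)
    # for K importance samples per test example.
    log_w = log_joint - log_q
    # log p(x) ~= logsumexp_k(log w_k) - log K, summed over the mini-batch.
    return (torch.logsumexp(log_w, dim=0) - math.log(log_w.size(0))).sum()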
Example #8
def test(epoch, agg):
    model.eval()
    b_loss = 0
    with torch.no_grad():
        for i, dataT in enumerate(test_loader):
            data = unpack_data(dataT, device=device)
            loss = -t_objective(model, data, K=args.K)
            b_loss += loss.item()
            if i == 0:
                model.reconstruct(data, runPath, epoch)
                if not args.no_analytics:
                    model.analyse(data, runPath, epoch)
    agg['test_loss'].append(b_loss / len(test_loader.dataset))
    print('====>             Test loss: {:.4f}'.format(agg['test_loss'][-1]))
Example #9
def train(epoch, agg):
    model.train()
    b_loss = 0
    for i, dataT in enumerate(train_loader):
        data = unpack_data(dataT, device=device)
        optimizer.zero_grad()
        loss = -objective(model, data, K=args.K)
        loss.backward()
        optimizer.step()
        b_loss += loss.item()
        if args.print_freq > 0 and i % args.print_freq == 0:
            print("iteration {:04d}: loss: {:6.3f}".format(
                i,
                loss.item() / args.batch_size))
    agg['train_loss'].append(b_loss / len(train_loader.dataset))
    print('====> Epoch: {:03d} Train loss: {:.4f}'.format(
        epoch, agg['train_loss'][-1]))
Example #10
def _construct_examples_batch(batch_size,
                              split,
                              num_classes,
                              num_tr_examples_per_class,
                              num_val_examples_per_class,
                              db_path,
                              sp_para=None):
    data_provider = data.DataProvider(split, config.get_data_config())
    data_provider.load_db(db_path)
    if sp_para:
        test_id, sp_bias, weights, k = sp_para
        data_provider.set_sp_paras(weights, sp_bias)
        data_provider.set_test_id(test_id, k)

    examples_batch = data_provider.get_batch(batch_size, num_classes,
                                             num_tr_examples_per_class,
                                             num_val_examples_per_class)
    return utils.unpack_data(examples_batch)
Example #11
    def test():
        model.eval()
        test_loss_meter = AverageMeter()

        for batch_idx, dataT in enumerate(test_loader):
            mnist, svhn = unpack_data(dataT, device=device)

            batch_size = len(mnist)

            with torch.no_grad():
                recon_mnist_1, recon_svhn_1, mu_1, logvar_1 = model(mnist, svhn)
                recon_mnist_2, recon_svhn_2, mu_2, logvar_2 = model(mnist)
                recon_mnist_3, recon_svhn_3, mu_3, logvar_3 = model(text=svhn)

                joint_loss = elbo_loss(recon_mnist_1, mnist, recon_svhn_1, svhn, mu_1, logvar_1)
                mnist_loss = elbo_loss(recon_mnist_2, mnist, None, None, mu_2, logvar_2)
                svhn_loss  = elbo_loss(None, None, recon_svhn_3, svhn, mu_3, logvar_3)
                test_loss  = joint_loss + mnist_loss + svhn_loss
                test_loss_meter.update(test_loss.item(), batch_size)

        print('====> Test Loss: {:.4f}'.format(test_loss_meter.avg))
        return test_loss_meter.avg
Example #12
def recv_data(c_socket):
    #print("recv_data_init")
    # mode
    # 0 : recv add CID
    # 1 : recv rm CID
    # 2 : recv chat
    mode2cmd = {0: add_CID,
                1: rm_CID,
                2: print_chat}
    while True:
        if exit_flag == 1:
            break
        data, addr = c_socket.return_data()
        if len(data) == 0:
            continue
        time.sleep(0.01)
        # unpack data to mode and message
        mode, msg = utils.unpack_data(data.decode())

        # execute function
        mode2cmd[mode](msg)
    print("recv_data thread terminated")
Example #13
    def _sample(N):
        model.eval()
        for batch_idx, dataT in enumerate(test_loader):
            mnist, svhn = unpack_data(dataT, device=device)
            break
        gt = [mnist[:N], svhn[:N], torch.cat([resize_img(mnist[:N], svhn[:N]), svhn[:N]])]
        zss = OrderedDict()

        # mode 1: generate
        zss['gen_samples'] = [torch.zeros((N * N, model.n_latents)).to(device),
                              torch.ones((N * N, model.n_latents)).to(device)]

        # mode 2: mnist --> mnist, mnist --> svhn
        mu, logvar = model.infer(image=gt[0])
        zss['recon_0'] = [mu, logvar.mul(0.5).exp_()]

        # mode 3: svhn --> mnist, svhn --> svhn
        mu, logvar = model.infer(sent=gt[1])
        zss['recon_1'] = [mu, logvar.mul(0.5).exp_()]

        # mode 4: mnist, svhn --> mnist, mnist, svhn --> svhn
        mu, logvar = model.infer(image=gt[0], sent=gt[1])
        zss['recon_2'] = [mu, logvar.mul(0.5).exp_()]
        return zss, gt
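Examples #5, #11, and #13 expect unpack_data to hand back an (mnist, svhn) pair already moved to the target device. The original helper is not included in these snippets; a hypothetical sketch, assuming each dataT is a pair of (images, labels) batches coming from two zipped DataLoaders, could look like:

def unpack_data(dataT, device='cpu'):
    # Hypothetical version for the paired MNIST-SVHN loaders: discard the labels
    # and move both modalities to the requested device.
    (mnist, _), (svhn, _) = dataT
    return mnist.to(device), svhn.to(device)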
Example #14
    wandering vs. classical behavior

    """

    output_path = sys.argv[1]  # Where to save outputs
    if not os.path.exists(output_path):
        # Make the directory
        logger.warning(f"{output_path} does not exist, creating")
        os.makedirs(output_path)
    logger.info(f"Outputs will be saved to {output_path}")

    # For parallel runs, use task id from SLURM array job.
    # Passed in via env variable
    try:
        test_bear_idx = int(os.getenv("SLURM_ARRAY_TASK_ID"))
        logger.info(f"Index: {test_bear_idx}")
    except TypeError:
        # SLURM_ARRAY_TASK_ID is unset when not run as an array job (interactive mode)
        logger.warning("Not running as an array job, index is set to 0")
        test_bear_idx = 0  # Default to the first index

    all_bears = unpack_data()

    # Sort the ids and grab the index
    ids = np.sort(all_bears.Bear_ID.unique())
    test_bear_id = ids[test_bear_idx]
    logger.debug(f"Test index: {test_bear_id}")

    pipeline = MultiVarLSTM(all_bears, 5, test_bear_id)
    pipeline.run(output_path)