def compute_features(self, args, dataloader, model, N):
        if args.verbose:
            print('Compute features')
        batch_time = AverageMeter()
        end = time.time()
        model.eval()
        # discard the label information in the dataloader
        for i, (input_tensor, _) in enumerate(dataloader):
            # no_grad() disables autograd during feature extraction
            # (Variable(..., volatile=True) is deprecated)
            with torch.no_grad():
                aux = model(input_tensor).cpu().numpy()

            if i == 0:
                features = np.zeros((N, aux.shape[1])).astype('float32')

            if i < len(dataloader) - 1:
                features[i * args.batch:(i + 1) *
                         args.batch] = aux.astype('float32')
            else:
                # special treatment for final batch
                features[i * args.batch:] = aux.astype('float32')

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if args.verbose and (i % 200) == 0:
                print(
                    '{0} / {1}\t'
                    'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
                        i, len(dataloader), batch_time=batch_time))
        return features
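Every snippet on this page leans on an AverageMeter utility that none of them defines, plus the usual import time, import os, import numpy as np, and import torch. A minimal sketch of the meter, matching the helper from the official PyTorch ImageNet example:

import time
import numpy as np
import torch

class AverageMeter(object):
    """Tracks the most recent value and a running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count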
Example #2
def compute_features(dataloader, model, N):
    # NB: relies on a module-level `args` for batch size and verbosity
    batch_time = AverageMeter()
    end = time.time()
    model.eval()
    # keep the labels alongside the features this time
    for i, (input_tensor, label) in enumerate(dataloader):
        # torch.no_grad() only takes effect as a context manager;
        # a bare call, as in the original, is a no-op
        with torch.no_grad():
            aux = model(input_tensor.cuda()).cpu().numpy()
        if i == 0:
            features = np.zeros((N, aux.shape[1]), dtype='float32')
            labels = np.zeros((N))
        aux = aux.astype('float32')
        if i < len(dataloader) - 1:
            features[i * args.batch: (i + 1) * args.batch] = aux
            labels[i * args.batch: (i + 1) * args.batch] = label
        else:
            # special treatment for final batch
            features[i * args.batch:] = aux
            labels[i * args.batch:] = label

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if args.verbose and (i % 200) == 0:
            print('{0} / {1}\t' 
                  'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})'
                  .format(i, len(dataloader), batch_time=batch_time))
    return features, labels
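A hypothetical call site for the variant above. The names dataset and args.batch are assumptions, and shuffle=False matters because features are written into the preallocated array by batch index:

# illustrative usage, assuming a module-level `args` as in the snippet
loader = torch.utils.data.DataLoader(dataset,
                                     batch_size=args.batch,
                                     shuffle=False,  # keep batch order stable
                                     num_workers=4)
features, labels = compute_features(loader, model, N=len(dataset))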
Example #3
    def train(self, loader, loss_function):
        losses = AverageMeter()

        self.model.prepare_for_training()

        # create optimizer for top layer
        optimizer_tl = torch.optim.SGD(self.model.top_layer.parameters(),
                                       lr=self.lr,
                                       weight_decay=self.weight_decay)

        self.model.cuda()

        # ten passes over the loader to fit the new top layer
        for _ in range(10):
            for i, (input_tensor, label) in enumerate(loader):
                output = self.model(input_tensor.cuda())
                loss = loss_function(output, label.cuda())
                losses.update(loss.item(), label.shape[0])

                self.model_optimizer.zero_grad()
                optimizer_tl.zero_grad()
                loss.backward()
                self.model_optimizer.step()
                optimizer_tl.step()

        self.model.cpu()

        print('classification loss: {}'.format(losses.avg))
Example #4
def compute_features(dataloader, model, N, args):
    if args.verbose:
        print('Compute features')
    batch_time = AverageMeter()
    end = time.time()
    model.eval()
    # the loader yields (image, index, pose) triples; there are no labels here
    for i, (input_tensor, idx, pose) in enumerate(dataloader):
        # keep the forward pass itself inside no_grad so no graph is built
        with torch.no_grad():
            aux = model(input_tensor.cuda()).cpu().numpy()
        idx = idx.numpy()
        pose = pose.numpy()

        if i == 0:
            features = np.zeros((N, aux.shape[1]), dtype='float32')
            poses = np.zeros((N, pose.shape[1]), dtype='float32')
            # np.int was removed from NumPy; use the builtin int dtype
            idxs = np.zeros(N, dtype=int)

        if i < len(dataloader) - 1:
            features[i * args.batch:(i + 1) * args.batch] = aux.astype('float32')
            poses[i * args.batch:(i + 1) * args.batch] = pose.astype(int)
            idxs[i * args.batch:(i + 1) * args.batch] = idx.astype(int)
        else:
            # special treatment for the final (possibly smaller) batch
            features[i * args.batch:] = aux.astype('float32')
            poses[i * args.batch:] = pose.astype(int)
            idxs[i * args.batch:] = idx.astype(int)

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if args.verbose and (i % 50) == 0:
            print('{0} / {1}\t'
                  'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
                      i, len(dataloader), batch_time=batch_time))
    return features, idxs, poses
Example #5
def compute_features(dataloader, model, N):
    if args.verbose:
        print('Compute features')
    batch_time = AverageMeter()
    end = time.time()
    model.eval()
    features = []
    for input_tensor, _ in dataloader:
        # discard the label information in the dataloader
        with torch.no_grad():
            aux = model(input_tensor.cuda()).cpu().numpy()
        # only the first feature vector of the final batch survives the loop
        features = aux[0, :].tolist()
    return features
Example #6
    def _train(self, loader, i_epoch):
        self.model.train()

        losses = AverageMeter()
        for i, (x, ) in enumerate(loader):
            x = x.to(self.device)
            self.optimizer.zero_grad()

            # VAE
            mean_z, var_z, mean_x, logvar_x = self.model(x)
            elbo = self.model._elbo(x, mean_z, var_z, mean_x, logvar_x,
                                    self.beta(i_epoch))
            loss = -elbo.mean()

            # abort on NaN or infinite loss
            if not torch.isfinite(loss).all():
                raise RuntimeError('non-finite loss encountered')

            losses.update(loss.item(), x.shape[0])
            loss.backward()
            self.optimizer.step()

        return losses.avg
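self.model._elbo is not shown on this page. A minimal sketch of what it plausibly computes for a Gaussian encoder and decoder: the beta-weighted ELBO, i.e. the Gaussian log-likelihood of x under (mean_x, logvar_x) minus beta times the KL divergence between N(mean_z, var_z) and a standard normal. The free-function form and the name gaussian_elbo are assumptions:

import math
import torch

def gaussian_elbo(x, mean_z, var_z, mean_x, logvar_x, beta):
    # log N(x | mean_x, exp(logvar_x)), summed over feature dimensions
    log_px = -0.5 * (math.log(2 * math.pi) + logvar_x
                     + (x - mean_x) ** 2 / logvar_x.exp()).sum(dim=-1)
    # KL( N(mean_z, var_z) || N(0, I) ), closed form for diagonal Gaussians
    kl = 0.5 * (var_z + mean_z ** 2 - 1.0 - var_z.log()).sum(dim=-1)
    # one ELBO per sample; the caller takes `-elbo.mean()` as the loss
    return log_px - beta * kl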
Example #7
def validate(val_loader, model, criterion):
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()
    softmax = nn.Softmax(dim=1).cuda()
    end = time.time()
    for i, (input_tensor, target) in enumerate(val_loader):
        target = target.cuda(non_blocking=True)
        # run the whole forward pass under no_grad during evaluation
        with torch.no_grad():
            output = model(input_tensor.cuda())
            loss = criterion(output, target)

        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        top1.update(prec1[0], input_tensor.size(0))
        top5.update(prec5[0], input_tensor.size(0))
        losses.update(loss.item(), input_tensor.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if args.verbose and i % 100 == 0:
            print('Validation: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      i,
                      len(val_loader),
                      batch_time=batch_time,
                      loss=losses,
                      top1=top1,
                      top5=top5))

    return top1.avg, top5.avg, losses.avg
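accuracy is likewise assumed throughout. The standard precision@k helper from the PyTorch ImageNet example fits the prec1, prec5 = accuracy(output.data, target, topk=(1, 5)) calls on this page:

def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k."""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res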
Example #8
def train(train_loader, model, criterion, optimizer, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # freeze the feature extractor: eval() freezes batch-norm statistics,
    # and gradients must be disabled per parameter (assigning .requires_grad
    # on the module object itself has no effect)
    model.eval()
    for p in model.features.parameters():
        p.requires_grad = False
    end = time.time()
    for i, (input_tensor, target) in enumerate(train_loader):

        # measure data loading time
        data_time.update(time.time() - end)

        #adjust learning rate
        learning_rate_decay(optimizer, len(train_loader) * epoch + i, args.lr)

        target = target.cuda(non_blocking=True)
        input_var = torch.autograd.Variable(input_tensor.cuda())
        target_var = torch.autograd.Variable(target)
        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input_tensor.size(0))
        top1.update(prec1[0], input_tensor.size(0))
        top5.update(prec5[0], input_tensor.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if args.verbose and i % 100 == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      top1=top1,
                      top5=top5))
Example #9
def train(loader, model, crit, opt, epoch, args):
    """Training of the CNN.
        Args:
            loader (torch.utils.data.DataLoader): Data loader
            model (nn.Module): CNN
            crit (torch.nn): loss
            opt (torch.optim.SGD): optimizer for every parameter in the
                                   model with requires_grad=True, except
                                   the top layer
            epoch (int)
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    data_time = AverageMeter()
    forward_time = AverageMeter()
    backward_time = AverageMeter()

    # switch to train mode
    model.train()

    # create an optimizer for the last fc layer
    optimizer_tl = torch.optim.SGD(
        model.top_layer.parameters(),
        lr=args.lr,
        weight_decay=10**args.wd,
    )

    end = time.time()

    for _ in range(1):
        for i, (input_tensor, target) in enumerate(loader):
            data_time.update(time.time() - end)

            # save checkpoint
            n = len(loader) * epoch + i
            if n % args.checkpoints == 0:
                path = os.path.join(
                    args.exp,
                    'checkpoints',
                    # integer division keeps the checkpoint index an int
                    'checkpoint_' + str(n // args.checkpoints) + '.pth.tar',
                )
                if args.verbose:
                    print('Save checkpoint at: {0}'.format(path))
                torch.save(
                    {
                        'epoch': epoch + 1,
                        'arch': args.arch,
                        'state_dict': model.state_dict(),
                        'optimizer': opt.state_dict()
                    }, path)

            # `async` is a reserved word in Python 3.7+; non_blocking is
            # the supported keyword for an overlapped host-to-device copy
            target = target.cuda(non_blocking=True)
            input_var = torch.autograd.Variable(input_tensor.cuda())
            target_var = torch.autograd.Variable(target)

            output = model(input_var)
            loss = crit(output, target_var)

            # record loss; .item() extracts the scalar (loss.data[0] no
            # longer works in modern PyTorch)
            losses.update(loss.item(), input_tensor.size(0))

            # compute gradient and do SGD step
            opt.zero_grad()
            optimizer_tl.zero_grad()
            loss.backward()
            opt.step()
            optimizer_tl.step()

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if args.verbose and (i % 50) == 0:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data: {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss: {loss.val:.4f} ({loss.avg:.4f})'.format(
                          epoch,
                          i,
                          len(loader),
                          batch_time=batch_time,
                          data_time=data_time,
                          loss=losses))

    return losses.avg
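The reason train builds a fresh optimizer for model.top_layer on every call is the DeepCluster-style outer loop: the top layer is reinitialized whenever the pseudo-labels change, so optimizer state cannot carry over. A rough sketch of that loop; cluster and reassign_dataset are hypothetical stand-ins for the clustering step, and fd is the backbone's feature dimension (assumed known):

import torch.nn as nn

# sketch of the DeepCluster-style outer loop (helper names are assumptions)
for epoch in range(args.epochs):
    features = compute_features(dataloader, model, len(dataset))
    pseudo_labels = cluster(features, k=args.nmb_cluster)   # e.g. k-means
    train_loader = reassign_dataset(dataset, pseudo_labels)

    # a new top layer for the new cluster assignment
    model.top_layer = nn.Linear(fd, args.nmb_cluster).cuda()

    loss = train(train_loader, model, crit, opt, epoch, args)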
Example #10
    def _eval(self, loader, i_epoch):

        self.model.eval()

        x_orig = []
        x_recon = []

        losses = AverageMeter()
        with torch.no_grad():
            for i, (x, ) in enumerate(loader):
                x = x.to(self.device)

                # VAE
                mean_z, var_z, mean_x, logvar_x = self.model(x)
                elbo = self.model._elbo(x, mean_z, var_z, mean_x, logvar_x,
                                        self.beta(i_epoch))
                loss = -elbo.mean()
                x_recon_ = self.model.reparameterize(mean_x, logvar=logvar_x)
                x_recon.append(x_recon_)

                x_orig.append(x)

                losses.update(loss.item(), x.shape[0])

        x_recon = torch.cat(x_recon, dim=0)
        x_orig = torch.cat(x_orig, dim=0)

        z, x_sample, x_sample_mean = self.sample(num_samples=128)

        self.model.train()

        def plot_point():
            fig, axes = plt.subplots(nrows=1,
                                     ncols=4,
                                     sharex='all',
                                     sharey='all',
                                     figsize=[20, 5])
            axes = axes.reshape([-1])

            for i, (x_plot, title) in enumerate(
                    zip([x_orig, x_sample, x_recon, x_sample_mean],
                        ['raw', 'sample', 'reconstruction', 'sample mean'])):
                ax = axes[i]
                setup_axes(ax, limit=10, walls=True)
                ax.set_title(title)

                x_plot = self.unnormalize_data(x_plot)

                x_plot = x_plot.reshape(
                    [-1, self.episode_length, x_plot.shape[-1]])

                x_plot = add_time(x_plot.cpu().numpy())

                x_plot = x_plot.reshape([-1, x_plot.shape[-1]])
                sc = ax.scatter(x_plot[:, 0],
                                x_plot[:, 1],
                                c=x_plot[:, 2],
                                s=1**2)
                plt.colorbar(sc, ax=ax)

        def plot_cheetah():
            fig, axes = plt.subplots(nrows=3,
                                     ncols=4,
                                     sharex='row',
                                     sharey='row',
                                     figsize=[20, 15])
            for i_col, (x_plot, title) in enumerate(
                    zip([x_orig, x_sample, x_recon, x_sample_mean],
                        ['raw', 'sample', 'reconstruction', 'sample mean'])):
                axes[0, i_col].set_title(title)

                x_plot = self.unnormalize_data(x_plot)
                x_plot = x_plot.reshape(
                    [-1, self.episode_length, x_plot.shape[-1]])
                x_plot = add_time(x_plot.cpu().numpy())

                rootz = x_plot[:, :, 0].reshape([-1, 1])
                rootx_vel = x_plot[:, :, 8].reshape([-1, 1])
                rootz_vel = x_plot[:, :, 9].reshape([-1, 1])
                t = x_plot[:, :, -1].reshape([-1, 1])  # `t`, not `time`: avoid shadowing the time module

                i_row = 0
                sc = axes[i_row, i_col].scatter(rootz,
                                                rootx_vel,
                                                c=t,
                                                s=1**2)
                axes[i_row, i_col].set_xlabel('rootz [m]')
                axes[i_row, i_col].set_ylabel('rootx_vel [m/s]')

                i_row = 1
                sc = axes[i_row, i_col].scatter(rootx_vel,
                                                rootz_vel,
                                                c=t,
                                                s=1**2)
                axes[i_row, i_col].set_xlabel('rootx_vel [m/s]')
                axes[i_row, i_col].set_ylabel('rootz_vel [m/s]')

                i_row = 2
                sc = axes[i_row, i_col].scatter(rootz,
                                                rootz_vel,
                                                c=t,
                                                s=1**2)
                axes[i_row, i_col].set_xlabel('rootz [m]')
                axes[i_row, i_col].set_ylabel('rootz_vel [m/s]')

        def plot_ant():
            fig, axes = plt.subplots(nrows=3,
                                     ncols=4,
                                     sharex='row',
                                     sharey='row',
                                     figsize=[20, 15])
            for i_col, (x_plot, title) in enumerate(
                    zip([x_orig, x_sample, x_recon, x_sample_mean],
                        ['raw', 'sample', 'reconstruction', 'sample mean'])):
                axes[0, i_col].set_title(title)

                x_plot = self.unnormalize_data(x_plot)
                x_plot = x_plot.reshape(
                    [-1, self.episode_length, x_plot.shape[-1]])
                x_plot = add_time(x_plot.cpu().numpy())

                qpos_0 = x_plot[:, :, 0].reshape([-1, 1])
                qpos_1 = x_plot[:, :, 1].reshape([-1, 1])
                qpos_2 = x_plot[:, :, 2].reshape([-1, 1])
                t = x_plot[:, :, -1].reshape([-1, 1])  # `t`, not `time`: avoid shadowing the time module

                i_row = 0
                sc = axes[i_row, i_col].scatter(qpos_0, qpos_1, c=t, s=1**2)
                axes[i_row, i_col].set_xlabel('qpos_0 [m]')
                axes[i_row, i_col].set_ylabel('qpos_1 [m]')

                i_row = 1
                sc = axes[i_row, i_col].scatter(qpos_0, qpos_2, c=t, s=1**2)
                axes[i_row, i_col].set_xlabel('qpos_0 [m]')
                axes[i_row, i_col].set_ylabel('qpos_2 [m]')

                i_row = 2
                sc = axes[i_row, i_col].scatter(qpos_1, qpos_2, c=t, s=1**2)
                axes[i_row, i_col].set_xlabel('qpos_1 [m]')
                axes[i_row, i_col].set_ylabel('qpos_2 [m]')

        if self.args.vae_plot:
            if 'Point2D' in self.args.env_name:
                plot_point()
            elif 'HalfCheetah' in self.args.env_name:
                plot_cheetah()
            elif 'Ant' in self.args.env_name:
                plot_ant()
            plt.savefig(
                os.path.join(self.plot_dir, 'epoch_{}.png'.format(i_epoch)))
            plt.close('all')

        return losses.avg
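The self.beta(i_epoch) calls in _train and _eval suggest a scheduled KL weight (beta-VAE-style annealing). One plausible sketch, with the warm-up length as an assumed parameter:

def beta(self, i_epoch, warmup_epochs=50, beta_max=1.0):
    # linearly anneal the KL weight from 0 up to beta_max;
    # the warm-up length here is purely illustrative
    return beta_max * min(1.0, i_epoch / warmup_epochs)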
Example #11
def train(loader, model, crit, opt, epoch):
    """Training of the CNN.
        Args:
            loader (torch.utils.data.DataLoader): Data loader
            model (nn.Module): CNN
            crit (torch.nn): loss
            opt (torch.optim.SGD): optimizer for every parameter in the
                                   model with requires_grad=True, except
                                   the top layer
            epoch (int)
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    data_time = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    forward_time = AverageMeter()
    backward_time = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input_tensor, target, text) in enumerate(loader):
        data_time.update(time.time() - end)

        # save checkpoint
        n = len(loader) * epoch + i
        if n % args.checkpoints == 0:
            path = os.path.join(
                args.exp,
                'checkpoints',
                'checkpoint_' + str(n // args.checkpoints) + '.pth.tar',
            )
            if args.verbose:
                print('Save checkpoint at: {0}'.format(path))
            torch.save(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'optimizer': opt.state_dict()
                }, path)

        target = target.cuda(non_blocking=True)
        # text_only / is_joint are module-level flags in this snippet
        if text_only:
            input_var = text.cuda()
        elif is_joint:
            text = text.cuda()
            input_var = torch.autograd.Variable(input_tensor.cuda())
        else:
            input_var = torch.autograd.Variable(input_tensor.cuda())

        target_var = torch.autograd.Variable(target)

        if is_joint:
            output = model(input_var, text)
        else:
            output = model(input_var)
        loss = crit(output, target_var)

        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        top1.update(prec1[0], input_tensor.size(0))
        top5.update(prec5[0], input_tensor.size(0))
        # record loss
        losses.update(loss.item(), input_tensor.size(0))

        # compute gradient and do SGD step
        opt.zero_grad()
        # optimizer_tl.zero_grad()
        loss.backward()
        opt.step()
        # optimizer_tl.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if args.verbose and (i % 200) == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data: {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss: {loss.val:.4f} ({loss.avg:.4f})'.format(
                      epoch,
                      i,
                      len(loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))
    print("Train [%d]: " % epoch, top1.avg, top5.avg, losses.avg)
    return losses.avg
Example #12
def compute_features(dataloader, model, N):
    if args.verbose:
        print('Compute features')
    batch_time = AverageMeter()
    end = time.time()
    model.eval()
    print("Before", GPUInfo.gpu_usage())

    # discard the label information in the dataloader
    try:
        for i, (input_tensor, _, text) in enumerate(dataloader):
            try:
                # torch.no_grad() only takes effect as a context manager,
                # so run every forward pass inside it
                with torch.no_grad():
                    if text_only:
                        aux = model.extract_features(
                            text.cuda()).cpu().numpy()
                    elif is_joint:
                        aux = model.module.extract_features(
                            input_tensor.cuda(), text.cuda()).cpu().numpy()
                    else:
                        aux = model.module.extract_features(
                            input_tensor.cuda()).cpu().numpy()
                aux = aux.astype('float32')

                if i == 0:
                    features = np.zeros((N, aux.shape[1]), dtype='float32')

                if i < len(dataloader) - 1:
                    features[i * args.batch:(i + 1) * args.batch] = aux
                else:
                    # the final batch may be smaller than args.batch
                    features[i * args.batch:] = aux

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                if args.verbose and (i % 50) == 0:
                    print('{0} / {1}\t'
                          'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})'.
                          format(i, len(dataloader), batch_time=batch_time))
            except Exception as e:
                print("RAM Usage: ", str(psutil.virtual_memory().percent))
                print(GPUInfo.gpu_usage())

                print("failed: ", e)
    except RuntimeError:
        print("RAM Usage: ", str(psutil.virtual_memory().percent))
        print(GPUInfo.gpu_usage())
        return features
    except Exception as e:
        print("Error {}".format(e))
    finally:
        # caution: a return inside `finally` swallows any pending exception,
        # and `features` is unbound if the very first batch already failed
        return features