Code Example #1
File: cifar.py Project: kevinzakka/blog-code
def test(model, device, test_loader, epoch):
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    for batch_idx, (data, target) in enumerate(test_loader):
        data, target = data.to(device), target.to(device)

        # compute output
        with torch.no_grad():
            output = model(data)
        loss = F.nll_loss(output, target)

        # measure accuracy and record loss
        prec1 = accuracy(output, target, topk=(1,))[0]
        losses.update(loss.item(), data.size(0))
        top1.update(prec1.item(), data.size(0))

        if batch_idx % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      batch_idx, len(test_loader), loss=losses,
                      top1=top1))

    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
    return top1.avg
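
Several snippets in this collection (Examples #1-#3, #14, #15, and #17) rely on two helpers, AverageMeter and accuracy, that are defined elsewhere in their repositories. A minimal sketch of the usual definitions, modeled on the PyTorch ImageNet example (an approximation, not the projects' exact code):

class AverageMeter:
    """Tracks the most recent value and a running average."""
    def __init__(self):
        self.val, self.sum, self.count, self.avg = 0, 0, 0, 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Computes precision@k for the specified values of k."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)  # top-k class indices per sample
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res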
Code Example #2
File: cifar_reg.py Project: kevinzakka/blog-code
def train(model, device, train_loader, optimizer, epoch):
    """Train for one epoch on the training set"""
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to train mode
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        # compute output
        output = model(data)
        loss = F.nll_loss(output, target)

        # measure accuracy and record loss
        prec1 = accuracy(output, target, topk=(1,))[0]
        losses.update(loss.item(), data.size(0))
        top1.update(prec1.item(), data.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch, batch_idx, len(train_loader), loss=losses, top1=top1))
Code Example #3
File: cifar.py Project: kevinzakka/blog-code
def train_transient(model, device, train_loader, optimizer, epoch, track=False):
    """Train for one epoch on the training set"""
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to train mode
    model.train()
    epoch_stats = []

    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        # compute output
        output = model(data)
        losses_ = F.nll_loss(output, target, reduction='none')

        if track:
            indices = [batch_idx*train_loader.batch_size + i for i in range(len(data))]
            batch_stats = []
            for i, l in zip(indices, losses_):
                batch_stats.append([i, l.item()])
            epoch_stats.append(batch_stats)

        loss = losses_.mean()

        # measure accuracy and record loss
        prec1 = accuracy(output, target, topk=(1,))[0]
        losses.update(loss.item(), data.size(0))
        top1.update(prec1.item(), data.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch, batch_idx, len(train_loader), loss=losses, top1=top1))
    if track:
        return epoch_stats
    return None
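
When track=True, train_transient returns one list per batch, each holding [sample_index, loss] pairs. A small usage sketch (variable names here are illustrative, not from the project):

stats = train_transient(model, device, train_loader, optimizer, epoch, track=True)
# flatten into (sample_index, loss) pairs for the whole epoch
per_sample = [(idx, loss) for batch in stats for idx, loss in batch]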
Code Example #4
File: main.py Project: chiminghui/examples
def test(epoch):
    model.eval()
    test_loss = 0
    with torch.no_grad():
        for i, (data, _) in enumerate(test_loader):
            data = data.to(device)
            recon_batch, mu, logvar = model(data)
            test_loss += loss_function(recon_batch, data, mu, logvar).item()
            if i == 0:
                n = min(data.size(0), 8)
                comparison = torch.cat([data[:n],
                                      recon_batch.view(args.batch_size, 1, 28, 28)[:n]])
                save_image(comparison.cpu(),
                         'results/reconstruction_' + str(epoch) + '.png', nrow=n)

    test_loss /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss))
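
Examples #4 and #5 call a loss_function defined elsewhere in main.py. A plausible definition, following the standard VAE objective from the PyTorch examples repository (reconstruction term plus KL divergence; the 784 flattening assumes 28x28 MNIST inputs):

import torch
import torch.nn.functional as F

def loss_function(recon_x, x, mu, logvar):
    # reconstruction term: binary cross-entropy, summed over the batch
    bce = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')
    # KL divergence between q(z|x) = N(mu, sigma^2) and the prior N(0, I)
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return bce + kld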
Code Example #5
File: main.py Project: Biocodings/examples
def test(epoch):
    model.eval()
    test_loss = 0
    for i, (data, _) in enumerate(test_loader):
        if args.cuda:
            data = data.cuda()
        data = Variable(data, volatile=True)
        recon_batch, mu, logvar = model(data)
        test_loss += loss_function(recon_batch, data, mu, logvar).data[0]
        if i == 0:
            n = min(data.size(0), 8)
            comparison = torch.cat([data[:n],
                                  recon_batch.view(args.batch_size, 1, 28, 28)[:n]])
            save_image(comparison.data.cpu(),
                     'results/reconstruction_' + str(epoch) + '.png', nrow=n)

    test_loss /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss))
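
Example #5 is the pre-0.4 version of Example #4: Variable(data, volatile=True) and loss.data[0] were removed in PyTorch 0.4. Their modern equivalents, which Example #4 already uses, are:

with torch.no_grad():                    # replaces Variable(data, volatile=True)
    recon_batch, mu, logvar = model(data)
test_loss += loss_function(recon_batch, data, mu, logvar).item()  # replaces .data[0]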
Code Example #6
File: reconstruction.py Project: TaihuLight/biogans
def run_experiment(netG, dataloader_test, nn_path, opt, optimize_red_first=False, n_bfgs_iter=50, lbfgs_lr=0.05, num_lbfgs_trials=5):
    """
    Optimize over the latent noise to try to reconstruct a target image.
    """
    # read the training set to get images for nearest neighbors
    search_images, search_image_names = nearest_neighbors.read_search_images(search_dataset=nn_path, classes_of_interest=opt.class_names,
                                                                             num_channels=opt.original_nc, use_cuda=opt.cuda)
    # prepare data loader
    data_looper = GanImageloader()
    iterator_data = data_looper.return_iterator(dataloader_test, opt.cuda, opt.nc, return_labels=False, num_passes=1)

    # the generator in the eval mode
    netG.eval()

    # create folders
    for cl in opt.class_names:
        os.system('mkdir -p {0}'.format(os.path.join(opt.experiment, cl)))

    l2_dists = {cl: [] for cl in opt.class_names}
    lls_noise = {cl: [] for cl in opt.class_names}
    lls_noise_init = {cl: [] for cl in opt.class_names}
    nn_dists = {cl: [] for cl in opt.class_names}
    mvn = multivariate_normal(np.zeros(opt.nz), np.identity(opt.nz))


    def compute_rec_error(data, rec):
        rec_error = data - rec
        l2_dist = torch.sum(rec_error ** 2 / rec_error.numel()) ** 0.5
        return l2_dist

    def im_to_01(im):
        return im * opt.std_val + opt.mean_val

    def im_to_original(im):
        return (im - opt.mean_val) / opt.std_val


    for i_batch, data in enumerate(iterator_data):
        print('Batch {}'.format(i_batch))
        assert (len(opt.class_names) == data.size(0))
        assert (data.dim() == 5)
        for i_class in range(data.size(0)):
            class_name = opt.class_names[i_class]

            # add class information to the generator
            netG_forward = lambda input: netG(input, i_class)

            reconstructions_best = data[i_class].clone()
            reconstructions_best.data.zero_()
            reconstructions_best_init = data[i_class].clone()
            reconstructions_best_init.data.zero_()
            reconstructions_error_best = [float('inf')] * data[i_class].size(0)
            ll_noise_best = [float('inf')] * data[i_class].size(0)
            ll_noise_init_best = [float('inf')] * data[i_class].size(0)
            nn_dists_batch = [float('inf')] * data[i_class].size(0)


            for i_trial in range(num_lbfgs_trials):
                print('Class {0}: {1}, trial {2} of {3}'.format(i_class, class_name, i_trial + 1, num_lbfgs_trials))
                sys.stdout.flush()
                # get the noise leading to the good reconstruction
                if optimize_red_first:
                    noise_init, noise = reconstruct_cells_red_first(data[i_class], netG_forward, opt, n_bfgs_iter=n_bfgs_iter, lbfgs_lr=lbfgs_lr)
                else:
                    noise_init, noise = reconstruct_cells(data[i_class], netG_forward, opt, n_bfgs_iter=n_bfgs_iter, lbfgs_lr=lbfgs_lr)

                # get reconstructions
                reconstructions_init = netG_forward(noise_init)
                reconstructions = netG_forward(noise)

                # compute reconstruction errors
                for i_im in range(reconstructions.size(0)):
                    # get log-likelihoods
                    noise_np = noise[i_im].view(-1).data.cpu().numpy()
                    ll_noise = -mvn.logpdf(noise_np)
                    noise_init_np = noise_init[i_im].view(-1).data.cpu().numpy()
                    ll_noise_init = -mvn.logpdf(noise_init_np)


                    l2_dist = compute_rec_error(im_to_01(data[i_class][i_im].data), im_to_01(reconstructions[i_im].data))

                    if l2_dist < reconstructions_error_best[i_im]:
                        reconstructions_error_best[i_im] = l2_dist
                        reconstructions_best[i_im] = reconstructions[i_im]
                        reconstructions_best_init[i_im] = reconstructions_init[i_im]
                        ll_noise_best[i_im] = ll_noise
                        ll_noise_init_best[i_im] = ll_noise_init

            # find nearest neighbors from the training set
            neighbors = torch.FloatTensor(reconstructions_best.size())
            if opt.cuda:
                neighbors = neighbors.cuda()
            for i_im in range(reconstructions_best.size(0)):
                ref_im = data[i_class][i_im].data
                ref_im_01 = im_to_01(ref_im)
                _, nn_ids = nearest_neighbors.find_neighbors(ref_im_01, search_images[class_name], search_image_names[class_name], num_neighbors=1)
                nn_im_01 = search_images[class_name][nn_ids[0]]
                neighbors[i_im] = im_to_original(nn_im_01)
                nn_dists_batch[i_im] = compute_rec_error(nn_im_01, ref_im_01)
            neighbors = Variable(neighbors)

            # save results
            for i_im in range(reconstructions_best.size(0)):
                all_images = [data[i_class][i_im], reconstructions_best[i_im], reconstructions_best_init[i_im], neighbors[i_im]]
                all_images = torch.stack(all_images, 0)
                all_images = pad_channels(all_images.data, 3)
                file_name = os.path.join(opt.experiment, class_name, '{0}_batch{1}_image{2}.png'.format(class_name, i_batch, i_im))
                vutils.save_image(im_to_01(all_images), file_name)

                l2_dist = reconstructions_error_best[i_im]
                ll_noise = ll_noise_best[i_im]
                ll_noise_init = ll_noise_init_best[i_im]

                l2_dists[class_name].append(l2_dist)
                lls_noise[class_name].append(ll_noise)
                lls_noise_init[class_name].append(ll_noise_init)
                nn_dists[class_name].append(nn_dists_batch[i_im])

    # saving the full reconstruction data
    all_data = {'l2_dists': l2_dists, 'lls_noise': lls_noise, 'lls_noise_init': lls_noise_init, 'nn_dists': nn_dists}
    torch.save(all_data, os.path.join(opt.experiment,  'reconstruction_data.pth'))

    # print aggregated statistics
    for i_class, class_name in enumerate(opt.class_names):
        l2 = np.array(l2_dists[class_name])
        l2_mean = np.mean(l2)
        l2_std = np.std(l2)
        ll_noise = np.array(lls_noise[class_name])
        ll_noise_mean = np.mean(ll_noise)
        ll_noise_std = np.std(ll_noise)
        ll_noise_init = np.array(lls_noise_init[class_name])
        ll_noise_init_mean = np.mean(ll_noise_init)
        ll_noise_init_std = np.std(ll_noise_init)
        nn_d = np.array(nn_dists[class_name])
        nn_d_mean = np.mean(nn_d)
        nn_d_std = np.std(nn_d)
        print('Class {0}: L2-reconstr mean {1:0.3f} std {2:0.3f}; L2-noise mean {3:0.3f} std {4:0.3f}; L2-noise-init mean {5:0.3f} std {6:0.3f}; NN dist {7:0.3f} std {8:0.3f}'.format(class_name, l2_mean, l2_std, ll_noise_mean, ll_noise_std, ll_noise_init_mean, ll_noise_init_std, nn_d_mean, nn_d_std))

    l2 = np.concatenate([np.array(d) for d in l2_dists.values()])
    l2_mean = np.mean(l2)
    l2_std = np.std(l2)
    ll_noise = np.concatenate([np.array(d) for d in lls_noise.values()])
    ll_noise_mean = np.mean(ll_noise)
    ll_noise_std = np.std(ll_noise)
    ll_noise_init = np.concatenate([np.array(d) for d in lls_noise_init.values()])
    ll_noise_init_mean = np.mean(ll_noise_init)
    ll_noise_init_std = np.std(ll_noise_init)
    nn_d = np.concatenate([np.array(d) for d in nn_dists.values()])
    nn_d_mean = np.mean(nn_d)
    nn_d_std = np.std(nn_d)

    print('All classes: L2-reconstr mean {0:0.3f} std {1:0.3f}; L2-noise mean {2:0.3f} std {3:0.3f}; L2-noise-init mean {4:0.3f} std {5:0.3f}; NN dist {6:0.3f} std {7:0.3f}'.format(l2_mean, l2_std, ll_noise_mean, ll_noise_std, ll_noise_init_mean, ll_noise_init_std, nn_d_mean, nn_d_std))
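
reconstruct_cells and reconstruct_cells_red_first are not shown in this snippet. Judging from their arguments and the (noise_init, noise) pair they return, they optimize the latent noise with L-BFGS; a minimal sketch of that idea (an assumption, not the project's actual implementation):

import torch

def reconstruct_cells_sketch(target, netG_forward, nz, n_bfgs_iter=50, lbfgs_lr=0.05):
    # start from random Gaussian noise, keeping a copy of the initialization
    noise = torch.randn(target.size(0), nz, 1, 1, device=target.device, requires_grad=True)
    noise_init = noise.detach().clone()
    optimizer = torch.optim.LBFGS([noise], lr=lbfgs_lr, max_iter=n_bfgs_iter)

    def closure():
        optimizer.zero_grad()
        loss = torch.sum((netG_forward(noise) - target) ** 2)  # squared reconstruction error
        loss.backward()
        return loss

    optimizer.step(closure)
    return noise_init, noise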
Code Example #7
    def __getitem__(self, index):
        if self.training:
            index_ratio = int(self.ratio_index[index])
        else:
            index_ratio = index

        # get the anchor index for current sample index
        # here we set the anchor index to the last one
        # sample in this group
        minibatch_db = [self._roidb[index_ratio]]
        blobs = get_minibatch(minibatch_db, self._num_classes)
        data = torch.from_numpy(blobs['data'])
        im_info = torch.from_numpy(blobs['im_info'])
        # we need to random shuffle the bounding box.
        data_height, data_width = data.size(1), data.size(2)
        if self.training:
            np.random.shuffle(blobs['gt_boxes'])
            gt_boxes = torch.from_numpy(blobs['gt_boxes'])

            ########################################################
            # padding the input image to fixed size for each group #
            ########################################################

            # NOTE1: need to cope with the case where a group cover both conditions. (done)
            # NOTE2: need to consider the situation for the tail samples. (no worry)
            # NOTE3: need to implement a parallel data loader. (no worry)
            # get the index range

            # if the image need to crop, crop to the target size.
            ratio = self.ratio_list_batch[index]

            if self._roidb[index_ratio]['need_crop']:
                if ratio < 1:
                    # this means that data_width << data_height, we need to crop the
                    # data_height
                    min_y = int(torch.min(gt_boxes[:, 1]))
                    max_y = int(torch.max(gt_boxes[:, 3]))
                    trim_size = int(np.floor(data_width / ratio))
                    if trim_size > data_height:
                        trim_size = data_height
                    box_region = max_y - min_y + 1
                    if min_y == 0:
                        y_s = 0
                    else:
                        if (box_region - trim_size) < 0:
                            y_s_min = max(max_y - trim_size, 0)
                            y_s_max = min(min_y, data_height - trim_size)
                            if y_s_min == y_s_max:
                                y_s = y_s_min
                            else:
                                y_s = np.random.choice(range(y_s_min, y_s_max))
                        else:
                            y_s_add = int((box_region - trim_size) / 2)
                            if y_s_add == 0:
                                y_s = min_y
                            else:
                                y_s = np.random.choice(
                                    range(min_y, min_y + y_s_add))
                    # crop the image
                    data = data[:, y_s:(y_s + trim_size), :, :]

                    # shift y coordinate of gt_boxes
                    gt_boxes[:, 1] = gt_boxes[:, 1] - float(y_s)
                    gt_boxes[:, 3] = gt_boxes[:, 3] - float(y_s)

                    # update gt bounding box according to the trim
                    gt_boxes[:, 1].clamp_(0, trim_size - 1)
                    gt_boxes[:, 3].clamp_(0, trim_size - 1)

                else:
                    # this means that data_width >> data_height, we need to crop the
                    # data_width
                    min_x = int(torch.min(gt_boxes[:, 0]))
                    max_x = int(torch.max(gt_boxes[:, 2]))
                    trim_size = int(np.ceil(data_height * ratio))
                    if trim_size > data_width:
                        trim_size = data_width
                    box_region = max_x - min_x + 1
                    if min_x == 0:
                        x_s = 0
                    else:
                        if (box_region - trim_size) < 0:
                            x_s_min = max(max_x - trim_size, 0)
                            x_s_max = min(min_x, data_width - trim_size)
                            if x_s_min == x_s_max:
                                x_s = x_s_min
                            else:
                                x_s = np.random.choice(range(x_s_min, x_s_max))
                        else:
                            x_s_add = int((box_region - trim_size) / 2)
                            if x_s_add == 0:
                                x_s = min_x
                            else:
                                x_s = np.random.choice(
                                    range(min_x, min_x + x_s_add))
                    # crop the image
                    data = data[:, :, x_s:(x_s + trim_size), :]

                    # shift x coordinate of gt_boxes
                    gt_boxes[:, 0] = gt_boxes[:, 0] - float(x_s)
                    gt_boxes[:, 2] = gt_boxes[:, 2] - float(x_s)
                    # update gt bounding box according to the trim
                    gt_boxes[:, 0].clamp_(0, trim_size - 1)
                    gt_boxes[:, 2].clamp_(0, trim_size - 1)

            # based on the ratio, padding the image.
            if ratio < 1:
                # this means that data_width < data_height
                trim_size = int(np.floor(data_width / ratio))

                padding_data = torch.FloatTensor(int(np.ceil(data_width / ratio)), \
                                                 data_width, 3).zero_()

                padding_data[:data_height, :, :] = data[0]
                # update im_info
                im_info[0, 0] = padding_data.size(0)
                # print("height %d %d \n" %(index, anchor_idx))
            elif ratio > 1:
                # this means that data_width > data_height
                # if the image need to crop.
                padding_data = torch.FloatTensor(data_height, \
                                                 int(np.ceil(data_height * ratio)), 3).zero_()
                padding_data[:, :data_width, :] = data[0]
                im_info[0, 1] = padding_data.size(1)
            else:
                trim_size = min(data_height, data_width)
                padding_data = torch.FloatTensor(trim_size, trim_size,
                                                 3).zero_()
                padding_data = data[0][:trim_size, :trim_size, :]
                gt_boxes.clamp_(0, trim_size)
                # gt_boxes[:, :4].clamp_(0, trim_size)
                im_info[0, 0] = trim_size
                im_info[0, 1] = trim_size

            # check the bounding box:
            # original
            # not_keep = (gt_boxes[:,0] == gt_boxes[:,2]) | (gt_boxes[:,1] == gt_boxes[:,3])
            # keep = torch.nonzero(not_keep == 0).view(-1)
            if len(gt_boxes) != 0:
                not_keep = (gt_boxes[:, 0] == gt_boxes[:, 2]) | (
                    gt_boxes[:, 1] == gt_boxes[:, 3])
            else:
                not_keep = torch.Tensor(1, 1)

            keep = torch.nonzero(not_keep == 0).view(-1)

            # print('not_keep=',not_keep,'gt_boxes.size(1)',5,self.max_num_box)

            gt_boxes_padding = torch.FloatTensor(self.max_num_box, 5).zero_()

            if keep.numel() != 0:
                gt_boxes = gt_boxes[keep]
                num_boxes = min(gt_boxes.size(0), self.max_num_box)
                gt_boxes_padding[:num_boxes, :] = gt_boxes[:num_boxes]
            else:
                num_boxes = 0

            # permute padding_data to the (C, H, W) layout expected downstream
            padding_data = padding_data.permute(2, 0, 1).contiguous()
            im_info = im_info.view(3)

            return padding_data, im_info, gt_boxes_padding, num_boxes
        else:
            data = data.permute(0, 3, 1,
                                2).contiguous().view(3, data_height,
                                                     data_width)
            im_info = im_info.view(3)

            gt_boxes = torch.FloatTensor([1, 1, 1, 1, 1])
            num_boxes = 0

            return data, im_info, gt_boxes, num_boxes
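
The crop-and-pad logic above is keyed on ratio, the target width-to-height ratio assigned to the sample's group. A small standalone illustration of the padding rule (the helper name is ours, not the project's):

import numpy as np

def padded_shape(data_height, data_width, ratio):
    if ratio < 1:    # tall image: pad height up to width / ratio
        return int(np.ceil(data_width / ratio)), data_width
    elif ratio > 1:  # wide image: pad width up to height * ratio
        return data_height, int(np.ceil(data_height * ratio))
    else:            # square group: trim to the shorter side
        side = min(data_height, data_width)
        return side, side

print(padded_shape(700, 600, 0.75))  # -> (800, 600): height padded from 700 to 800
print(padded_shape(600, 800, 1.5))   # -> (600, 900): width padded from 800 to 900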
Code Example #8
def EB_extraction(vp):
    print("EB extraction, 3 levels, sequence and non-sequence")

    for loader in [vp.testloader_acc]:

        vp.model_VGG.eval()

        eb.use_eb(True)

        print("Starting EB extraction")
        print(len(loader))

        counter = 0
        all_examples = 0
        last_video = "-"
        last_batch = 0
        
        n_image = 0
        for batch_idx, (data, target, video_name)  in enumerate(loader):
            
            #if video_name[0] == "2016090800": # already did it before
            #    continue

            if last_video != video_name[0]:
                print("video ", video_name[0])
                last_batch = 0

            last_video = video_name[0]
            if last_batch >= 32:
                continue
            else:
                last_batch += 1

            print(last_batch, batch_idx, video_name[0])

            timer = time.time()
            target.squeeze_()
            target = Variable(target)
            data = data[0]
            data = Variable(data)

            output = vp.model_VGG(data)

            pred = output.data.max(1)[1] 
            all_examples += data.size()[0]
            flat_pred = pred.cpu().numpy().flatten()
            np_target = target.data.cpu().numpy()


            correct = pred.eq(target.data).sum()

            if correct == 32:
                counter += 1
                print("all correct here")
            else:
                print(correct,"/",32 , "not correct...")
            

            layer_top = list(vp.model_VGG.modules())[0].classifier[6] 
            layer_second = list(vp.model_VGG.modules())[0].classifier[4] 
            target_layer = list(vp.model_VGG.modules())[0].features[2] # 3,4,5 or 6
            top_dh = torch.zeros(32,3)

            for i in range(32):
                top_dh[i, np_target[i]] = 1  # ground-truth-based contrastive signal

            mylogger.log("Using eb")
            grad = eb.contrastive_eb(vp.model_VGG, data, layer_top, layer_second, target=target_layer, top_dh=top_dh)
            mylogger.log("Time void from contrastive EB: {}".format(time.time()-timer))

            grad_ = grad.numpy()
            grad_ = np.mean(grad_,axis=1)

            prediction = pred.cpu().numpy()
            for j in range(32):
                mytensor = torch.from_numpy(grad_[j]).unsqueeze(0).unsqueeze(0)                
                print(mytensor.size())
                vutils.save_image(mytensor,"./experiments/viewpoint_EB_final_data_proper/sample_"+str(batch_idx).zfill(4)+"_"+str(j+n_image*32).zfill(4)+"_"+str(int(prediction[j]))+".png", normalize=True)
                print(data.size())
                vutils.save_image(data[j].data,"./experiments/viewpoint_EB_final_data_proper/sample_orig_"+str(batch_idx).zfill(4)+"_"+str(j+n_image*32).zfill(4)+"_"+str(int(prediction[j]))+".png", normalize=False)
            

            n_image += 1
        print("Correct total sequences from 9128 to ", counter)
Code Example #9
    .lowshelf())

model = UNet(config)
checkpoint = torch.load('Exp/19250_2_checkpoint.pth.tar')
model.load_state_dict(checkpoint['state_dict'])
model.eval()

dataset = CommonVoice()
trainLoader = DL(dataset, batch_size=1, shuffle=False, num_workers=2)


for no, data in enumerate(trainLoader):

    print(data.size())

    data = data.to('cuda')

    model.cuda()
    output = model(data)

    output = output.detach().cpu().numpy()
    data = data.detach().cpu().numpy()

    datax = data[0, 0, :, :] + 1j * data[0, 1, :, :]
    outputx = output[0, 0, :, :] + 1j * output[0, 1, :, :]

    # reshape returns a new array, so the results must be assigned
    outputx = outputx.reshape([output.shape[2], output.shape[3]])
    datax = datax.reshape([data.shape[2], data.shape[3]])
Code Example #10
def training(model, data_loader, optimizer, criterion):
    """
    Perform training
    """
    model_g = model["Generator"]
    model_d = model["Discriminator"]
    model_g.train()  # training mode
    model_d.train()  # training mode

    criterion_g = criterion["Generator"]
    criterion_d = criterion["Discriminator"]

    gan_loss = {"generator": 0.0, "discriminator": 0.0}

    for i, data in enumerate(data_loader, 0):
        data = data.to(DEVICE)  # send data to GPU
        data = data.float()  # workaround

        mini_batch_size = data.size()[0]
        label_real = torch.full((mini_batch_size, 1),
                                REAL_LABEL,
                                device=DEVICE)
        label_fake = torch.full((mini_batch_size, 1),
                                FAKE_LABEL,
                                device=DEVICE)

        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        # Train with all-real batch
        optimizer["Discriminator"].zero_grad()

        # Forward pass real batch through D
        output = model_d(data)

        # Calculate loss on all-real batch
        err_d_real = criterion_d(output, label_real)

        # Calculate gradients for D in backward pass
        err_d_real.backward()
        discriminator_outx = output.mean().item()  # D(x)

        # Train with all-fake batch
        # Generate batch of latent vectors
        noise = torch.randn((mini_batch_size, PARAM["model"]["latent_dim"]),
                            device=DEVICE)
        fake = model_g(noise)

        # Classify all fake batch with D
        output = model_d(fake.detach())
        # detach() is essential to prevent the gradient from flowing into G.

        # Calculate D's loss on the all-fake batch
        err_d_fake = criterion_d(output, label_fake)
        # Calculate the gradients for this batch
        err_d_fake.backward()

        discriminator_gz = output.mean().item()  # D(G(z))

        err_d = err_d_real + err_d_fake
        gan_loss["discriminator"] += err_d.item()

        # Update discriminator
        optimizer["Discriminator"].step()

        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        optimizer["Generator"].zero_grad()

        # Since we just updated D, perform another forward pass
        # of all-fake batch through D
        output = model_d(fake)

        # Calculate G's loss based on this output
        err_g = criterion_g(output, label_real)

        # Calculate gradients for G
        err_g.backward()
        gan_loss["generator"] += err_g.item()

        discriminator_gz2 = output.mean().item()  # D(G(z))

        # Update generator
        optimizer["Generator"].step()

        if i % 50 == 0:
            com.logger.info(
                '[%d/%d]\tLoss_D: %.6f\tLoss_G: %.6f'
                '\tD(x): %.6f\tD(G(z)): %.6f / %.6f', i, len(data_loader),
                err_d.item(), err_g.item(), discriminator_outx, discriminator_gz,
                discriminator_gz2)

    gan_loss["discriminator"] /= len(data_loader)
    gan_loss["generator"] /= len(data_loader)

    return gan_loss["discriminator"], gan_loss["generator"]
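
training() expects model, criterion, and optimizer as dictionaries keyed by "Generator" and "Discriminator", and relies on module-level DEVICE, REAL_LABEL, FAKE_LABEL, PARAM, and com.logger. A hedged wiring sketch, where net_g and net_d stand for user-defined modules and the Adam settings are common DCGAN defaults rather than values from the source:

import torch
import torch.nn as nn
import torch.optim as optim

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
REAL_LABEL, FAKE_LABEL = 1.0, 0.0

model = {"Generator": net_g, "Discriminator": net_d}
criterion = {"Generator": nn.BCELoss(), "Discriminator": nn.BCELoss()}
optimizer = {
    "Generator": optim.Adam(net_g.parameters(), lr=2e-4, betas=(0.5, 0.999)),
    "Discriminator": optim.Adam(net_d.parameters(), lr=2e-4, betas=(0.5, 0.999)),
}

loss_d, loss_g = training(model, data_loader, optimizer, criterion)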
Code Example #11
    def __getitem__(self, index):
        if self.training:
            index_ratio = int(self.ratio_index[index])
        else:
            index_ratio = index

        minibatch_db = [self._roidb[index_ratio]]
        blobs = self.get_minibatch(minibatch_db, self._num_classes)

        data = torch.from_numpy(blobs['data'])
        im_info = torch.from_numpy(blobs['im_info'])

        data_height, data_width = data.size(1), data.size(2)
        if self.training:
            np.random.shuffle(blobs['gt_boxes'])
            gt_boxes = torch.from_numpy(blobs['gt_boxes'])
            ratio = self.ratio_list_batch[index]
            if self._roidb[index_ratio]['need_crop']:
                if ratio < 1:
                    min_y = int(torch.min(gt_boxes[:, 1]))
                    max_y = int(torch.max(gt_boxes[:, 3]))
                    trim_size = int(np.floor(data_width / ratio))
                    if trim_size > data_height:
                        trim_size = data_height

                    boxes_region = max_y - min_y + 1
                    if min_y == 0:
                        y_s = 0
                    else:
                        if (boxes_region - trim_size) < 0:
                            y_s_min = max(max_y - trim_size, 0)
                            y_s_max = min(min_y, data_height - trim_size)
                            if y_s_min == y_s_max:
                                y_s = y_s_min
                            else:
                                y_s = np.random.choice(range(y_s_min, y_s_max))
                        else:
                            y_s_add = int((boxes_region - trim_size) / 2)
                            if y_s_add == 0:
                                y_s = min_y
                            else:
                                y_s = np.random.choice(
                                    range(min_y, min_y + y_s_add))
                    data = data[:, y_s:(y_s + trim_size), :, :]

                    gt_boxes[:, 1] = gt_boxes[:, 1] - float(y_s)
                    gt_boxes[:, 3] = gt_boxes[:, 3] - float(y_s)

                    gt_boxes[:, 1].clamp_(0, trim_size - 1)
                    gt_boxes[:, 3].clamp_(0, trim_size - 1)

                else:

                    min_x = int(torch.min(gt_boxes[:, 0]))
                    max_x = int(torch.max(gt_boxes[:, 2]))
                    trim_size = int(np.ceil(data_height * ratio))
                    if trim_size > data_width:
                        trim_size = data_width
                    box_region = max_x - min_x + 1
                    if min_x == 0:
                        x_s = 0
                    else:
                        if (box_region - trim_size) < 0:
                            x_s_min = max(max_x - trim_size, 0)
                            x_s_max = min(min_x, data_width - trim_size)
                            if x_s_min == x_s_max:
                                x_s = x_s_min
                            else:
                                x_s = np.random.choice(range(x_s_min, x_s_max))
                        else:
                            x_s_add = int((box_region - trim_size) / 2)
                            if x_s_add == 0:
                                x_s = min_x
                            else:
                                x_s = np.random.choice(
                                    range(min_x, min_x + x_s_add))

                    data = data[:, :, x_s:(x_s + trim_size), :]

                    gt_boxes[:, 0] = gt_boxes[:, 0] - float(x_s)
                    gt_boxes[:, 2] = gt_boxes[:, 2] - float(x_s)

                    gt_boxes[:, 0].clamp_(0, trim_size - 1)
                    gt_boxes[:, 2].clamp_(0, trim_size - 1)

            if ratio < 1:
                trim_size = int(np.floor(data_width / ratio))
                padding_data = torch.FloatTensor(
                    int(np.ceil(data_width / ratio)), data_width, 3).zero_()
                padding_data[:data_height, :, :] = data[0]
                im_info[0, 0] = padding_data.size(0)
            elif ratio > 1:
                # this means that data_width > data_height
                # if the image need to crop.
                padding_data = torch.FloatTensor(data_height, \
                                                 int(np.ceil(data_height * ratio)), 3).zero_()
                padding_data[:, :data_width, :] = data[0]
                im_info[0, 1] = padding_data.size(1)
            else:
                trim_size = min(data_height, data_width)
                padding_data = torch.FloatTensor(trim_size, trim_size,
                                                 3).zero_()
                padding_data = data[0][:trim_size, :trim_size, :]
                # gt_boxes.clamp_(0, trim_size)
                gt_boxes[:, :4].clamp_(0, trim_size)
                im_info[0, 0] = trim_size
                im_info[0, 1] = trim_size

            not_keep = (gt_boxes[:, 0] == gt_boxes[:, 2]) | (gt_boxes[:, 1]
                                                             == gt_boxes[:, 3])
            keep = torch.nonzero(not_keep == 0).view(-1)
            gt_boxes_padding = torch.FloatTensor(self.max_num_box,
                                                 gt_boxes.size(1)).zero_()
            if keep.numel() != 0:
                gt_boxes = gt_boxes[keep]
                num_boxes = min(gt_boxes.size(0), self.max_num_box)

                gt_boxes_padding[:num_boxes, :] = gt_boxes[:num_boxes, :]
            else:
                num_boxes = 0

            padding_data = padding_data.permute(2, 0, 1).contiguous()
            im_info = im_info.view(3)

            return padding_data, im_info, gt_boxes_padding, num_boxes

        else:
            data = data.permute(0, 3, 1,
                                2).contiguous().view(3, data_height,
                                                     data_width)
            im_info = im_info.view(3)

            gt_boxes = torch.FloatTensor([1, 1, 1, 1, 1])
            num_boxes = 0

            return data, im_info, gt_boxes, num_boxes
Code Example #12
File: pytorch_checkpoint.py Project: y0z/optuna
def objective(trial):

    # Generate the model.
    model = define_model(trial).to(DEVICE)

    # Generate the optimizers.
    optimizer_name = trial.suggest_categorical("optimizer",
                                               ["Adam", "RMSprop", "SGD"])
    lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)
    optimizer = getattr(optim, optimizer_name)(model.parameters(), lr=lr)

    if "checkpoint_path" in trial.user_attrs:
        checkpoint = torch.load(trial.user_attrs["checkpoint_path"])
        epoch_begin = checkpoint["epoch"] + 1
        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        accuracy = checkpoint["accuracy"]
    else:
        epoch_begin = 0

    # Get the MNIST dataset.
    train_loader, valid_loader = get_mnist()

    path = f"pytorch_checkpoint/{trial.number}"
    os.makedirs(path, exist_ok=True)

    # Training of the model.
    for epoch in range(epoch_begin, EPOCHS):
        model.train()
        for batch_idx, (data, target) in enumerate(train_loader):
            # Limiting training data for faster epochs.
            if batch_idx * BATCHSIZE >= N_TRAIN_EXAMPLES:
                break

            data, target = data.view(data.size(0),
                                     -1).to(DEVICE), target.to(DEVICE)

            optimizer.zero_grad()
            output = model(data)
            loss = F.nll_loss(output, target)
            loss.backward()
            optimizer.step()

        # Validation of the model.
        model.eval()
        correct = 0
        with torch.no_grad():
            for batch_idx, (data, target) in enumerate(valid_loader):
                # Limiting validation data.
                if batch_idx * BATCHSIZE >= N_VALID_EXAMPLES:
                    break
                data, target = data.view(data.size(0),
                                         -1).to(DEVICE), target.to(DEVICE)
                output = model(data)
                # Get the index of the max log-probability.
                pred = output.argmax(dim=1, keepdim=True)
                correct += pred.eq(target.view_as(pred)).sum().item()

        accuracy = correct / min(len(valid_loader.dataset), N_VALID_EXAMPLES)

        trial.report(accuracy, epoch)

        # Save optimization status. We should save the objective value because the process may be
        # killed between saving the last model and recording the objective value to the storage.
        torch.save(
            {
                "epoch": epoch,
                "model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict(),
                "accuracy": accuracy,
            },
            os.path.join(path, "model.pt"),
        )

        # Handle pruning based on the intermediate value.
        if trial.should_prune():
            raise optuna.exceptions.TrialPruned()

    return accuracy
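
objective() reads trial.user_attrs["checkpoint_path"], which implies the study retries failed trials and carries the checkpoint path over. One plausible driver using Optuna's heartbeat plus RetryFailedTrialCallback (an assumption; the snippet does not show the driver code):

import optuna
from optuna.storages import RDBStorage, RetryFailedTrialCallback

storage = RDBStorage(
    "sqlite:///pytorch_checkpoint.db",  # illustrative database URL
    heartbeat_interval=60,
    failed_trial_callback=RetryFailedTrialCallback(),
)
study = optuna.create_study(storage=storage, direction="maximize")
study.optimize(objective, n_trials=10)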
Code Example #13
File: BiAE.py Project: muyeby/Biling-Embeddings
    optimizer1 = optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=True)
    optimizer2 = optim.RMSprop(net.parameters(), lr=0.01, alpha=0.9, weight_decay=True)
    optimizer3 = optim.Adam(net.parameters(), lr=learn_rate, betas=(0.9, 0.99))
    optimizer4 = optim.LBFGS(net.parameters(), lr=0.8)

    trainSet = MyDataSet(root='en.train640 de.train640', train=True, transform=None)
    trainloader = torch.utils.data.DataLoader(trainSet, batch_size=batch_size,
                                              shuffle=True, num_workers=2)
    net.apply(weights_init)
    step = 0
    l1,l2,l3 = 0,0,0
    for epoch in range(epoch_num):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            step += 1
            rowLen = int(data.size()[1] / 2)
            input_view1, input_view2 = data[:, :rowLen], data[:, rowLen:]

            input_view1, input_view2 = Variable(input_view1), Variable(input_view2)  # wrap as Variables (moved to the GPU below via .cuda())

            optimizer3.zero_grad()

            z1, x1, z2, x2 = net(input_view1.cuda().float(), input_view2.cuda().float())

            l1,l2,l3 = criterion(input_view1.cuda().float(), input_view2.cuda().float(), x1, x2, z1, z2,
                                   net.view1_AE.encode_1.weight, net.view2_AE.encode_1.weight)

            loss = l1+l2+l3
            loss.backward()

            optimizer3.step()
Code Example #14
File: train_val.py Project: xyq2016/QuantizeCNNModel
def validate(model, val_loader, criterion, gpu=0, epoch=0, summary_writer=None, name_prefix=None, print_freq=20):

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    loss_name = "val/loss"
    prec1_name = "val/top-1"
    prec5_name = "val/top-5"

    if name_prefix is not None:
        name_prefix = ''.join((name_prefix, '-'))
        loss_name = ''.join((name_prefix, loss_name))
        prec1_name = ''.join((name_prefix, prec1_name))
        prec5_name = ''.join((name_prefix, prec5_name))

    # switch to eval mode
    model.eval()

    # if not full_precision:
    #     qw = QuantizeWeightOrActivation()  # step 1: create the quantizer
    #     model.apply(qw.quantize_tanh)  # step 2: quantize the weights; save full-precision weights and quantized gradients

    with torch.no_grad():
        start = time.time()
        for i, (data, target) in enumerate(val_loader):
            if gpu is not None:
                data = data.cuda(gpu, non_blocking=True)

            # with batch_size 128, target has size torch.Size([128])
            target = target.cuda(gpu, non_blocking=True)
            output = model(data)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), data.size(0))
            top1.update(prec1[0], data.size(0))
            top5.update(prec5[0], data.size(0))

            # measure elapsed time
            batch_time.update(time.time() - start)
            start = time.time()

            if i % print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                       i, len(val_loader), batch_time=batch_time,
                       loss=losses, top1=top1, top5=top5))

        if summary_writer is not None:
            summary_writer.add_scalar(loss_name, losses.avg, epoch)
            summary_writer.add_scalar(prec1_name, top1.avg, epoch)
            summary_writer.add_scalar(prec5_name, top5.avg, epoch)

        print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(top1=top1, top5=top5))

    # if not full_precision:
    #     model.apply(qw.restore)  # step 3: restore the full-precision weights

    return top1.avg
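
The summary_writer argument only needs an add_scalar(tag, value, step) method; torch.utils.tensorboard.SummaryWriter fits, for example (the log directory is illustrative):

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir="runs/quantize")
top1_avg = validate(model, val_loader, criterion, gpu=0, epoch=0, summary_writer=writer)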
Code Example #15
File: train_val.py Project: xyq2016/QuantizeCNNModel
def train(model, train_loader, criterion, optimizer, gpu, epoch=0,
          summary_writer=None, log_per_epoch=100, print_freq=30):

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    # if not full_precision:
    #     qw = QuantizeWeightOrActivation()   # step 1: create the quantizer
    end = time.time()

    # controls how often results are written to TensorBoard
    interval = len(train_loader) // log_per_epoch
    summary_point = [interval * split for split in torch.arange(log_per_epoch)]

    for i, (data, target) in enumerate(train_loader):
        data_time.update(time.time() - end)  # measure data loading time

        if gpu is not None:
            data = data.cuda(gpu, non_blocking=True)
        target = target.cuda(gpu, non_blocking=True)

        # if not full_precision:
        #     model.apply(qw.quantize_tanh)  # step 2: quantize the weights; save full-precision weights and quantized gradients

        output = model(data)
        loss = criterion(output, target)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), data.size(0))
        top1.update(prec1[0], data.size(0))
        top5.update(prec5[0], data.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()

        # if not full_precision:
        #     model.apply(qw.restore)  # step 3: after backprop has computed the gradients, restore the full-precision weights
        #     model.apply(qw.update_grad)  # step 4: multiply the backpropagated gradients by the previously stored quantized gradients

        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # console logging
        if i % print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))

        if summary_writer and (i in summary_point):
            step = i//interval + epoch * log_per_epoch
            summary_writer.add_scalar("loss/train_loss", loss, step)
            summary_writer.add_scalar("train/top-1", top1.avg, step)
            summary_writer.add_scalar("train/top-5", top5.avg, step)
Code Example #16
last_epoch = 0
if args.checkpoint:
    resume(args.checkpoint)
    last_epoch = args.checkpoint
    scheduler.last_epoch = last_epoch - 1

for epoch in range(last_epoch + 1, args.max_epochs + 1):

    scheduler.step()

    for batch, data in enumerate(train_loader):
        batch_t0 = time.time()

        ## init lstm state
        encoder_h_1 = (Variable(torch.zeros(data.size(0), 256, 8, 8).cuda()),
                       Variable(torch.zeros(data.size(0), 256, 8, 8).cuda()))
        encoder_h_2 = (Variable(torch.zeros(data.size(0), 512, 4, 4).cuda()),
                       Variable(torch.zeros(data.size(0), 512, 4, 4).cuda()))
        encoder_h_3 = (Variable(torch.zeros(data.size(0), 512, 2, 2).cuda()),
                       Variable(torch.zeros(data.size(0), 512, 2, 2).cuda()))

        decoder_h_1 = (Variable(torch.zeros(data.size(0), 512, 2, 2).cuda()),
                       Variable(torch.zeros(data.size(0), 512, 2, 2).cuda()))
        decoder_h_2 = (Variable(torch.zeros(data.size(0), 512, 4, 4).cuda()),
                       Variable(torch.zeros(data.size(0), 512, 4, 4).cuda()))
        decoder_h_3 = (Variable(torch.zeros(data.size(0), 256, 8, 8).cuda()),
                       Variable(torch.zeros(data.size(0), 256, 8, 8).cuda()))
        decoder_h_4 = (Variable(torch.zeros(data.size(0), 128, 16, 16).cuda()),
                       Variable(torch.zeros(data.size(0), 128, 16, 16).cuda()))
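
The twelve zero-tensor pairs above can be produced by one small helper; a behavior-preserving sketch that uses the modern device= argument in place of Variable(...).cuda():

def init_state(batch_size, channels, size, device="cuda"):
    shape = (batch_size, channels, size, size)
    return (torch.zeros(shape, device=device), torch.zeros(shape, device=device))

encoder_h_1 = init_state(data.size(0), 256, 8)
encoder_h_2 = init_state(data.size(0), 512, 4)
encoder_h_3 = init_state(data.size(0), 512, 2)
decoder_h_1 = init_state(data.size(0), 512, 2)
decoder_h_2 = init_state(data.size(0), 512, 4)
decoder_h_3 = init_state(data.size(0), 256, 8)
decoder_h_4 = init_state(data.size(0), 128, 16)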
Code Example #17
def main():
    weight_path = config.weights + config.model_name + os.sep + config.description + os.sep + str(
        config.fold) + os.sep
    if not os.path.exists(weight_path):
        os.makedirs(weight_path)
    log_path = config.logs + config.model_name + os.sep + config.description + os.sep + str(
        config.fold) + os.sep
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    submit_path = config.submit + config.model_name + os.sep + config.description + os.sep + str(
        config.fold) + os.sep
    if not os.path.exists(submit_path):
        os.makedirs(submit_path)

    config.write_to_log(log_path + os.sep + 'log.txt')

    # dataset preparation
    train_dataset = customDataset(config.train_data, train=True)
    val_dataset = customDataset(config.test_data, train=True)
    train_loader = DataLoader(train_dataset,
                              batch_size=config.batch_size,
                              shuffle=True,
                              pin_memory=True)
    val_loader = DataLoader(val_dataset,
                            batch_size=config.batch_size * 2,
                            shuffle=False,
                            pin_memory=False)
    # model preparation
    model = get_net(config.num_classes)
    model = DataParallel(model.cuda(), device_ids=config.gpus)
    model.train()
    # optimizer preparation
    optimizer = optim.Adam(model.parameters(),
                           lr=config.lr,
                           amsgrad=True,
                           weight_decay=config.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
    # loss preparation
    #criterion = nn.CrossEntropyLoss().cuda()
    criterion = FocalLoss(config.num_classes).cuda()

    train_loss = AverageMeter()
    train_top1 = AverageMeter()
    valid_loss = [np.inf, 0, 0]
    best_precision = 0

    for epoch in range(config.epochs):
        scheduler.step(epoch)
        train_progressor = ProgressBar(log_path,
                                       mode="Train",
                                       epoch=epoch,
                                       total_epoch=config.epochs,
                                       model_name=config.model_name,
                                       total=len(train_loader))
        for index, (data, label) in enumerate(train_loader):
            train_progressor.current = index
            data = Variable(data).cuda()
            label = Variable(torch.from_numpy(np.asarray(label))).cuda()

            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, label)
            loss.backward()
            optimizer.step()

            precision1_train, precision2_train = accuracy(output,
                                                          label,
                                                          topk=(1, 2))
            train_loss.update(loss.item(), data.size(0))
            train_top1.update(precision1_train[0], data.size(0))
            train_progressor.current_loss = train_loss.avg
            train_progressor.current_top1 = train_top1.avg
            train_progressor()
        train_progressor.done()
        val_loss, val_top1 = evaluate(epoch, model, val_loader, criterion,
                                      log_path)
        is_best = val_top1 > best_precision
        best_precision = max(val_top1, best_precision)
        save_checkpoint(
            {
                "epoch": epoch + 1,
                "model_name": config.model_name,
                "state_dict": model.state_dict(),
                "best_precision1": best_precision,
                "optimizer": optimizer.state_dict(),
                "fold": config.fold,
                "valid_loss": valid_loss,
            }, is_best, weight_path, log_path, epoch)
Code Example #18
                                           shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=128,
                                         shuffle=False)

time1 = time()

for epoch in range(20):
    train_loss = 0.
    val_loss = 0.
    train_acc = 0.
    val_acc = 0.
    time0 = time()
    for data, target in train_loader:
        # Transform target to one-hot encoding, since Keras uses MSELoss
        target = torch.zeros(data.size(0), 10).scatter_(1, target[:, None], 1.)

        optimizer.zero_grad()
        output = model(data.view(data.size(0), -1))
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        train_acc += (torch.argmax(output,
                                   1) == torch.argmax(target,
                                                      1)).float().sum()
    with torch.no_grad():
        for data, target in val_loader:
            target = torch.zeros(data.size(0),
                                 10).scatter_(1, target[:, None], 1.)
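
The scatter_-based one-hot construction in this loop predates torch.nn.functional.one_hot (added in PyTorch 1.1); the modern equivalent is:

import torch.nn.functional as F

target = F.one_hot(target, num_classes=10).float()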
Code Example #19
                                         shuffle=False)

time1 = time()

for epoch in range(20):
    train_loss = 0.
    val_loss = 0.
    train_acc = 0.
    val_acc = 0.
    time0 = time()
    for data, target in train_loader:
        # Transform target to one-hot encoding, since Keras uses MSELoss
        #target = torch.zeros(data.size(0), 10).scatter_(1, target[:, None], 1.)

        optimizer.zero_grad()
        output = model(data.view(data.size(0), -1))
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        #print("output", output.shape)
        #print("target", target.shape)
        train_loss += loss.item()

        train_acc += (torch.argmax(output, 1) == target).float().sum()
    with torch.no_grad():
        for data, target in val_loader:
            #target = torch.zeros(data.size(0), 10).scatter_(1, target[:, None], 1.)

            output = model(data.view(data.size(0), -1))
            loss = criterion(output, target)
Code Example #20
def test(model, test_loader, test_loader_abnorm, device, scaling, vlog, elog,
         image_size, batch_size, log_var_std):
    model.eval()
    test_loss = []
    kl_loss = []
    rec_loss = []
    with torch.no_grad():
        for i, (data, _) in enumerate(test_loader):
            data = data.to(device)
            data_flat = data.flatten(start_dim=1).repeat(1, scaling)
            recon_batch, mu, logvar = model(data_flat)
            loss, kl, rec = loss_function(recon_batch, data_flat, mu, logvar,
                                          log_var_std)
            test_loss += (kl + rec).tolist()
            kl_loss += kl.tolist()
            rec_loss += rec.tolist()
            if i == 0:
                n = min(data.size(0), 8)
                comparison = torch.cat([
                    data[:n],
                    recon_batch[:, :image_size].view(batch_size, 1, 28, 28)[:n]
                ])
                # vlog.show_image_grid(comparison.cpu(), name='reconstruction')

    # vlog.show_value(np.mean(kl_loss), name="Norm-Kl-loss", tag="Anno")
    # vlog.show_value(np.mean(rec_loss), name="Norm-Rec-loss", tag="Anno")
    # vlog.show_value(np.mean(test_loss), name="Norm-Total-loss", tag="Anno")
    # elog.show_value(np.mean(kl_loss), name="Norm-Kl-loss", tag="Anno")
    # elog.show_value(np.mean(rec_loss), name="Norm-Rec-loss", tag="Anno")
    # elog.show_value(np.mean(test_loss), name="Norm-Total-loss", tag="Anno")

    test_loss_ab = []
    kl_loss_ab = []
    rec_loss_ab = []
    with torch.no_grad():
        for i, (data, _) in enumerate(test_loader_abnorm):
            data = data.to(device)
            data_flat = data.flatten(start_dim=1).repeat(1, scaling)
            recon_batch, mu, logvar = model(data_flat)
            loss, kl, rec = loss_function(recon_batch, data_flat, mu, logvar,
                                          log_var_std)
            test_loss_ab += (kl + rec).tolist()
            kl_loss_ab += kl.tolist()
            rec_loss_ab += rec.tolist()
            if i == 0:
                n = min(data.size(0), 8)
                comparison = torch.cat([
                    data[:n],
                    recon_batch[:, :image_size].view(batch_size, 1, 28, 28)[:n]
                ])
                # vlog.show_image_grid(comparison.cpu(), name='reconstruction2')

    print('====> Test set loss: {:.4f}'.format(np.mean(test_loss)))

    # vlog.show_value(np.mean(kl_loss_ab), name="Unorm-Kl-loss", tag="Anno")
    # vlog.show_value(np.mean(rec_loss_ab), name="Unorm-Rec-loss", tag="Anno")
    # vlog.show_value(np.mean(test_loss_ab), name="Unorm-Total-loss", tag="Anno")
    # elog.show_value(np.mean(kl_loss_ab), name="Unorm-Kl-loss", tag="Anno")
    # elog.show_value(np.mean(rec_loss_ab), name="Unorm-Rec-loss", tag="Anno")
    # elog.show_value(np.mean(test_loss_ab), name="Unorm-Total-loss", tag="Anno")

    kl_roc, kl_pr = elog.get_classification_metrics(
        kl_loss + kl_loss_ab,
        [0] * len(kl_loss) + [1] * len(kl_loss_ab),
    )[0]
    rec_roc, rec_pr = elog.get_classification_metrics(
        rec_loss + rec_loss_ab,
        [0] * len(rec_loss) + [1] * len(rec_loss_ab),
    )[0]
    loss_roc, loss_pr = elog.get_classification_metrics(
        test_loss + test_loss_ab,
        [0] * len(test_loss) + [1] * len(test_loss_ab),
    )[0]

    # vlog.show_value(np.mean(kl_roc), name="KL-loss", tag="ROC")
    # vlog.show_value(np.mean(rec_roc), name="Rec-loss", tag="ROC")
    # vlog.show_value(np.mean(loss_roc), name="Total-loss", tag="ROC")
    # elog.show_value(np.mean(kl_roc), name="KL-loss", tag="ROC")
    # elog.show_value(np.mean(rec_roc), name="Rec-loss", tag="ROC")
    # elog.show_value(np.mean(loss_roc), name="Total-loss", tag="ROC")

    # vlog.show_value(np.mean(kl_pr), name="KL-loss", tag="PR")
    # vlog.show_value(np.mean(rec_pr), name="Rec-loss", tag="PR")
    # vlog.show_value(np.mean(loss_pr), name="Total-loss", tag="PR")

    return kl_roc, rec_roc, loss_roc, kl_pr, rec_pr, loss_pr
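
elog.get_classification_metrics is not shown; given that each call is indexed with [0] and unpacked into an ROC and a PR value, a plausible scikit-learn-based implementation (an assumption, written here as a free function rather than the method it is in the source) is:

from sklearn.metrics import average_precision_score, roc_auc_score

def get_classification_metrics(scores, labels):
    # higher scores should indicate the positive (abnormal) class
    return [(roc_auc_score(labels, scores), average_precision_score(labels, scores))]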
Code Example #21
File: dataset.py Project: quanpn90/NMTGMinor
    def __init__(self,
                 src_data,
                 tgt_data,
                 src_sizes=None,
                 tgt_sizes=None,
                 src_langs=None,
                 tgt_langs=None,
                 batch_size_words=16384,
                 data_type="text",
                 batch_size_sents=128,
                 multiplier=1,
                 sorting=False,
                 augment=False,
                 src_align_right=False,
                 tgt_align_right=False,
                 verbose=False,
                 cleaning=False,
                 debug=False,
                 num_split=1,
                 sa_f=8,
                 sa_t=64,
                 **kwargs):
        """
        :param src_data: List of tensors for the source side (1D for text, 2D or 3D for other modalities)
        :param tgt_data: List of tensors (1D text) for the target side (already padded with <s> and </s>)
        :param src_langs: Source languages (list of one-element tensors)
        :param tgt_langs: Target languages (list of one-element tensors)
        :param batch_size_words: Maximum number of words in a minibatch (an MB can't have more than this)
        :param data_type: Text or audio
        :param batch_size_sents: Maximum number of sequences in a minibatch (an MB can't have more than this)
        :param multiplier: The number of sequences must be divisible by this number (e.g. 8 for fp16)
        :param reshape_speech: Put N frames together to reduce the length (this might already be done in preprocessing)
        :param augment: Speech augmentation (currently only SpecAugment is implemented)
        """
        """
        For alignment, the right-aligned data looks like:
        P P P P D D D D
        P P D D D D D D
        P P P P P D D D
        P P P D D D D D
        This can affect positional encoding (whose implementation is not consistent w.r.t. padding).
        For models with absolute positional encoding, src and tgt should be aligned left (this is the default).
        For models with relative positional encoding, src should be aligned right and tgt left.
        """

        self.src = src_data
        self._type = data_type
        self.src_align_right = src_align_right
        if self.src_align_right and verbose:
            print("* Source sentences aligned to the right side.")
        self.tgt_align_right = tgt_align_right
        self.upsampling = kwargs.get('upsampling', False)

        self.max_src_len = kwargs.get('max_src_len', None)
        self.max_tgt_len = kwargs.get('max_tgt_len', 256)
        self.cleaning = int(cleaning)
        self.debug = debug
        self.num_split = num_split
        self.vocab_mask = None

        if self.max_src_len is None:
            if self._type == 'text':
                self.max_src_len = 256
            else:
                # for audio set this to 2048 frames
                self.max_src_len = 2048

        # self.reshape_speech = reshape_speech
        if tgt_data:
            self.tgt = tgt_data

        else:
            self.tgt = None

        self.order = np.arange(len(self.src))

        # Processing data sizes
        if self.src is not None:
            if src_sizes is not None:
                self.src_sizes = np.asarray(src_sizes)
            else:
                self.src_sizes = np.asarray(
                    [data.size(0) for data in self.src])
        else:
            self.src_sizes = None

        if self.tgt is not None:
            if tgt_sizes is not None:
                self.tgt_sizes = np.asarray(tgt_sizes)
            else:
                self.tgt_sizes = np.asarray(
                    [data.size(0) for data in self.tgt])
        else:
            self.tgt_sizes = None

        # sort data to have efficient mini-batching during training
        if sorting:
            if verbose:
                print("* Sorting data ...")

            if self._type == 'text':
                sorted_order = np.lexsort((self.src_sizes, self.tgt_sizes))
            elif self._type == 'audio':
                sorted_order = np.lexsort((self.tgt_sizes, self.src_sizes))

            self.order = sorted_order

        # store data length in numpy for fast query
        if self.tgt is not None and self.src is not None:
            stacked_sizes = np.stack((self.src_sizes, self.tgt_sizes - 1),
                                     axis=0)
            self.data_lengths = np.amax(stacked_sizes, axis=0)
        elif self.src is None:
            self.data_lengths = self.tgt_sizes
        else:
            self.data_lengths = self.src_sizes

        # Processing language ids
        self.src_langs = src_langs
        self.tgt_langs = tgt_langs

        if self.src_langs is not None and self.tgt_langs is not None:
            assert (len(src_langs) == len(tgt_langs))

        # In "bilingual" case, the src_langs only contains one single vector
        # Which is broadcasted to batch_size
        if len(src_langs) <= 1:
            self.bilingual = True
        else:
            self.bilingual = False

        self.full_size = len(self.src) if self.src is not None else len(
            self.tgt)

        # maximum number of tokens in a mb
        self.batch_size_words = batch_size_words

        # maximum sequences in a mb
        self.batch_size_sents = batch_size_sents

        # the actual batch size must be divisible by this multiplier (for fp16 it has to be 4 or 8)
        self.multiplier = multiplier

        # by default: count the amount of padding when we group mini-batches
        self.pad_count = True

        # group samples into mini-batches
        if verbose:
            print("* Allocating mini-batches ...")
        self.batches = allocate_batch(self.order, self.data_lengths,
                                      self.src_sizes, self.tgt_sizes,
                                      batch_size_words, batch_size_sents,
                                      self.multiplier, self.max_src_len,
                                      self.max_tgt_len, self.cleaning)

        # the second-to-last mini-batch is likely the largest
        # (the last one can be a remnant left over after grouping, with fewer samples than the maximum)
        self.largest_batch_id = len(self.batches) - 2

        self.num_batches = len(self.batches)

        self.cur_index = 0
        self.batchOrder = None

        if augment:
            self.augmenter = Augmenter(F=sa_f, T=sa_t)
        else:
            self.augmenter = None
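The multiplier constraint from the docstring (batch sizes divisible by, say, 8 for fp16) amounts to rounding each candidate batch down to the nearest multiple. A hypothetical helper illustrating just that rule (the real logic lives inside allocate_batch):

def round_to_multiplier(num_sents, multiplier=8):
    # Largest size <= num_sents that is divisible by `multiplier`;
    # keep the raw size when not even one full multiple fits.
    rounded = (num_sents // multiplier) * multiplier
    return rounded if rounded > 0 else num_sents

assert round_to_multiplier(21, multiplier=8) == 16
assert round_to_multiplier(5, multiplier=8) == 5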
Code example #22
    def __getitem__(self, index):
        index_ratio = int(self.ratio_index[index])

        # get the anchor index for the current sample index;
        # here we set the anchor index to the last sample in this group
        minibatch_db = [self._roidb[index_ratio]]

        blobs = get_minibatch(minibatch_db, self._num_classes)

        # rajath
        blobs['gt_boxes'] = [
            x for x in blobs['gt_boxes'] if x[-1] in self.list_ind
        ]
        # blobs['gt_boxes'] = [x for x in blobs['gt_boxes'] if int(x[-1]) in self.sketchy_classes]
        blobs['gt_boxes'] = np.array(blobs['gt_boxes'])

        if self.training:
            # Randomly choose a query category
            catgory = blobs['gt_boxes'][:, -1]

            cand = np.unique(catgory).astype(np.uint8)
            # cand = np.intersect1d(cand, self.sketchy_classes)
            # print ("index:", index, "\nindex_ratio:", index_ratio, "\ncatgory:", catgory, "\ncand:", cand, "\nsketchy_classes:", self.sketchy_classes)
            if len(cand) == 1:
                choice = cand[0]

            else:
                p = []
                for i in cand:
                    p.append(self.show_time[i])
                p = np.array(p)
                p /= p.sum()
                choice = np.random.choice(cand, 1, p=p)[0]

            # Relabel gt_boxes: the chosen category becomes 1, all others 0
            blobs['gt_boxes'][:, -1] = np.where(
                blobs['gt_boxes'][:, -1] == choice, 1, 0)
            # Get query image
            query = self.load_query(choice)
        else:
            query = self.load_query(index, minibatch_db[0]['img_id'])

        data = torch.from_numpy(blobs['data'])
        query = torch.from_numpy(query)
        query = query.permute(0, 3, 1, 2).contiguous().squeeze(0)
        im_info = torch.from_numpy(blobs['im_info'])

        # we need to randomly shuffle the bounding boxes.
        data_height, data_width = data.size(1), data.size(2)
        if self.training:
            np.random.shuffle(blobs['gt_boxes'])
            gt_boxes = torch.from_numpy(blobs['gt_boxes'])

            ########################################################
            # padding the input image to fixed size for each group #
            ########################################################

            # NOTE1: need to cope with the case where a group covers both conditions. (done)
            # NOTE2: need to consider the situation for the tail samples. (no worry)
            # NOTE3: need to implement a parallel data loader. (no worry)
            # get the index range

            # if the image needs cropping, crop to the target size.
            ratio = self.ratio_list_batch[index]

            if self._roidb[index_ratio]['need_crop']:
                if ratio < 1:
                    # this means that data_width << data_height, we need to crop the
                    # data_height
                    min_y = int(torch.min(gt_boxes[:, 1]))
                    max_y = int(torch.max(gt_boxes[:, 3]))
                    trim_size = int(np.floor(data_width / ratio))
                    if trim_size > data_height:
                        trim_size = data_height
                    box_region = max_y - min_y + 1
                    if min_y == 0:
                        y_s = 0
                    else:
                        if (box_region - trim_size) < 0:
                            y_s_min = max(max_y - trim_size, 0)
                            y_s_max = min(min_y, data_height - trim_size)
                            if y_s_min == y_s_max:
                                y_s = y_s_min
                            else:
                                y_s = np.random.choice(range(y_s_min, y_s_max))
                        else:
                            y_s_add = int((box_region - trim_size) / 2)
                            if y_s_add == 0:
                                y_s = min_y
                            else:
                                y_s = np.random.choice(
                                    range(min_y, min_y + y_s_add))
                    # crop the image
                    data = data[:, y_s:(y_s + trim_size), :, :]

                    # shift y coordinates of gt_boxes
                    gt_boxes[:, 1] = gt_boxes[:, 1] - float(y_s)
                    gt_boxes[:, 3] = gt_boxes[:, 3] - float(y_s)

                    # update gt bounding box according to the crop
                    gt_boxes[:, 1].clamp_(0, trim_size - 1)
                    gt_boxes[:, 3].clamp_(0, trim_size - 1)

                else:
                    # this means that data_width >> data_height, we need to crop the
                    # data_width
                    min_x = int(torch.min(gt_boxes[:, 0]))
                    max_x = int(torch.max(gt_boxes[:, 2]))
                    trim_size = int(np.ceil(data_height * ratio))
                    if trim_size > data_width:
                        trim_size = data_width
                    box_region = max_x - min_x + 1
                    if min_x == 0:
                        x_s = 0
                    else:
                        if (box_region - trim_size) < 0:
                            x_s_min = max(max_x - trim_size, 0)
                            x_s_max = min(min_x, data_width - trim_size)
                            if x_s_min == x_s_max:
                                x_s = x_s_min
                            else:
                                x_s = np.random.choice(range(x_s_min, x_s_max))
                        else:
                            x_s_add = int((box_region - trim_size) / 2)
                            if x_s_add == 0:
                                x_s = min_x
                            else:
                                x_s = np.random.choice(
                                    range(min_x, min_x + x_s_add))
                    # crop the image
                    data = data[:, :, x_s:(x_s + trim_size), :]

                    # shift x coordinates of gt_boxes
                    gt_boxes[:, 0] = gt_boxes[:, 0] - float(x_s)
                    gt_boxes[:, 2] = gt_boxes[:, 2] - float(x_s)
                    # update gt bounding box according to the crop
                    gt_boxes[:, 0].clamp_(0, trim_size - 1)
                    gt_boxes[:, 2].clamp_(0, trim_size - 1)

            # based on the ratio, padding the image.
            if ratio < 1:
                # this means that data_width < data_height
                trim_size = int(np.floor(data_width / ratio))

                padding_data = torch.FloatTensor(int(np.ceil(data_width / ratio)), \
                                                 data_width, 3).zero_()

                padding_data[:data_height, :, :] = data[0]
                # update im_info
                im_info[0, 0] = padding_data.size(0)
                # print("height %d %d \n" %(index, anchor_idx))
            elif ratio > 1:
                # this means that data_width > data_height
                # if the image needs cropping.
                padding_data = torch.FloatTensor(data_height, \
                                                 int(np.ceil(data_height * ratio)), 3).zero_()
                padding_data[:, :data_width, :] = data[0]
                im_info[0, 1] = padding_data.size(1)
            else:
                trim_size = min(data_height, data_width)
                padding_data = torch.FloatTensor(trim_size, trim_size,
                                                 3).zero_()
                padding_data = data[0][:trim_size, :trim_size, :]
                # gt_boxes.clamp_(0, trim_size)
                gt_boxes[:, :4].clamp_(0, trim_size)
                im_info[0, 0] = trim_size
                im_info[0, 1] = trim_size

            # check the bounding box:
            not_keep = (gt_boxes[:, 0] == gt_boxes[:, 2]) | (gt_boxes[:, 1]
                                                             == gt_boxes[:, 3])
            # not_keep = (gt_boxes[:,2] - gt_boxes[:,0]) < 10
            # print(not_keep)
            # not_keep = (gt_boxes[:,2] - gt_boxes[:,0]) < torch.FloatTensor([10]) | (gt_boxes[:,3] - gt_boxes[:,1]) < torch.FloatTensor([10])

            keep = torch.nonzero(not_keep == 0).view(-1)

            gt_boxes_padding = torch.FloatTensor(self.max_num_box,
                                                 gt_boxes.size(1)).zero_()
            if keep.numel() != 0:
                gt_boxes = gt_boxes[keep]
                num_boxes = min(gt_boxes.size(0), self.max_num_box)
                gt_boxes_padding[:num_boxes, :] = gt_boxes[:num_boxes]
            else:
                num_boxes = 0

                # permute trim_data to adapt to downstream processing
            padding_data = padding_data.permute(2, 0, 1).contiguous()
            im_info = im_info.view(3)

            return padding_data, query, im_info, gt_boxes_padding, num_boxes
        else:
            data = data.permute(0, 3, 1,
                                2).contiguous().view(3, data_height,
                                                     data_width)
            im_info = im_info.view(3)

            # gt_boxes = torch.FloatTensor([1,1,1,1,1])
            gt_boxes = torch.from_numpy(blobs['gt_boxes'])
            choice = self.cat_list[index]

            return data, query, im_info, gt_boxes, choice
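All of the crop-and-pad branches above key off a precomputed aspect ratio, which (judging from the branch conditions) approximates data_width / data_height. The final padded shape can be summarized with a small sketch; target_size is a hypothetical helper, not part of the project:

import numpy as np

def target_size(data_height, data_width, ratio):
    # Height/width of the padded image produced by the branches above.
    if ratio < 1:    # tall image: pad height up to width / ratio
        return int(np.ceil(data_width / ratio)), data_width
    if ratio > 1:    # wide image: pad width up to height * ratio
        return data_height, int(np.ceil(data_height * ratio))
    trim = min(data_height, data_width)  # square-ish: crop to the short side
    return trim, trim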
Code example #23
File: train.py Project: Richarizardd/SRGAN
        img_resize.save("results/"+opt.name+"/"+image_name[:-4]+"_resize.jpg")
        img_gt.save("results/"+opt.name+"/"+image_name[:-4]+"_gt.jpg")

    print "Evaluating...",
    evalute('./results/'+opt.name)
    print "Done"

for epoch in range(1, NUM_EPOCHS + 1):
    train_bar = tqdm(train_loader)
    running_results = {'batch_sizes': 0, 'd_loss': 0, 'g_loss': 0, 'd_score': 0, 'g_score': 0}

    netG.train()
    netD.train()
    for data, target in train_bar:
        g_update_first = True
        batch_size = data.size(0)
        running_results['batch_sizes'] += batch_size

        ############################
        # (1) Update D network: maximize D(x)-1-D(G(z))
        ###########################
        real_img = Variable(target)
        if torch.cuda.is_available():
            real_img = real_img.cuda()
        z = Variable(data)
        if torch.cuda.is_available():
            z = z.cuda()
        fake_img = netG(z)
        # print("NetG Output: ", fake_img.size())
        # print("Real image: ", real_img.size())
Code example #24
def train(epoch, model, optimizer, scheduler, criterion, train_loader, config,
          writer):
    global global_step

    run_config = config['run_config']
    optim_config = config['optim_config']
    data_config = config['data_config']

    logger.info('Train {}'.format(epoch))

    model.train()

    loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()
    start = time.time()
    for step, (data, targets) in enumerate(train_loader):
        global_step += 1

        if data_config['use_mixup']:
            data, targets = mixup(data, targets, data_config['mixup_alpha'],
                                  data_config['n_classes'])

        if run_config['tensorboard_train_images']:
            if step == 0:
                image = torchvision.utils.make_grid(data,
                                                    normalize=True,
                                                    scale_each=True)
                writer.add_image('Train/Image', image, epoch)

        if optim_config['scheduler'] == 'multistep':
            scheduler.step(epoch - 1)
        elif optim_config['scheduler'] == 'cosine':
            scheduler.step()

        if run_config['tensorboard']:
            if optim_config['scheduler'] != 'none':
                lr = scheduler.get_lr()[0]
            else:
                lr = optim_config['base_lr']
            writer.add_scalar('Train/LearningRate', lr, global_step)

        if run_config['use_gpu']:
            data = data.cuda()
            targets = targets.cuda()
        data = Variable(data)
        targets = Variable(targets)

        optimizer.zero_grad()

        outputs = model(data)
        loss = criterion(outputs, targets)
        loss.backward()

        optimizer.step()

        _, preds = torch.max(outputs, dim=1)

        loss_ = loss.item()
        if data_config['use_mixup']:
            _, targets = targets.max(dim=1)
        correct_ = preds.eq(targets).sum().item()
        num = data.size(0)

        accuracy = correct_ / num

        loss_meter.update(loss_, num)
        accuracy_meter.update(accuracy, num)

        if run_config['tensorboard']:
            writer.add_scalar('Train/RunningLoss', loss_, global_step)
            writer.add_scalar('Train/RunningAccuracy', accuracy, global_step)

        if step % 100 == 0:
            logger.info('Epoch {} Step {}/{} '
                        'Loss {:.4f} ({:.4f}) '
                        'Accuracy {:.4f} ({:.4f})'.format(
                            epoch,
                            step,
                            len(train_loader),
                            loss_meter.val,
                            loss_meter.avg,
                            accuracy_meter.val,
                            accuracy_meter.avg,
                        ))

    elapsed = time.time() - start
    logger.info('Elapsed {:.2f}'.format(elapsed))

    if run_config['tensorboard']:
        writer.add_scalar('Train/Loss', loss_meter.avg, epoch)
        writer.add_scalar('Train/Accuracy', accuracy_meter.avg, epoch)
        writer.add_scalar('Train/Time', elapsed, epoch)
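The mixup call above comes from the surrounding project. A minimal sketch of standard mixup (Zhang et al., 2018) matching the call site's signature, assuming integer class targets, might look like this:

import numpy as np
import torch
import torch.nn.functional as F

def mixup(data, targets, alpha, n_classes):
    # Mix each example with a randomly chosen partner from the same batch.
    indices = torch.randperm(data.size(0))
    lam = float(np.random.beta(alpha, alpha))
    mixed_data = lam * data + (1 - lam) * data[indices]
    onehot = F.one_hot(targets, n_classes).float()
    mixed_targets = lam * onehot + (1 - lam) * onehot[indices]
    return mixed_data, mixed_targets

This also explains the targets.max(dim=1) in the accuracy computation above: after mixup the targets are soft one-hot vectors, so the dominant class has to be recovered with an argmax.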
Code example #25
                                           collate_fn=_collate_fun,
                                           num_workers=self.num_workers,
                                           shuffle=self.shuffle)
        return data_loader

    def __call__(self, epoch=0):
        return self.get_iterator(epoch)

    def __len__(self):
        return self.epoch_size // self.batch_size  # __len__ must return an int


if __name__ == '__main__':
    from matplotlib import pyplot as plt

    dataset = GenericDataset('imagenet', 'train', random_sized_crop=True)
    dataloader = DataLoader(dataset, batch_size=8, unsupervised=True)

    for b in dataloader(0):
        data, label = b
        break

    inv_transform = dataloader.inv_transform
    for i in range(data.size(0)):
        plt.subplot(data.size(0) // 4, 4, i + 1)
        fig = plt.imshow(inv_transform(data[i]))
        fig.axes.get_xaxis().set_visible(False)
        fig.axes.get_yaxis().set_visible(False)

    plt.show()
Code example #26
File: train.py Project: gakarak/SRGAN
def main_train(path_trn: str, path_val: str,
               crop_size: int, upscale_factor: int, num_epochs: int,
               num_workers: int, to_device: str = 'cuda:0', batch_size: int = 64):
    to_device = get_device(to_device)
    train_set = TrainDatasetFromFolder(path_trn, crop_size=crop_size, upscale_factor=upscale_factor)
    val_set = ValDatasetFromFolder(path_val, upscale_factor=upscale_factor)
    # train_set = TrainDatasetFromFolder('data/VOC2012/train', crop_size=crop_size, upscale_factor=upscale_factor)
    # val_set = ValDatasetFromFolder('data/VOC2012/val', upscale_factor=upscale_factor)
    #
    train_loader = DataLoader(dataset=train_set, num_workers=num_workers, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(dataset=val_set, num_workers=num_workers, batch_size=1, shuffle=False)

    netG = Generator(upscale_factor)
    print('# generator parameters:', sum(param.numel() for param in netG.parameters()))
    netD = Discriminator()
    print('# discriminator parameters:', sum(param.numel() for param in netD.parameters()))

    generator_criterion = GeneratorLoss()

    if torch.cuda.is_available():
        netG.cuda()
        netD.cuda()
        generator_criterion.cuda()

    optimizerG = optim.Adam(netG.parameters())
    optimizerD = optim.Adam(netD.parameters())

    results = {'d_loss': [], 'g_loss': [], 'd_score': [], 'g_score': [], 'psnr': [], 'ssim': []}

    for epoch in range(1, num_epochs + 1):
        train_bar = tqdm(train_loader)
        running_results = {'batch_sizes': 0, 'd_loss': 0, 'g_loss': 0, 'd_score': 0, 'g_score': 0}

        netG.train()
        netD.train()
        # FIXME: separate function for epoch training
        for data, target in train_bar:
            g_update_first = True
            batch_size = data.size(0)
            #
            # img_hr = target.numpy().transpose((0, 2, 3, 1))[0]
            # img_lr = data.numpy().transpose((0, 2, 3, 1))[0]
            # img_lr_x4 = cv2.resize(img_lr, img_hr.shape[:2], interpolation=cv2.INTER_CUBIC)
            # #
            # plt.subplot(1, 3, 1)
            # plt.imshow(img_hr)
            # plt.subplot(1, 3, 2)
            # plt.imshow(img_lr)
            # plt.subplot(1, 3, 3)
            # plt.imshow(img_lr_x4)
            # plt.show()
            running_results['batch_sizes'] += batch_size

            ############################
            # (1) Update D network: maximize D(x)-1-D(G(z))
            ###########################
            # real_img = Variable(target)
            # if torch.cuda.is_available():
            #     real_img = real_img.cuda()
            # z = Variable(data)
            # if torch.cuda.is_available():
            #     z = z.cuda()
            z = data.to(to_device)
            real_img = target.to(to_device)
            fake_img = netG(z)

            netD.zero_grad()
            real_out = netD(real_img).mean()
            fake_out = netD(fake_img).mean()
            d_loss = 1 - real_out + fake_out
            d_loss.backward(retain_graph=True)
            optimizerD.step()

            ############################
            # (2) Update G network: minimize 1-D(G(z)) + Perception Loss + Image Loss + TV Loss
            ###########################
            netG.zero_grad()
            g_loss = generator_criterion(fake_out, fake_img, real_img)
            g_loss.backward()
            optimizerG.step()
            fake_img = netG(z)
            fake_out = netD(fake_img).mean()

            g_loss = generator_criterion(fake_out, fake_img, real_img)
            running_results['g_loss'] += float(g_loss) * batch_size
            d_loss = 1 - real_out + fake_out
            running_results['d_loss'] += float(d_loss) * batch_size
            running_results['d_score'] += float(real_out) * batch_size
            running_results['g_score'] += float(fake_out) * batch_size

            train_bar.set_description(desc='[%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f' % (
                epoch, num_epochs, running_results['d_loss'] / running_results['batch_sizes'],
                running_results['g_loss'] / running_results['batch_sizes'],
                running_results['d_score'] / running_results['batch_sizes'],
                running_results['g_score'] / running_results['batch_sizes']))

        netG.eval()
        # FIXME: separate function for epoch validation
        with torch.no_grad():
            out_path = 'training_results/SRF_' + str(upscale_factor) + '/'
            if not os.path.exists(out_path):
                os.makedirs(out_path)
            val_bar = tqdm(val_loader)
            valing_results = {'mse': 0, 'ssims': 0, 'psnr': 0, 'ssim': 0, 'batch_sizes': 0}
            val_images = []
            for val_lr, val_hr_restore, val_hr in val_bar:
                batch_size = val_lr.size(0)
                valing_results['batch_sizes'] += batch_size
                # lr = Variable(val_lr, volatile=True)
                # hr = Variable(val_hr, volatile=True)
                # if torch.cuda.is_available():
                #     lr = lr.cuda()
                #     hr = hr.cuda()
                lr = val_lr.to(to_device)
                hr = val_hr.to(to_device)
                sr = netG(lr)

                batch_mse = ((sr - hr) ** 2).mean()
                valing_results['mse'] += float(batch_mse) * batch_size
                batch_ssim = float(pytorch_ssim.ssim(sr, hr)) #.data[0]
                valing_results['ssims'] += batch_ssim * batch_size
                valing_results['psnr'] = 10 * log10(1 / (valing_results['mse'] / valing_results['batch_sizes']))
                valing_results['ssim'] = valing_results['ssims'] / valing_results['batch_sizes']
                val_bar.set_description(
                    desc='[converting LR images to SR images] PSNR: %.4f dB SSIM: %.4f' % (
                        valing_results['psnr'], valing_results['ssim']))

                val_images.extend(
                    [display_transform()(val_hr_restore.squeeze(0)), display_transform()(hr.data.cpu().squeeze(0)),
                     display_transform()(sr.data.cpu().squeeze(0))])
            val_images = torch.stack(val_images)
            val_images = torch.chunk(val_images, val_images.size(0) // 15)
            val_save_bar = tqdm(val_images, desc='[saving training results]')
            index = 1
            for image in val_save_bar:
                image = utils.make_grid(image, nrow=3, padding=5)
                utils.save_image(image, out_path + 'epoch_%d_index_%d.png' % (epoch, index), padding=5)
                index += 1

        # save model parameters
        torch.save(netG.state_dict(), 'epochs/netG_epoch_%d_%d.pth' % (upscale_factor, epoch))
        torch.save(netD.state_dict(), 'epochs/netD_epoch_%d_%d.pth' % (upscale_factor, epoch))
        # save loss / scores / psnr / ssim
        results['d_loss'].append(running_results['d_loss'] / running_results['batch_sizes'])
        results['g_loss'].append(running_results['g_loss'] / running_results['batch_sizes'])
        results['d_score'].append(running_results['d_score'] / running_results['batch_sizes'])
        results['g_score'].append(running_results['g_score'] / running_results['batch_sizes'])
        results['psnr'].append(valing_results['psnr'])
        results['ssim'].append(valing_results['ssim'])

        if epoch % 10 == 0 and epoch != 0:
            out_path = 'statistics/'
            data_frame = pd.DataFrame(
                data={'Loss_D': results['d_loss'], 'Loss_G': results['g_loss'], 'Score_D': results['d_score'],
                      'Score_G': results['g_score'], 'PSNR': results['psnr'], 'SSIM': results['ssim']},
                index=range(1, epoch + 1))
            data_frame.to_csv(out_path + 'srf_' + str(upscale_factor) + '_train_results.csv', index_label='Epoch')
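The running PSNR above uses the standard formula for images scaled to [0, 1], PSNR = 10 * log10(1 / MSE). A quick sanity check:

from math import log10

mse = 0.001                  # accumulated MSE / batch_sizes
psnr = 10 * log10(1 / mse)
print('%.1f dB' % psnr)      # -> 30.0 dB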
Code example #27
    z_loss = torch.mean(torch.sum((z.view(-1, 512 * 8 * 8)**2), (1)))

    return BBCE + z_term * (1 / z_loss)


pay = 0
train_loss = 0
valid_loss = 0
valid_loss_list, train_loss_list = [], []
for epoch in range(max_epochs):

    train_loss = 0
    valid_loss = 0
    #opt_enc=exp_lr_scheduler(opt_enc, epoch, lr_decay=0.1, lr_decay_epoch=20)
    for data in train_loader:
        batch_size = data.size()[0]

        #print (data.size())
        datav = Variable(data).cuda()
        #datav[l2,:,row2:row2+5,:]=0
        #model_children = list(G.children())
        regularize_loss = G.sparse_loss()
        reg_weight = 0.01
        rec_enc, z = G(datav)
        beta_err = beta_loss_function(rec_enc, datav, beta, z)
        err_enc = beta_err + regularize_loss * reg_weight
        opt_enc.zero_grad()
        err_enc.backward()
        opt_enc.step()
        train_loss += beta_err.item()
    train_loss /= len(train_loader.dataset)
Code example #28
    def run(self, epochs):
        print("nwords=%r, ntags=%r " % (nwords, ntags))
        print("begin training...")

        if torch.cuda.is_available():
            self.model = self.model.cuda()
            self.loss_fn = self.loss_fn.cuda()

        train_dataset = MyDataSet(train_file, train_labels)
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=90,
                                                   collate_fn=custom_collate,
                                                   shuffle=True,
                                                   num_workers=1,
                                                   pin_memory=True)

        dev_dataset = MyDataSet(dev_file, dev_labels)
        dev_loader = torch.utils.data.DataLoader(dev_dataset,
                                                 batch_size=1,
                                                 collate_fn=custom_collate,
                                                 shuffle=True,
                                                 num_workers=1,
                                                 pin_memory=True)

        self.metrics = []
        for e in range(epochs):  # use the `epochs` argument passed to run()
            model.train()
            losses = []
            if self.stop_cond():
                return
            epoch_loss = 0
            count = 0
            torch.manual_seed(3000)
            for batch_idx, (data, label, seq_len,
                            mask) in enumerate(train_loader):
                print(data.size(), label.size())
                count = count + data.size(0)  # actual batch size (the last batch may be smaller than 90)
                self.optimizer.zero_grad()
                if torch.cuda.is_available():
                    data = data.cuda()
                    label = label.cuda()
                    mask = mask.cuda()
                X = Variable(data).long()
                Y = Variable(label).float()
                mask = Variable(mask)
                indices = torch.nonzero(mask.view(-1))

                out, ht = self.model(X, seq_len)
                #loss = self.loss_fn(torch.index_select(out.view(-1, out.size()[2]), 0, indices.squeeze(1)), torch.index_select(Y.view(-1),0,indices.squeeze(1)))
                loss = self.loss_fn(out, Y.transpose(0, 1))
                loss.backward()
                nn.utils.clip_grad_norm_(self.model.parameters(), 0.25)
                self.optimizer.step()
                epoch_loss += loss.item()

                if batch_idx % 100 == 0:
                    print(loss.item())
                #tensor_logger.model_param_histo_summary(model, (e * 32) + batch_idx)

            if e % 2 == 0:
                adjust_learning_rate(optimizer, e + 1)
            total_loss = epoch_loss / count
            val_loss = inference(self.model, dev_loader)
            print("Epoch : ", e + 1)
            print("Val loss: ", val_loss.cpu().numpy()[0])
            print("Total loss: ", total_loss.cpu().numpy()[0])
            self.save_model('./offensive-language.pt')
Code example #29
def run(modelcheckpoint, normalizeData, simfile):
    """
    """

    model = wresnet34x2().cpu()

    if os.path.isfile(modelcheckpoint):
        print("=> Loading checkpoint '{}'".format(modelcheckpoint))
        checkpoint = torch.load(modelcheckpoint,
                                map_location=lambda storage, loc: storage)
        best_acc = checkpoint['best_acc']
        print("This model had an accuracy of %.2f on the validation set." %
              (best_acc, ))
        keys = list(checkpoint['state_dict'].keys())  # copy keys: the dict is mutated below
        for old_key in keys:
            new_key = old_key.replace('module.', '')
            checkpoint['state_dict'][new_key] = checkpoint['state_dict'].pop(
                old_key)
        model.load_state_dict(checkpoint['state_dict'])
        print("=> Loaded checkpoint '{}' (epoch {})".format(
            modelcheckpoint, checkpoint['epoch']))
    else:
        print("=> No model checkpoint found. Exiting")
        return None

    cudnn.benchmark = False

    # Load the Normalizer function
    h = h5py.File(normalizeData, 'r')
    mean = torch.FloatTensor(h['mean'][:])
    mean = mean.permute(2, 0, 1)
    std_dev = torch.FloatTensor(h['std_dev'][:])
    std_dev = std_dev.permute(2, 0, 1)
    h.close()
    normalize = transforms.Normalize(mean=mean, std=std_dev)

    # Load simulation data
    time_freq_resolution = (384, 512)
    aca = ibmseti.compamp.SimCompamp(open(simfile, 'rb').read())
    complex_data = aca.complex_data()
    complex_data = complex_data.reshape(time_freq_resolution[0],
                                        time_freq_resolution[1])
    complex_data = complex_data * np.hanning(complex_data.shape[1])
    cpfft = np.fft.fftshift(np.fft.fft(complex_data), 1)
    spectrogram = np.abs(cpfft)
    features = np.stack(
        (np.log(spectrogram**2), np.arctan(cpfft.imag / cpfft.real)), -1)

    # create FloatTensor, permute to proper dimensional order, and normalize
    data = torch.FloatTensor(features)
    data = data.permute(2, 0, 1)
    data = normalize(data)

    # The model expects a 4D tensor
    s = data.size()
    data = data.contiguous().view(1, s[0], s[1], s[2])

    model.eval()

    # volatile Variables are gone in modern PyTorch; use no_grad for inference
    softmax = torch.nn.Softmax(dim=1)
    with torch.no_grad():
        output = model(data)
        probs = softmax(output).view(7).tolist()

    return probs
Code example #30
def train(epoch, model, optimizer, scheduler, criterion, train_loader,
          run_config, writer):
    global global_step

    logger.info('Train {}'.format(epoch))

    model.train()

    loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()
    start = time.time()
    for step, (data, targets) in enumerate(train_loader):
        global_step += 1

        if run_config['tensorboard'] and step == 0:
            image = torchvision.utils.make_grid(data,
                                                normalize=True,
                                                scale_each=True)
            writer.add_image('Train/Image', image, epoch)

        scheduler.step()
        if run_config['tensorboard']:
            writer.add_scalar('Train/LearningRate',
                              scheduler.get_lr()[0], global_step)

        data = data.cuda()
        targets = targets.cuda()

        optimizer.zero_grad()

        outputs, outputsmin = model(data)
        loss = 0.5 * criterion(outputs, targets) + 0.5 * criterion(
            outputsmin, targets)
        loss.backward()

        optimizer.step()

        _, preds = torch.max(0.5 * outputs + 0.5 * outputsmin, dim=1)

        loss_ = loss.item()
        correct_ = preds.eq(targets).sum().item()
        num = data.size(0)

        accuracy = correct_ / num

        loss_meter.update(loss_, num)
        accuracy_meter.update(accuracy, num)

        if run_config['tensorboard']:
            writer.add_scalar('Train/RunningLoss', loss_, global_step)
            writer.add_scalar('Train/RunningAccuracy', accuracy, global_step)

        if step % 100 == 0:
            logger.info('Epoch {} Step {}/{} '
                        'Loss {:.4f} ({:.4f}) '
                        'Accuracy {:.4f} ({:.4f})'.format(
                            epoch,
                            step,
                            len(train_loader),
                            loss_meter.val,
                            loss_meter.avg,
                            accuracy_meter.val,
                            accuracy_meter.avg,
                        ))

    elapsed = time.time() - start
    logger.info('Elapsed {:.2f}'.format(elapsed))

    if run_config['tensorboard']:
        writer.add_scalar('Train/Loss', loss_meter.avg, epoch)
        writer.add_scalar('Train/Accuracy', accuracy_meter.avg, epoch)
        writer.add_scalar('Train/Time', elapsed, epoch)
Code example #31
    def __getitem__(self, index):
        if self.training:
            index_ratio = int(self.ratio_index[index])
        else:
            index_ratio = index

        # get the anchor index for the current sample index;
        # here we set the anchor index to the last sample in this group
        minibatch_db = [self._roidb[index_ratio]]
        blobs = get_minibatch(minibatch_db, self._num_classes, self.RGB,
                              self.NIR, self.DEPTH)
        data = torch.from_numpy(blobs['data'])
        im_info = torch.from_numpy(blobs['im_info'])
        # we need to randomly shuffle the bounding boxes.
        data_height, data_width = data.size(1), data.size(2)
        if self.training:
            np.random.shuffle(blobs['gt_boxes'])
            gt_boxes = torch.from_numpy(blobs['gt_boxes'])

            # if the image needs cropping, crop to the target size.
            ratio = self.ratio_list_batch[index]

            if self._roidb[index_ratio]['need_crop']:
                if ratio < 1:
                    # this means that data_width << data_height, we need to crop the
                    # data_height
                    min_y = int(torch.min(gt_boxes[:, 1]))
                    max_y = int(torch.max(gt_boxes[:, 3]))
                    trim_size = int(np.floor(data_width / ratio))
                    if trim_size > data_height:
                        trim_size = data_height
                    box_region = max_y - min_y + 1
                    if min_y == 0:
                        y_s = 0
                    else:
                        if (box_region - trim_size) < 0:
                            y_s_min = max(max_y - trim_size, 0)
                            y_s_max = min(min_y, data_height - trim_size)
                            if y_s_min == y_s_max:
                                y_s = y_s_min
                            else:
                                y_s = np.random.choice(range(y_s_min, y_s_max))
                        else:
                            y_s_add = int((box_region - trim_size) / 2)
                            if y_s_add == 0:
                                y_s = min_y
                            else:
                                y_s = np.random.choice(
                                    range(min_y, min_y + y_s_add))
                    # crop the image
                    data = data[:, y_s:(y_s + trim_size), :, :]

                    # shift y coordinates of gt_boxes
                    gt_boxes[:, 1] = gt_boxes[:, 1] - float(y_s)
                    gt_boxes[:, 3] = gt_boxes[:, 3] - float(y_s)

                    # update gt bounding box according to the crop
                    gt_boxes[:, 1].clamp_(0, trim_size - 1)
                    gt_boxes[:, 3].clamp_(0, trim_size - 1)

                else:
                    # this means that data_width >> data_height, we need to crop the
                    # data_width
                    min_x = int(torch.min(gt_boxes[:, 0]))
                    max_x = int(torch.max(gt_boxes[:, 2]))
                    trim_size = int(np.ceil(data_height * ratio))
                    if trim_size > data_width:
                        trim_size = data_width
                    box_region = max_x - min_x + 1
                    if min_x == 0:
                        x_s = 0
                    else:
                        if (box_region - trim_size) < 0:
                            x_s_min = max(max_x - trim_size, 0)
                            x_s_max = min(min_x, data_width - trim_size)
                            if x_s_min == x_s_max:
                                x_s = x_s_min
                            else:
                                x_s = np.random.choice(range(x_s_min, x_s_max))
                        else:
                            x_s_add = int((box_region - trim_size) / 2)
                            if x_s_add == 0:
                                x_s = min_x
                            else:
                                x_s = np.random.choice(
                                    range(min_x, min_x + x_s_add))
                    # crop the image
                    data = data[:, :, x_s:(x_s + trim_size), :]

                    # shift x coordinates of gt_boxes
                    gt_boxes[:, 0] = gt_boxes[:, 0] - float(x_s)
                    gt_boxes[:, 2] = gt_boxes[:, 2] - float(x_s)
                    # update gt bounding box according to the crop
                    gt_boxes[:, 0].clamp_(0, trim_size - 1)
                    gt_boxes[:, 2].clamp_(0, trim_size - 1)

            # based on the ratio, padding the image.
            if ratio < 1:
                # this means that data_width < data_height
                trim_size = int(np.floor(data_width / ratio))
                if self.RGB & self.NIR & self.DEPTH:
                    padding_data = torch.FloatTensor(int(np.ceil(data_width / ratio)), \
                                                     data_width, 5).zero_()
                elif (self.RGB & self.NIR) | (self.RGB & self.DEPTH) | (
                        self.NIR & self.DEPTH):
                    padding_data = torch.FloatTensor(int(np.ceil(data_width / ratio)), \
                                                     data_width, 4).zero_()
                else:
                    padding_data = torch.FloatTensor(int(np.ceil(data_width / ratio)), \
                                                     data_width, 3).zero_()
                padding_data[:data_height, :, :] = data[0]
                # update im_info
                im_info[0, 0] = padding_data.size(0)
            elif ratio > 1:
                # this means that data_width > data_height
                # if the image needs cropping.
                if self.RGB & self.NIR & self.DEPTH:
                    padding_data = torch.FloatTensor(data_height, \
                                                     int(np.ceil(data_height * ratio)), 5).zero_()
                elif (self.RGB & self.NIR) | (self.RGB & self.DEPTH) | (
                        self.NIR & self.DEPTH):
                    padding_data = torch.FloatTensor(data_height, \
                                                     int(np.ceil(data_height * ratio)), 4).zero_()
                else:
                    padding_data = torch.FloatTensor(data_height, \
                                                     int(np.ceil(data_height * ratio)), 3).zero_()

                padding_data[:, :data_width, :] = data[0]
                im_info[0, 1] = padding_data.size(1)
            else:
                trim_size = min(data_height, data_width)
                if self.RGB & self.NIR & self.DEPTH:
                    padding_data = torch.FloatTensor(trim_size, trim_size,
                                                     5).zero_()
                elif (self.RGB & self.NIR) | (self.RGB & self.DEPTH) | (
                        self.NIR & self.DEPTH):
                    padding_data = torch.FloatTensor(trim_size, trim_size,
                                                     4).zero_()
                else:
                    padding_data = torch.FloatTensor(trim_size, trim_size,
                                                     3).zero_()

                padding_data = data[0][:trim_size, :trim_size, :]
                # gt_boxes.clamp_(0, trim_size)
                gt_boxes[:, :4].clamp_(0, trim_size)
                im_info[0, 0] = trim_size
                im_info[0, 1] = trim_size

            # check the bounding box:
            not_keep = (gt_boxes[:, 0] == gt_boxes[:, 2]) | (gt_boxes[:, 1]
                                                             == gt_boxes[:, 3])
            keep = torch.nonzero(not_keep == 0).view(-1)

            gt_boxes_padding = torch.FloatTensor(self.max_num_box,
                                                 gt_boxes.size(1)).zero_()
            if keep.numel() != 0:
                gt_boxes = gt_boxes[keep]
                num_boxes = min(gt_boxes.size(0), self.max_num_box)
                gt_boxes_padding[:num_boxes, :] = gt_boxes[:num_boxes]
            else:
                num_boxes = 0

                # permute trim_data to adapt to downstream processing
            padding_data = padding_data.permute(2, 0, 1).contiguous()
            im_info = im_info.view(3)

            return padding_data, im_info, gt_boxes_padding, num_boxes
        else:
            data = data.permute(0, 3, 1,
                                2).contiguous().view(data.shape[3],
                                                     data_height, data_width)
            im_info = im_info.view(3)

            gt_boxes = torch.FloatTensor([1, 1, 1, 1, 1])
            num_boxes = 0

            return data, im_info, gt_boxes, num_boxes
Code example #32
last_epoch = 0
if args.checkpoint:
    resume(args.checkpoint)
    last_epoch = args.checkpoint
    scheduler.last_epoch = last_epoch - 1

for epoch in range(last_epoch + 1, args.max_epochs + 1):

    scheduler.step()

    for batch, data in enumerate(train_loader):
        batch_t0 = time.time()

        ## init lstm state
        encoder_h_1 = (Variable(torch.zeros(data.size(0), 256, 8, 8).cuda()),
                       Variable(torch.zeros(data.size(0), 256, 8, 8).cuda()))
        encoder_h_2 = (Variable(torch.zeros(data.size(0), 512, 4, 4).cuda()),
                       Variable(torch.zeros(data.size(0), 512, 4, 4).cuda()))
        encoder_h_3 = (Variable(torch.zeros(data.size(0), 512, 2, 2).cuda()),
                       Variable(torch.zeros(data.size(0), 512, 2, 2).cuda()))

        decoder_h_1 = (Variable(torch.zeros(data.size(0), 512, 2, 2).cuda()),
                       Variable(torch.zeros(data.size(0), 512, 2, 2).cuda()))
        decoder_h_2 = (Variable(torch.zeros(data.size(0), 512, 4, 4).cuda()),
                       Variable(torch.zeros(data.size(0), 512, 4, 4).cuda()))
        decoder_h_3 = (Variable(torch.zeros(data.size(0), 256, 8, 8).cuda()),
                       Variable(torch.zeros(data.size(0), 256, 8, 8).cuda()))
        decoder_h_4 = (Variable(torch.zeros(data.size(0), 128, 16, 16).cuda()),
                       Variable(torch.zeros(data.size(0), 128, 16, 16).cuda()))
Code example #33
dataset = dset.MNIST(root='./data',
                     train=True,
                     transform=transforms.Compose([transforms.ToTensor()]),
                     download=True)
n_channel = 1
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=batchSize,
                                         shuffle=True)
print("=====> Building the VAE")
vae = VAE().to(device)
vae.load_state_dict(torch.load('./VAE-GAN-VAE_epoch5.pth'))
pos = []
label = []
for epoch in range(nepoch):
    for i, (data, lab) in enumerate(dataloader, 0):
        num_img = data.size(0)
        data = data.view(num_img, 1, 28, 28).to(device)  # reshape images to (N, 1, 28, 28)
        x, mean, logstd = vae(data)  # encode the real images with the VAE
        pos.append(mean)
        label.append(lab)
        if i == 100:
            break
pos = torch.cat(pos).detach().cpu()
label = torch.cat(label)
print(pos.shape)
print(label.shape)
for i in range(10):
    plt.scatter(pos[label == i][:, 0].numpy(),
                pos[label == i][:, 1].numpy(),
                alpha=0.5,
                label=i)
Code example #34
def train(train_loader,
          model,
          criterion,
          optimizer,
          epoch,
          method,
          method_flag=False):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    s_e = sparse_coefficent(epoch, args)
    method.sparse_coefficent_value(s_e, args.stsr)
    for i, (data, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        if args.cuda:
            data, target = data.cuda(), target.cuda()

        # compute output
        output = model(data)
        loss = criterion(output, target)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), data.size(0))
        top1.update(prec1[0], data.size(0))
        top5.update(prec5[0], data.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # if i % 5 == 0:
        if method_flag:
            method.model = model
            method.model_weight_update()
            model = method.model
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print_log(
                'Epoch: [{0}][{1}/{2}]\t'
                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\t'
                'lambda: {s_e:.5f}'.format(epoch,
                                           i,
                                           len(train_loader),
                                           batch_time=batch_time,
                                           data_time=data_time,
                                           loss=losses,
                                           top1=top1,
                                           top5=top5,
                                           s_e=s_e), log)
    # method.print_index()

    return losses.avg
Code example #35
def test(epoch, model, criterion, test_loader, run_config, writer):
    logger.info('Test {}'.format(epoch))

    model.eval()

    loss_meter = AverageMeter()
    correct_meter = AverageMeter()
    correctmax_meter = AverageMeter()
    correctmin_meter = AverageMeter()
    start = time.time()
    for step, (data, targets) in enumerate(test_loader):
        if run_config['tensorboard'] and epoch == 0 and step == 0:
            image = torchvision.utils.make_grid(data,
                                                normalize=True,
                                                scale_each=True)
            writer.add_image('Test/Image', image, epoch)

        data = data.cuda()
        targets = targets.cuda()

        with torch.no_grad():
            outputs, outputsmin = model(data)
        loss = 0.5 * criterion(outputs, targets) + 0.5 * criterion(
            outputsmin, targets)

        _, preds = torch.max(0.5 * outputs + 0.5 * outputsmin, dim=1)
        _, predsmax = torch.max(outputs, dim=1)
        _, predsmin = torch.max(outputsmin, dim=1)

        loss_ = loss.item()
        correct_ = preds.eq(targets).sum().item()
        correctmax_ = predsmax.eq(targets).sum().item()
        correctmin_ = predsmin.eq(targets).sum().item()
        num = data.size(0)

        loss_meter.update(loss_, num)
        correct_meter.update(correct_, 1)
        correctmax_meter.update(correctmax_, 1)
        correctmin_meter.update(correctmin_, 1)

    accuracy = correct_meter.sum / len(test_loader.dataset)
    accuracymax = correctmax_meter.sum / len(test_loader.dataset)
    accuracymin = correctmin_meter.sum / len(test_loader.dataset)

    logger.info(
        'Epoch {} Loss {:.4f} Accuracy {:.4f} AccuracyMax {:.4f} AccuracyMin {:.4f}'
        .format(epoch, loss_meter.avg, accuracy, accuracymax, accuracymin))

    elapsed = time.time() - start
    logger.info('Elapsed {:.2f}'.format(elapsed))

    if run_config['tensorboard']:
        if epoch > 0:
            writer.add_scalar('Test/Loss', loss_meter.avg, epoch)
        writer.add_scalar('Test/Accuracy', accuracy, epoch)
        writer.add_scalar('Test/AccuracyMax', accuracymax, epoch)
        writer.add_scalar('Test/AccuracyMin', accuracymin, epoch)
        writer.add_scalar('Test/Time', elapsed, epoch)

        for name, param in model.named_parameters():
            writer.add_histogram(name, param, global_step)

    return accuracy, accuracymax, accuracymin
Code example #36
File: DataLoaderRandom.py Project: huyaoyu/NewStereo
            % (a[0], np.random.rand(1), torch.randn(1), os.getpid()))

        return a

    def show(self):
        for d in self.data:
            print(d)


if __name__ == "__main__":
    print("Test the random functions with dataloader.")

    # Create the dataset.
    dataset = RandomDataFolder(8)

    print("The original data: ")
    dataset.show()
    print("")

    # Create the dataloader.
    dataloader = data.DataLoader( dataset, \
        batch_size=2, shuffle=False, num_workers=2, drop_last=False )

    # import ipdb; ipdb.set_trace()

    # Test.
    print("The actual loaded data.")
    for batchIdx, (data) in enumerate(dataloader):
        for i in range(data.size()[0]):
            print("batchIdx = %d, data = %f. " % (batchIdx, data[i, 0]))
Code example #37
  def __getitem__(self, index):
    if self.training:
        index_ratio = int(self.ratio_index[index])
    else:
        index_ratio = index

    # get the anchor index for the current sample index;
    # here we set the anchor index to the last sample in this group
    minibatch_db = [self._roidb[index_ratio]]
    blobs = get_minibatch(minibatch_db, self._num_classes)
    data = torch.from_numpy(blobs['data'])
    im_info = torch.from_numpy(blobs['im_info'])
    # we need to randomly shuffle the bounding boxes.
    data_height, data_width = data.size(1), data.size(2)
    if self.training:
        np.random.shuffle(blobs['gt_boxes'])
        gt_boxes = torch.from_numpy(blobs['gt_boxes'])

        ########################################################
        # padding the input image to fixed size for each group #
        ########################################################

        # NOTE1: need to cope with the case where a group covers both conditions. (done)
        # NOTE2: need to consider the situation for the tail samples. (no worry)
        # NOTE3: need to implement a parallel data loader. (no worry)
        # get the index range

        # if the image needs cropping, crop to the target size.
        ratio = self.ratio_list_batch[index]

        if self._roidb[index_ratio]['need_crop']:
            if ratio < 1:
                # this means that data_width << data_height, we need to crop the
                # data_height
                min_y = int(torch.min(gt_boxes[:,1]))
                max_y = int(torch.max(gt_boxes[:,3]))
                trim_size = int(np.floor(data_width / ratio))
                if trim_size > data_height:
                    trim_size = data_height                
                box_region = max_y - min_y + 1
                if min_y == 0:
                    y_s = 0
                else:
                    if (box_region-trim_size) < 0:
                        y_s_min = max(max_y-trim_size, 0)
                        y_s_max = min(min_y, data_height-trim_size)
                        if y_s_min == y_s_max:
                            y_s = y_s_min
                        else:
                            y_s = np.random.choice(range(y_s_min, y_s_max))
                    else:
                        y_s_add = int((box_region-trim_size)/2)
                        if y_s_add == 0:
                            y_s = min_y
                        else:
                            y_s = np.random.choice(range(min_y, min_y+y_s_add))
                # crop the image
                data = data[:, y_s:(y_s + trim_size), :, :]

                # shift the y coordinates of gt_boxes
                gt_boxes[:, 1] = gt_boxes[:, 1] - float(y_s)
                gt_boxes[:, 3] = gt_boxes[:, 3] - float(y_s)

                # update the gt bounding boxes according to the trim
                gt_boxes[:, 1].clamp_(0, trim_size - 1)
                gt_boxes[:, 3].clamp_(0, trim_size - 1)

            else:
                # this means that data_width >> data_height, we need to crop the
                # data_width
                min_x = int(torch.min(gt_boxes[:,0]))
                max_x = int(torch.max(gt_boxes[:,2]))
                trim_size = int(np.ceil(data_height * ratio))
                if trim_size > data_width:
                    trim_size = data_width                
                box_region = max_x - min_x + 1
                if min_x == 0:
                    x_s = 0
                else:
                    if (box_region-trim_size) < 0:
                        x_s_min = max(max_x-trim_size, 0)
                        x_s_max = min(min_x, data_width-trim_size)
                        if x_s_min == x_s_max:
                            x_s = x_s_min
                        else:
                            x_s = np.random.choice(range(x_s_min, x_s_max))
                    else:
                        x_s_add = int((box_region-trim_size)/2)
                        if x_s_add == 0:
                            x_s = min_x
                        else:
                            x_s = np.random.choice(range(min_x, min_x+x_s_add))
                # crop the image
                data = data[:, :, x_s:(x_s + trim_size), :]

                # shift the x coordinates of gt_boxes
                gt_boxes[:, 0] = gt_boxes[:, 0] - float(x_s)
                gt_boxes[:, 2] = gt_boxes[:, 2] - float(x_s)
                # update the gt bounding boxes according to the trim
                gt_boxes[:, 0].clamp_(0, trim_size - 1)
                gt_boxes[:, 2].clamp_(0, trim_size - 1)

        # based on the ratio, padding the image.
        if ratio < 1:
            # this means that data_width < data_height
            # note: this trim_size is unused below; the padded height uses ceil
            trim_size = int(np.floor(data_width / ratio))

            padding_data = torch.FloatTensor(int(np.ceil(data_width / ratio)),
                                             data_width, 3).zero_()

            padding_data[:data_height, :, :] = data[0]
            # update im_info
            im_info[0, 0] = padding_data.size(0)
            # print("height %d %d \n" %(index, anchor_idx))
        elif ratio > 1:
            # this means that data_width > data_height
            # if the image needs cropping.
            padding_data = torch.FloatTensor(data_height, \
                                             int(np.ceil(data_height * ratio)), 3).zero_()
            padding_data[:, :data_width, :] = data[0]
            im_info[0, 1] = padding_data.size(1)
        else:
            trim_size = min(data_height, data_width)
            # NOTE: this zero tensor is immediately overwritten by the cropped
            # view on the next line, so the allocation is effectively unused.
            padding_data = torch.FloatTensor(trim_size, trim_size, 3).zero_()
            padding_data = data[0][:trim_size, :trim_size, :]
            # gt_boxes.clamp_(0, trim_size)
            gt_boxes[:, :4].clamp_(0, trim_size)
            im_info[0, 0] = trim_size
            im_info[0, 1] = trim_size


        # check the bounding box:
        not_keep = (gt_boxes[:,0] == gt_boxes[:,2]) | (gt_boxes[:,1] == gt_boxes[:,3])
        keep = torch.nonzero(not_keep == 0).view(-1)

        gt_boxes_padding = torch.FloatTensor(self.max_num_box, gt_boxes.size(1)).zero_()
        if keep.numel() != 0:
            gt_boxes = gt_boxes[keep]
            num_boxes = min(gt_boxes.size(0), self.max_num_box)
            gt_boxes_padding[:num_boxes,:] = gt_boxes[:num_boxes]
        else:
            num_boxes = 0

        # permute padding_data from HWC to CHW for downstream processing
        padding_data = padding_data.permute(2, 0, 1).contiguous()
        im_info = im_info.view(3)

        return padding_data, im_info, gt_boxes_padding, num_boxes
    else:
        data = data.permute(0, 3, 1, 2).contiguous().view(3, data_height, data_width)
        im_info = im_info.view(3)

        gt_boxes = torch.FloatTensor([1,1,1,1,1])
        num_boxes = 0

        return data, im_info, gt_boxes, num_boxes
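
The y- and x-crop branches above are symmetric, so the start-offset rule can be
summarized in one dimension. A minimal sketch under that reading; the name
choose_crop_start is a hypothetical refactoring for illustration, not part of
the original file:

import numpy as np

def choose_crop_start(min_c, max_c, trim_size, full_size):
    """Pick a crop start so a window of length trim_size keeps as much of the
    ground-truth span [min_c, max_c] as possible."""
    if min_c == 0:
        return 0
    box_region = max_c - min_c + 1
    if box_region < trim_size:
        # The window can contain the whole span: sample a start that keeps it.
        s_min = max(max_c - trim_size, 0)
        s_max = min(min_c, full_size - trim_size)
        return s_min if s_min == s_max else np.random.choice(range(s_min, s_max))
    # The window is smaller than the span: start within its leading half.
    s_add = (box_region - trim_size) // 2
    return min_c if s_add == 0 else np.random.choice(range(min_c, min_c + s_add))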
Code Example #38
def train_model(whichmodel, useMidTest=True, withDwt=True, oldData=False):
    max_acc = 0
    cls = [0, 1, 2]
    add_c = "_mid_" if useMidTest else "_end_"
    add_d = "_dwt_" if withDwt else "_1ch_"
    add_o = "_old_" if oldData else "_new_"
    add_c += add_d + add_o + "_190728_"
    save_pos = whichmodel + add_c + "_save.pt"
    best_save_pos = whichmodel + add_c + "_best.pt"
    log_pos = whichmodel + add_c + '_logdata.log'
    log = open(log_pos, 'w')
    if oldData:
        filenames = [
            "lzc/olddata/" + ("m" if withDwt else "big") + str(cls[i]) +
            ('_dwt300.csv' if withDwt else ".txt") for i in range(len(cls))
        ]
    else:
        filenames = [
            "lzc/" + str(cls[i]) + ('m_dwt300.csv' if withDwt else "m.csv")
            for i in range(len(cls))
        ]
    print(filenames)
    dataset = torch.utils.data.ConcatDataset([
        PlantDataset(filenames[i],
                     cls[i],
                     useMid=useMidTest,
                     dwt=withDwt,
                     old_data=oldData) for i in range(len(cls))
    ])

    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=BATCH_SIZE,
                                             shuffle=True,
                                             num_workers=4)
    test_dataloader = torch.utils.data.DataLoader(
        torch.utils.data.ConcatDataset([
            PlantDataset(filenames[i],
                         cls[i],
                         is_test=True,
                         useMid=useMidTest,
                         dwt=withDwt,
                         old_data=oldData) for i in range(len(cls))
        ]),
        batch_size=BATCH_SIZE,
        shuffle=False,
        num_workers=4)
    epoch = EPOCH
    model = get_model(whichmodel)
    model = model.cuda()  # move parameters to the GPU before building the optimizer
    optimizer = optim.Adam(model.parameters(), lr=0.0001)
    print(model)

    loss_func = nn.CrossEntropyLoss()
    # loss_func =nn.NLLLoss()
    for i in range(epoch):
        total_loss = torch.tensor(0.).cuda()
        total_test_loss = torch.tensor(0.).cuda()
        n_x = 0
        n_y = 0
        for i_batch, sample_batched in enumerate(dataloader):
            data = sample_batched['data'].cuda()
            target = sample_batched['target'].cuda()
            optimizer.zero_grad()
            data = data.transpose(1, 2)
            pred = model(data, data.size(0))
            loss = loss_func(pred, target)
            acc, sz, eq = output_acc(pred, target)
            n_x += 1
            # detach so the running total does not keep the autograd graph alive
            total_loss += loss.detach()
            loss.backward()
            print(i_batch, loss, acc, eq, sz)
            optimizer.step()
        sz_now = 0
        eq_now = 0
        for i_batch, now in enumerate(test_dataloader):
            with torch.no_grad():
                data = now['data'].cuda()
                data = data.transpose(1, 2)
                target = now['target'].cuda()
                pred = model(data, data.size(0))
                acc, sz, eq = output_acc(pred, target, True)
                print(whichmodel, add_c, "Epoch ", i, acc, sz, eq, "max=",
                      max_acc)
                sz_now += sz
                eq_now += eq
                n_y += 1
                total_test_loss += F.cross_entropy(pred, target)
        if max_acc < (eq_now / sz_now):
            max_acc = eq_now / sz_now
            torch.save(model.state_dict(), best_save_pos)
        print(i,
              total_loss.item() / n_x,
              total_test_loss.item() / n_y, (eq_now / sz_now),
              max_acc,
              file=log)
        log.flush()
    torch.save(model.state_dict(), save_pos)
    log.close()
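
output_acc is not defined in this snippet. A minimal sketch consistent with how
it is called above (returning accuracy, batch size, and correct count, with an
optional flag); this is an assumption, not the original helper:

import torch

def output_acc(pred, target, is_test=False):
    # pred: (N, C) logits; target: (N,) class indices. The third flag in the
    # original presumably toggles test-time reporting; it is unused here.
    eq = (pred.argmax(dim=1) == target).sum().item()
    sz = target.size(0)
    acc = eq / sz
    return acc, sz, eq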