Code example #1
def test(testloader, model, criterion, epoch, use_cuda):
    global best_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    count = 0
    model.eval()

    end = time.time()
    csv_list = glob.glob("./dataset3/CUB-200-2011_rename_fixations/*/*_fixtaions.csv")
    #csv_list = glob.glob("./dataset/CUB-200-2011_rename_fixations/001.Black_footed_Albatross/*_fixtaions.csv")

    end = time.time()
    bar = Bar('Processing', max=len(testloader))
    for batch_idx, (inputs, targets, csv) in enumerate(testloader):
        # measure data loading time
        data_time.update(time.time() - end)
        if use_cuda:
            inputs, targets, csv = inputs.to(device=device), targets.to(device=device), csv.to(device=device)
        csv_path = csv  # per-sample index into csv_list, used below
        targets = targets - 1  # shift labels to start at 0
        """
        targets_4 = []
        x_list = []
        x_numpy = targets.to('cpu').detach().numpy().copy()
        for i in range(len(x_numpy)):
            x = x_numpy[i]
            x_list = [x, x, x, x]
            targets_4.extend(x_list)
        arr_targets_4 = np.array(targets_4)
        targets = torch.tensor(arr_targets_4).to(device)
        #import pdb;pdb.set_trace()
        """

        #resize_crops = torch.Tensor(np.zeros((4 ,args.train_batch, 3, 32, 32))).to(device=device)
        #focus = random.randint(1, 5)

        #resize_crops_n = np.zeros((1,4,3,224,224))
        con = []

        for batch_num, inputs_batch in enumerate(inputs):
            x_pil = transforms.functional.to_pil_image(inputs_batch.cpu())
            transform_inputs_img = transforms.Resize(216)  # resize as a PIL image
            #transform_inputs_img = transforms.Resize(224)
            origin_img = transform_inputs_img(x_pil)  # origin_img is a PIL image
            #origin_array = numpy.asarray(origin_img)  # convert the PIL image to numpy
            #d = torch.stack((a,a,a,a),0)
            #  gives torch.Size([4, 2, 3]) when a is torch.Size([2, 3])
            origin_tensor = transforms.functional.to_tensor(origin_img)  # PIL -> tensor ([3, 216, 216])
            origin_tensor = torch.unsqueeze(origin_tensor, 0)  # torch.Size([1, 3, 216, 216])
            csv_num = int(csv_path[batch_num])  # index of this sample's fixation CSV -- is this correct?
            #TODO: switch this lookup to csv_path directly

            arr_crop = crop_fixions(origin_img, csv_list[csv_num])  # returns the fixation crops
            # no normalization applied at this point
            crop_len = arr_crop.shape[0]  # number of crop pieces
            x_tensor = arr_crop
            #x_tensor = torch.from_numpy(arr_crop)  # torch.Size([crop_pieces_num, 216, 216, 3])
            #x_tensor = x_tensor.permute(0,3,1,2)  # torch.Size([2, 3, 216, 216])

            # assemble exactly 4 slices per image: the resized original followed
            # by up to 3 fixation crops, padding with copies of the original
            # when fewer crops are available
            n_crops = x_tensor.shape[0]
            if n_crops >= 3:
                output4 = torch.cat([origin_tensor, x_tensor[:3]], dim=0)  # torch.Size([4, 3, 216, 216])
            else:
                pad = origin_tensor.repeat(3 - n_crops, 1, 1, 1)
                output4 = torch.cat([origin_tensor, pad, x_tensor], dim=0)
            con.append(output4)
        resize_crops = torch.cat(con, dim=0).to(device)

        #import pdb;pdb.set_trace()

        outputs = model(resize_crops)

        # average the 4 logits belonging to the same original image
        # (the divisor is the number of slices per image)
        outputs = outputs.to('cpu')
        outputs_s = torch.split(outputs, 4, dim=0)  # one chunk of 4 rows per image
        con_list = [chunk.mean(dim=0, keepdim=True) for chunk in outputs_s]
        outputs = torch.cat(con_list).to(device)

        loss = criterion(outputs, targets)
        #print(torch.max(outputs))
        #print(torch.max(targets))

        #print(attention.min(), attention.max())

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix  = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                    batch=batch_idx + 1,
                    size=len(testloader),
                    data=data_time.avg,
                    bt=batch_time.avg,
                    total=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg,
                    top1=top1.avg,
                    top5=top5.avg,
                    )
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
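
All four examples rely on AverageMeter and accuracy helpers that are defined elsewhere in the project. A minimal sketch of what they are assumed to look like, consistent with how they are called above (it follows the common pytorch-classification utilities; the actual implementations may differ):

class AverageMeter(object):
    """Track the latest value, running sum, count and average of a metric."""
    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Top-k accuracy in percent for a batch of logits, as used by the loops above."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)   # [batch, maxk] predicted class indices
    pred = pred.t()                              # [maxk, batch]
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
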
Code example #2
def test(testloader, model, criterion, epoch, use_cuda):
    global best_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    count = 0
    model.eval()

    end = time.time()
    csv_list = glob.glob(
        "./dataset/CUB-200-2011_rename_fixations/*/*_fixtaions.csv")
    #csv_list = glob.glob("./dataset/CUB-200-2011_rename_fixations/001.Black_footed_Albatross/*_fixtaions.csv")

    end = time.time()
    bar = Bar('Processing', max=len(testloader))
    for batch_idx, (inputs, targets, csv) in enumerate(testloader):
        # measure data loading time
        data_time.update(time.time() - end)
        if use_cuda:
            inputs, targets, csv = inputs.to(device=device), targets.to(
                device=device), csv.to(device=device)
        csv_path = csv  # per-sample index into csv_list, used below
        targets = targets - 1  # shift labels to start at 0
        #import pdb;pdb.set_trace()

        #resize_crops = torch.Tensor(np.zeros((4 ,args.train_batch, 3, 32, 32))).to(device=device)
        #focus = random.randint(1, 5)

        #resize_crops_n = np.zeros((1,4,3,224,224))
        con = []

        for batch_num, inputs_batch in enumerate(inputs):
            x_numpy = inputs_batch.to('cpu').detach().numpy().copy()
            x_numpy = np.transpose(x_numpy, (1, 2, 0))
            x_numpy = (x_numpy * 255).astype(np.uint8)

            img = Image.fromarray(x_numpy)
            origin_img = img.resize((168, 168))
            origin_img = np.asarray(origin_img)
            origin_img = np.asarray(origin_img, np.float32)
            origin_img = origin_img / np.max(origin_img)

            origin_img = origin_img[np.newaxis, :, :, :]

            csv_num = int(csv_path[batch_num])  # index of this sample's fixation CSV -- is this correct?

            arr_crop = crop_fixions(img, csv_list[csv_num])
            crop_len = (arr_crop.shape[0])
            arr_crop = np.asarray(arr_crop, np.float32)

            con_numpy = np.concatenate([origin_img, arr_crop], 0)
            # pad with the resized original (fewer than 3 crops) or keep only the
            # last 3 crops (more than 3), so every image yields exactly 4 slices
            n = con_numpy.shape[0]
            if n < 4:
                pad = np.repeat(origin_img, 4 - n, axis=0)
                con_numpy = np.concatenate([con_numpy, pad], 0)
            elif n > 4:
                con_numpy = np.concatenate([con_numpy[:1], con_numpy[-3:]], 0)

            con.append(con_numpy)

        con_n = np.array(con)
        #import pdb;pdb.set_trace()
        con_n = con_n.astype(np.float32)
        #import pdb;pdb.set_trace()
        resize_crops = torch.from_numpy(con_n).clone()
        resize_crops = resize_crops.permute(1, 0, 4, 2, 3)
        resize_crops = resize_crops.to(device)
        # compute output
        outputs, attention = model(resize_crops[0], resize_crops[1],
                                   resize_crops[2], resize_crops[3])
        #import pdb;pdb.set_trace()
        #'''
        attention1 = attention[0]
        attention2 = attention[1]
        attention3 = attention[2]
        attention4 = attention[3]

        if save_attention_map:
            vis_attention1 = attention1.data.cpu()
            vis_attention1 = vis_attention1.numpy()
            vis_attention2 = attention2.data.cpu()
            vis_attention2 = vis_attention2.numpy()
            vis_attention3 = attention3.data.cpu()
            vis_attention3 = vis_attention3.numpy()
            vis_attention4 = attention4.data.cpu()
            vis_attention4 = vis_attention4.numpy()
            vis_inputs = resize_crops[0].data.cpu()
            vis_inputs = vis_inputs.numpy()
            vis_inputs2 = resize_crops[1].data.cpu()
            vis_inputs2 = vis_inputs2.numpy()
            vis_inputs3 = resize_crops[2].data.cpu()
            vis_inputs3 = vis_inputs3.numpy()
            vis_inputs4 = resize_crops[3].data.cpu()
            vis_inputs4 = vis_inputs4.numpy()

            in_b, in_c, in_y, in_x = vis_inputs.shape
            i = 0
            #import pdb;pdb.set_trace()
            for item_img, item_att in zip(vis_inputs, vis_attention1):
                #import pdb;pdb.set_trace()#(0.1948, 0.2155, 0.1589),(0.2333, 0.2278, 0.26106)) 0.5, 0.5, 0.5
                v_img = ((item_img.transpose(
                    (1, 2, 0)) * [0.5, 0.5, 0.5]) + [0.5, 0.5, 0.5]) * 255
                v_img = v_img[:, :, ::-1]
                v_img2 = ((vis_inputs2[i].transpose(
                    (1, 2, 0)) * [0.5, 0.5, 0.5]) + [0.5, 0.5, 0.5]) * 255
                v_img2 = v_img2[:, :, ::-1]
                v_img3 = ((vis_inputs3[i].transpose(
                    (1, 2, 0)) * [0.5, 0.5, 0.5]) + [0.5, 0.5, 0.5]) * 255
                v_img3 = v_img3[:, :, ::-1]
                v_img4 = ((vis_inputs4[i].transpose(
                    (1, 2, 0)) * [0.5, 0.5, 0.5]) + [0.5, 0.5, 0.5]) * 255
                v_img4 = v_img4[:, :, ::-1]

                resize_att1 = cv2.resize(item_att[0], (in_x, in_y))
                resize_att1 = min_max(resize_att1)
                resize_att1 *= 255.
                #import pdb;pdb.set_trace()

                resize_att2 = cv2.resize(vis_attention2[i, 0, :], (in_x, in_y))
                resize_att2 = min_max(resize_att2)
                resize_att2 *= 255.

                resize_att3 = cv2.resize(vis_attention3[i, 0, :], (in_x, in_y))
                resize_att3 = min_max(resize_att3)
                resize_att3 *= 255.

                resize_att4 = cv2.resize(vis_attention4[i, 0, :], (in_x, in_y))
                resize_att4 = min_max(resize_att4)
                resize_att4 *= 255.

                v_img = np.uint8(v_img)
                v_img2 = np.uint8(v_img2)
                v_img3 = np.uint8(v_img3)
                v_img4 = np.uint8(v_img4)
                vis_map1 = np.uint8(resize_att1)
                jet_map1 = cv2.applyColorMap(vis_map1, cv2.COLORMAP_JET)
                jet_map1 = cv2.addWeighted(v_img, 0.5, jet_map1, 0.5, 0)

                vis_map2 = np.uint8(resize_att2)
                jet_map2 = cv2.applyColorMap(vis_map2, cv2.COLORMAP_JET)
                jet_map2 = cv2.addWeighted(v_img2, 0.5, jet_map2, 0.5, 0)

                vis_map3 = np.uint8(resize_att3)
                jet_map3 = cv2.applyColorMap(vis_map3, cv2.COLORMAP_JET)
                jet_map3 = cv2.addWeighted(v_img3, 0.5, jet_map3, 0.5, 0)

                vis_map4 = np.uint8(resize_att4)
                jet_map4 = cv2.applyColorMap(vis_map4, cv2.COLORMAP_JET)
                jet_map4 = cv2.addWeighted(v_img4, 0.5, jet_map4, 0.5, 0)

                img_concat = np.concatenate([v_img, v_img2, v_img3, v_img4],
                                            axis=1)
                jet_concat = np.concatenate(
                    [jet_map1, jet_map2, jet_map3, jet_map4], axis=1)
                img_jet_concat = np.concatenate([img_concat, jet_concat],
                                                axis=0)

                out_dir = os.path.join('outputs_convlstm_cifar10')
                # cv2.imwrite does not create missing directories, so make sure
                # the attention_con subdirectory used below exists
                os.makedirs(os.path.join(out_dir, 'attention_con'), exist_ok=True)
                #out_path = os.path.join(out_dir, 'attention1', '{0:06d}.png'.format(count))
                #cv2.imwrite(out_path, jet_map1)

                #out_path = os.path.join(out_dir, 'attention2', '{0:06d}.png'.format(count))
                #cv2.imwrite(out_path, jet_map2)

                #out_path = os.path.join(out_dir, 'attention3', '{0:06d}.png'.format(count))
                #cv2.imwrite(out_path, jet_map3)

                out_path = os.path.join(out_dir, 'attention_con',
                                        '{0:06d}.png'.format(count))
                cv2.imwrite(out_path, img_jet_concat)

                #out_path = os.path.join(out_dir, 'raw', '{0:06d}.png'.format(count))
                #cv2.imwrite(out_path, v_img)
                count += 1
                i += 1
            # intentional stop: inspect the results after visualizing one batch
            import pdb
            pdb.set_trace()
        #'''

        loss = criterion(outputs, targets)
        #print(torch.max(outputs))
        #print(torch.max(targets))

        #print(attention.min(), attention.max())

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
            batch=batch_idx + 1,
            size=len(testloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            top1=top1.avg,
            top5=top5.avg,
        )
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
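
The attention visualization above scales each attention map with a min_max function before converting it to a heat map. min_max is not shown; a minimal sketch of the assumed min-max normalization to the [0, 1] range:

import numpy as np

def min_max(x, eps=1e-8):
    """Rescale an array to [0, 1]; eps guards against a constant map."""
    x_min = x.min()
    x_max = x.max()
    return (x - x_min) / (x_max - x_min + eps)
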
Code example #3
def train(trainloader, model, criterion, optimizer, epoch, use_cuda):
    # switch to train mode
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    csv_list = glob.glob("./dataset3/CUB-200-2011_rename_fixations/*/*_fixtaions.csv")
    #csv_list = glob.glob("./dataset/CUB-200-2011_rename_fixations/001.Black_footed_Albatross/*_fixtaions.csv")

    bar = Bar('Processing', max=len(trainloader))
    for batch_idx, (inputs, targets, csv) in enumerate(trainloader):
        # measure data loading time
        data_time.update(time.time() - end)

        if use_cuda:
            inputs, targets, csv = inputs.to(device=device), targets.to(device=device), csv.to(device=device)
        csv_path = csv  # per-sample index into csv_list, used below
        targets = targets - 1  # absolutely necessary: shift labels to start at 0
        
        # repeat each label 4 times so it matches the 4 slices produced per image below
        targets = targets.repeat_interleave(4).to(device)
    
        #import pdb;pdb.set_trace()
        #import pdb;pdb.set_trace()

        #resize_crops = torch.Tensor(np.zeros((4 ,args.train_batch, 3, 32, 32))).to(device=device)
        #focus = random.randint(1, 5)

        #resize_crops_n = np.zeros((1,4,3,224,224))
        con = []

        for batch_num, inputs_batch in enumerate(inputs):
            x_pil = transforms.functional.to_pil_image(inputs_batch.cpu())
            # inputs_batch is a tensor; x_pil is its PIL conversion
            transform_inputs_img = transforms.Resize(216)  # resize as a PIL image
            #transform_inputs_img = transforms.Resize(224)
            transform_randomverticalflip = transforms.RandomVerticalFlip(p=0.5)  # these transforms operate on PIL images
            transform_randomhorizontalflip = transforms.RandomHorizontalFlip(p=0.5)
            #transform_normalize = transforms.Normalize((0.1948, 0.2155, 0.1589),(0.2333, 0.2278, 0.26106))
            origin_img = transform_inputs_img(x_pil)  # origin_img is a PIL image
            #origin_array = numpy.asarray(origin_img)  # convert the PIL image to numpy
            #d = torch.stack((a,a,a,a),0)
            #  gives torch.Size([4, 2, 3]) when a is torch.Size([2, 3])
            origin_img_a = transform_randomverticalflip(x_pil)
            origin_img_a = transform_randomhorizontalflip(origin_img_a)
            origin_img_a = transform_inputs_img(origin_img_a)
            origin_tensor = transforms.functional.to_tensor(origin_img_a)  # PIL -> tensor ([3, 216, 216])
            origin_tensor = torch.unsqueeze(origin_tensor, 0)  # torch.Size([1, 3, 216, 216])
            csv_num = int(csv_path[batch_num])  # index of this sample's fixation CSV -- is this correct?

            #TODO: switch this lookup to csv_path directly
            arr_crop = crop_fixions_aug(origin_img, csv_list[csv_num])  # returns the augmented fixation crops
            # no normalization applied at this point
            crop_len = arr_crop.shape[0]  # number of crop pieces
            x_tensor = arr_crop
            #x_tensor = torch.from_numpy(arr_crop)  # torch.Size([crop_pieces_num, 216, 216, 3])
            #x_tensor = x_tensor.permute(0,3,1,2)  # torch.Size([2, 3, 216, 216])

            #import pdb;pdb.set_trace()
            # assemble exactly 4 slices per image: the augmented original
            # followed by up to 3 fixation crops, padding with copies of the
            # original when fewer crops are available
            n_crops = x_tensor.shape[0]
            if n_crops >= 3:
                output4 = torch.cat([origin_tensor, x_tensor[:3]], dim=0)  # torch.Size([4, 3, 216, 216])
            else:
                pad = origin_tensor.repeat(3 - n_crops, 1, 1, 1)
                output4 = torch.cat([origin_tensor, pad, x_tensor], dim=0)
            # warning observed here during training (cause unclear):
            # sklearn/cluster/_mean_shift.py: UserWarning: Binning data failed with
            # provided bin_size=13.829603, using data points as seeds.
            con.append(output4)
        resize_crops = torch.cat(con, dim=0).to(device)
        #import pdb;pdb.set_trace()
        #per_outputs, _ = model(resize_crops)
        #import pdb;pdb.set_trace()
        # compute output
        
        #per_outputs, _ = model(resize_crops[0], resize_crops[1], resize_crops[2], resize_crops[3])
        #import pdb;pdb.set_trace()
        per_outputs = model(resize_crops)

        #import pdb;pdb.set_trace()
        
        loss = criterion(per_outputs, targets)
        #import pdb;pdb.set_trace()
        print("outputs = " + str(torch.argmax(per_outputs)))
        print("targets = " + str(torch.argmax(targets)))
        # what is this for?
        #print("targets(380) = " + str(targets))
        #print("csv_path(381) = " + str(csv_path))

        # measure accuracy and record loss
        prec1, prec5 = accuracy(per_outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        #import pdb;pdb.set_trace()

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix  = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                    batch=batch_idx + 1,
                    size=len(trainloader),
                    data=data_time.avg,
                    bt=batch_time.avg,
                    total=bar.elapsed_td,
                    eta=bar.eta_td,
                    loss=losses.avg,
                    top1=top1.avg,
                    top5=top5.avg,
                    )
        bar.next()
    bar.finish()
    #import pdb;pdb.set_trace()
    return (losses.avg, top1.avg)
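
In training (example #3) every one of the four slices is scored against its own copy of the label, while in testing (example #1) the four logit rows of an image are averaged before the loss. A toy sketch of that shape handling with placeholder values (batch, slices and num_classes here are illustrative, not taken from the code above):

import torch
import torch.nn.functional as F

batch, slices, num_classes = 2, 4, 200
logits = torch.randn(batch * slices, num_classes)  # one logit row per slice
labels = torch.tensor([7, 42])                     # one label per original image

# Training: repeat each label 4 times and score every slice independently.
train_targets = labels.repeat_interleave(slices)   # shape [batch * slices]
train_loss = F.cross_entropy(logits, train_targets)

# Testing: average the 4 logit rows of each image, then score once per image.
test_logits = logits.view(batch, slices, num_classes).mean(dim=1)  # [batch, num_classes]
test_loss = F.cross_entropy(test_logits, labels)
print(train_loss.item(), test_loss.item())
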
Code example #4
def train(trainloader, model, criterion, optimizer, epoch, use_cuda):
    # switch to train mode
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    csv_list = glob.glob(
        "./dataset/CUB-200-2011_rename_fixations/*/*_fixtaions.csv")
    #csv_list = glob.glob("./dataset/CUB-200-2011_rename_fixations/001.Black_footed_Albatross/*_fixtaions.csv")

    bar = Bar('Processing', max=len(trainloader))
    for batch_idx, (inputs, targets, csv) in enumerate(trainloader):
        # measure data loading time
        data_time.update(time.time() - end)

        if use_cuda:
            inputs, targets, csv = inputs.to(device=device), targets.to(
                device=device), csv.to(device=device)
        csv_path = csv  # per-sample index into csv_list, used below
        targets = targets - 1  # absolutely necessary: shift labels to start at 0
        #import pdb;pdb.set_trace()
        #import pdb;pdb.set_trace()

        #resize_crops = torch.Tensor(np.zeros((4 ,args.train_batch, 3, 32, 32))).to(device=device)
        #focus = random.randint(1, 5)

        #resize_crops_n = np.zeros((1,4,3,224,224))
        con = []

        for batch_num, inputs_batch in enumerate(inputs):
            x_numpy = inputs_batch.to('cpu').detach().numpy().copy()
            x_numpy = np.transpose(x_numpy, (1, 2, 0))
            x_numpy = (x_numpy * 255).astype(np.uint8)

            img = Image.fromarray(x_numpy)
            origin_img = img.resize((168, 168))
            origin_img = np.asarray(origin_img)
            origin_img = np.asarray(origin_img, np.float32)
            origin_img = origin_img / np.max(origin_img)

            origin_img = origin_img[np.newaxis, :, :, :]

            csv_num = int(csv_path[batch_num])  # index of this sample's fixation CSV -- is this correct?

            arr_crop = crop_fixions(img, csv_list[csv_num])
            crop_len = (arr_crop.shape[0])
            arr_crop = np.asarray(arr_crop, np.float32)

            con_numpy = np.concatenate([origin_img, arr_crop], 0)
            # pad with the resized original (fewer than 3 crops) or keep only the
            # last 3 crops (more than 3), so every image yields exactly 4 slices
            n = con_numpy.shape[0]
            if n < 4:
                pad = np.repeat(origin_img, 4 - n, axis=0)
                con_numpy = np.concatenate([con_numpy, pad], 0)
            elif n > 4:
                con_numpy = np.concatenate([con_numpy[:1], con_numpy[-3:]], 0)

            con.append(con_numpy)

        con_n = np.array(con)
        #import pdb;pdb.set_trace()
        con_n = con_n.astype(np.float32)
        #import pdb;pdb.set_trace()
        resize_crops = torch.from_numpy(con_n).clone()
        resize_crops = resize_crops.permute(1, 0, 4, 2, 3)
        resize_crops = resize_crops.to(device)

        #import pdb;pdb.set_trace()
        # compute output

        per_outputs, _ = model(resize_crops[0], resize_crops[1],
                               resize_crops[2], resize_crops[3])

        loss = criterion(per_outputs, targets)
        #import pdb;pdb.set_trace()
        print("outputs = " + str(torch.argmax(per_outputs)))
        print("targets = " + str(torch.argmax(targets)))

        # measure accuracy and record loss
        prec1, prec5 = accuracy(per_outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
            batch=batch_idx + 1,
            size=len(trainloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            top1=top1.avg,
            top5=top5.avg,
        )
        bar.next()
    bar.finish()
    #import pdb;pdb.set_trace()
    return (losses.avg, top1.avg)
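
All four functions assume module-level imports and globals that are not part of these snippets. A sketch of what the surrounding module would need to provide; the concrete values (device selection, save_attention_map) are assumptions:

import glob
import os
import time

import cv2
import numpy as np
import torch
from PIL import Image
from progress.bar import Bar        # Bar(...), bar.suffix, bar.next(), bar.elapsed_td, bar.eta_td
from torchvision import transforms

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
best_acc = 0.0                      # referenced via `global best_acc` in test()
save_attention_map = False          # set True to dump attention visualizations in test()

# crop_fixions / crop_fixions_aug (fixation-based cropping), AverageMeter,
# accuracy and min_max are project-specific helpers defined elsewhere.
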