コード例 #1
0
def noise_tensor(size):
    """Return a (size, 150) batch of standard-normal noise vectors.

    The tensor is moved to the GPU when CUDA is available, otherwise the
    CPU tensor is returned as-is.  (The original called ``n.cpu()`` and
    discarded the result — a no-op, removed here.)
    """
    n = Variable(torch.randn(size, 150))
    print(str(n.size()) + " noise size")
    if torch.cuda.is_available():
        return n.cuda()
    return n
コード例 #2
0
def noise_tensor(size):
    """Return a (size, 100) batch of standard-normal noise vectors.

    Moved to the GPU when CUDA is available; on CPU the size is printed
    before returning.  Fixes the original ``print(n.size() + "")``, which
    raised ``TypeError`` (a ``torch.Size`` cannot be concatenated with a
    ``str``), and drops the discarded no-op ``n.cpu()`` call.
    """
    n = Variable(torch.randn(size, 100))
    if torch.cuda.is_available():
        return n.cuda()
    # fixed: torch.Size must be converted to str before printing
    print(str(n.size()))
    return n
コード例 #3
0
def image_tensor(path):
    """Load the image at ``path``, convert it to a tensor, apply ``compose``
    and wrap the result in a Variable (on the GPU when CUDA is available)."""
    to_tensor = transforms.ToTensor()
    loaded = Image.open(path)
    tensor = Variable(compose(to_tensor(loaded)))
    tensor.cpu()
    if not torch.cuda.is_available():
        return tensor
    return tensor.cuda()
コード例 #4
0
def fake_data_target(size):
    '''
    Tensor containing zeros, with shape = (size, 1): the discriminator
    target for fake samples.  Fixed: the original called
    ``torch.zeros(size, 1).cuda()`` unconditionally, crashing on CPU-only
    machines even though the availability check below exists.
    '''
    print("fake target")
    data = Variable(torch.zeros(size, 1))
    if torch.cuda.is_available():
        return data.cuda()
    return data
コード例 #5
0
def real_data_target(size):
    '''
    Tensor containing ones, with shape = (size, 1): the discriminator
    target for real samples.  Fixed: the original called
    ``torch.ones(size, 1).cuda()`` unconditionally, crashing on CPU-only
    machines even though the availability check below exists.
    '''
    print("real target")
    data = Variable(torch.ones(size, 1))
    if torch.cuda.is_available():
        return data.cuda()
    return data
コード例 #6
0
def image_tensor(path, size):
    """Load ``path``, apply ``compose`` and reshape in place to (size, 100),
    printing the resulting seed size; returns a GPU tensor when CUDA is
    available, the CPU tensor otherwise."""
    seed = Variable(compose(Image.open(path)))
    seed.resize_(size, 100)
    seed.cpu()
    print(str(seed.size()) + " image seed size")
    if not torch.cuda.is_available():
        return seed
    return seed.cuda()
コード例 #7
0
    def __process_predict(val_loader, model, criterion):
        """Run an evaluation pass of ``model`` over ``val_loader``.

        Each batch provides ``local_batch`` = [face, left-eye, right-eye,
        face-grid] tensors (images in NHWC layout) and ``local_labels`` =
        the gaze target.  The criterion's running mean and a per-sample
        Euclidean error are accumulated in ``losses`` / ``losses_lin`` but
        are not returned.

        Returns a ``(predictions, y)`` pair of concatenated numpy arrays:
        model outputs and ground-truth gaze values.

        NOTE(review): no ``self``/``cls`` parameter despite method-level
        indentation — confirm how the enclosing class invokes this.
        """
        losses = AverageMeter()
        losses_lin = AverageMeter()

        model.eval()

        prediction_list = list()
        act_list = list()

        for local_batch, local_labels in val_loader:
            # NHWC -> NCHW; the trailing .cuda() forces GPU placement on top
            # of .to(DEVICE) — NOTE(review): fails on CPU-only machines.
            image_face = (local_batch[0]).to(DEVICE).permute(0, 3, 1,
                                                             2).float().cuda()
            image_left_eye = (local_batch[1]).to(DEVICE).permute(
                0, 3, 1, 2).float().cuda()
            image_right_eye = (local_batch[2]).to(DEVICE).permute(
                0, 3, 1, 2).float().cuda()
            face_grid = (local_batch[3]).to(DEVICE).float().cuda()
            gaze = torch.t(torch.stack(local_labels).to(DEVICE).float()).cuda()
            # image_face = (local_batch[0]).to(DEVICE).permute(0, 3, 1, 2).float()
            # image_left_eye = (local_batch[1]).to(DEVICE).permute(0, 3, 1, 2).float()
            # image_right_eye = (local_batch[2]).to(DEVICE).permute(0, 3, 1, 2).float()
            # face_grid = (local_batch[3]).to(DEVICE).float()
            # gaze = torch.t(torch.stack(local_labels).to(DEVICE).float())

            # wrap without gradients for inference
            image_face = Variable(image_face, requires_grad=False)
            image_left_eye = Variable(image_left_eye, requires_grad=False)
            image_right_eye = Variable(image_right_eye, requires_grad=False)
            face_grid = Variable(face_grid, requires_grad=False)
            gaze = Variable(gaze, requires_grad=False)

            with torch.no_grad():
                output = model(image_face, image_left_eye, image_right_eye,
                               face_grid)

            loss = criterion(output, gaze)

            act_list.append(gaze.cpu().numpy())
            prediction_list.append(output.cpu().numpy())

            # mean Euclidean distance between predicted and true gaze
            loss_lin = output - gaze
            loss_lin = torch.mul(loss_lin, loss_lin)
            loss_lin = torch.sum(loss_lin, 1)
            loss_lin = torch.mean(torch.sqrt(loss_lin))

            losses.update(loss.data.item(), image_face.size(0))
            losses_lin.update(loss_lin.item(), image_face.size(0))

        predictions = np.concatenate(prediction_list)
        y = np.concatenate(act_list)

        return predictions, y
コード例 #8
0
def extract_feature(val_loader, model):
    """Extract penultimate-layer features for every batch in ``val_loader``.

    A deep copy of ``model`` is made and its final fully-connected layer is
    dropped, so the forward pass yields the feature vector feeding that
    layer.  ``model`` itself is left untouched apart from being switched to
    eval mode, as the original code did.

    Returns
    -------
    (features, y) : concatenated numpy arrays of extracted features and
        ground-truth gaze targets.
    """
    feature_extraction_model = copy.deepcopy(model)
    # Trim the copy's own head.  (The original built new_fc from
    # ``model.fc``, silently sharing the original model's submodules with
    # the copy; weights are identical so outputs are unchanged.)
    new_fc = torch.nn.Sequential(
        *list(feature_extraction_model.fc.children())[:-1])
    feature_extraction_model.fc = new_fc

    # switch to evaluate mode — the copy must be switched too; the original
    # only called model.eval(), leaving the deep-copied network in training
    # mode (dropout / batch-norm active) during feature extraction.
    model.eval()
    feature_extraction_model.eval()

    feature_list = list()
    act_list = list()
    for local_batch, local_labels in val_loader:
        # NHWC -> NCHW; the unconditional .cuda() assumes a GPU is present.
        image_face = (local_batch[0]).to(DEVICE).permute(0, 3, 1,
                                                         2).float().cuda()
        image_left_eye = (local_batch[1]).to(DEVICE).permute(0, 3, 1,
                                                             2).float().cuda()
        image_right_eye = (local_batch[2]).to(DEVICE).permute(
            0, 3, 1, 2).float().cuda()
        face_grid = (local_batch[3]).to(DEVICE).float().cuda()
        gaze = torch.t(torch.stack(local_labels).to(DEVICE).float()).cuda()

        # wrap without gradients for inference
        image_face = Variable(image_face, requires_grad=False)
        image_left_eye = Variable(image_left_eye, requires_grad=False)
        image_right_eye = Variable(image_right_eye, requires_grad=False)
        face_grid = Variable(face_grid, requires_grad=False)
        gaze = Variable(gaze, requires_grad=False)

        # compute output without building an autograd graph
        with torch.no_grad():
            output = feature_extraction_model(image_face, image_left_eye,
                                              image_right_eye, face_grid)

        feature_list.append(output.cpu().numpy())
        act_list.append(gaze.cpu().numpy())
    features = np.concatenate(feature_list)
    y = np.concatenate(act_list)

    return features, y
コード例 #9
0
# Move the discriminator to the GPU and build the loss functions.
D.cuda()

G_criterionLoss = GeneratorLoss().cuda()
D_criterionloss = nn.BCEWithLogitsLoss()

# Inference only: both networks in eval mode.
G.eval()
D.eval()


with torch.no_grad():
    for ix,(lr1, lr2, lr3, name) in enumerate(test_loader):
        y_vl_dim=lr1.size()[-1]

        # Skip samples whose three input maps disagree in size.
        if(lr1.size()[-1]!=lr2.size()[-1] or lr1.size()[-1]!=lr3.size()[-1]):
            continue

        # Depth-stack the three maps into a single batch of one sample.
        lr_batched=np.array([np.dstack((lr1.detach().tolist()[0],lr2.detach().tolist()[0],lr3.detach().tolist()[0]))])
        lr_batched=np.array(lr_batched)

        # NHWC -> NCHW and onto the GPU for the generator forward pass.
        lr_batched=torch.from_numpy(lr_batched).float()
        val_z=Variable(lr_batched.permute(0,3,1,2))
        val_z=val_z.cuda()
        sr_test = G(val_z)

        output_map=sr_test.cpu().data.numpy()
        ccm_map=val_z.cpu().data.numpy()

        if not os.path.exists(os.path.join('./data/output_dir/')):
            os.mkdir('./data/output_dir/')
        # NOTE(review): os.path.join treats 'Original_' / 'ContactGAN_' as
        # directory components that are never created, so np.save targets
        # './data/output_dir/Original_/<name>.npy' — presumably a filename
        # prefix was intended; confirm.
        np.save(os.path.join('./data/output_dir/Original_','{}.npy'.format(name[0])), ccm_map)
        np.save(os.path.join('./data/output_dir/ContactGAN_','{}.npy'.format(name[0])), output_map)
コード例 #10
0
def test(model,
         test_generator,
         total_size,
         loss,
         input_channels,
         confusion_matrix=None,
         conv=True,
         one_trace=False,
         verbose=False,
         elec=False,
         output_size=2,
         exo=False,
         three_convs=False,
         clf=[]):
    '''
    Function to compute results in test set given a model.
    @model: model to use
    @test_generator: iterable of batches; the tuple layout depends on the
        ``conv`` / ``one_trace`` / ``three_convs`` / ``exo`` flags (see the
        input-building branches below).
    @total_size: dataset size; only used for the verbose summary line.
    @loss: criterion called as ``loss(predicted, output)``.
    @input_channels: accepted but not used inside this function.
    @confusion_matrix: accepted but not used; a fresh
        ``meter.ConfusionMeter(output_size)`` is created locally as ``cm``.
    @clf: optional fitted classifier; when non-empty the network output is
        treated as a feature vector and ``clf.predict`` produces the final
        labels (mean absolute error is then used as the loss).
        NOTE(review): mutable default ``clf=[]`` kept as-is.
    Returns ``(mean_loss, accuracy_percent, cm)``; the first ``return``
    applies when the losses are numpy values, the fallback stacks torch
    tensors instead.
    '''
    total_loss = []
    correct = 0
    total = 0
    cm = meter.ConfusionMeter(output_size)
    with torch.no_grad():
        for values in tqdm(test_generator):
            # ---- build the network input according to the layout flags ----
            if (conv and one_trace and not three_convs):
                if (elec):

                    # single trace with electrode axis: (B, 1, d1, d2)
                    inp = Variable(values[0]).view(values[0].size(0), 1,
                                                   values[0].size(1),
                                                   values[0].size(2))

                    # inp = Variable(values[0]).view(values[0].size(0), 1, values[0].size(1))
                    inp = inp.cuda()
                else:
                    # single trace without electrode axis: (B, 1, d1);
                    # NOTE(review): this branch never moves ``inp`` to the
                    # GPU, unlike its siblings — confirm intended.
                    inp = Variable(values[0]).view(values[0].size(0), 1,
                                                   values[0].size(1))
            elif (conv and not three_convs):
                inp = Variable(values[0]).view(values[0].size(0),
                                               values[0].size(1),
                                               values[0].size(2))
                inp = inp.cuda()
            elif (conv and three_convs):
                # three separate tank inputs; NOTE(review): the second axis
                # always comes from values[0].size(1) — confirm all three
                # tanks share that dimension.
                inp_tank1 = Variable(values[0]).view(values[0].size(0), 1,
                                                     values[0].size(1),
                                                     values[0].size(2))
                inp_tank2 = Variable(values[1]).view(values[1].size(0), 1,
                                                     values[0].size(1),
                                                     values[1].size(2))
                inp_tank3 = Variable(values[2]).view(values[2].size(0), 1,
                                                     values[0].size(1),
                                                     values[2].size(2))
                inp_tank1 = inp_tank1.cuda()
                inp_tank2 = inp_tank2.cuda()
                inp_tank3 = inp_tank3.cuda()
            else:
                inp = Variable(values[0])
            # ---- pick out exogenous inputs and the target labels ----
            if (exo):
                if (one_trace):
                    inp_exo = Variable(values[1]).view(values[1].size(0),
                                                       values[1].size(1))
                    output = Variable(values[2].squeeze()).type(
                        torch.LongTensor)
                    inp_exo = inp_exo.cuda()
                    output = output.cuda()
                else:
                    if (not three_convs):
                        inp_exo = Variable(values[1]).view(
                            values[1].size(0), values[1].size(1))
                        output = Variable(values[2].squeeze()).type(
                            torch.LongTensor)
                        inp_exo = inp_exo.cuda()
                        output = output.cuda()
                    else:
                        # one exogenous tensor per tank
                        inp_exo = []
                        for value in values[3]:
                            aux = Variable(value).view(value.size(0),
                                                       value.size(1))
                            aux = aux.cuda()
                            inp_exo.append(aux)
                        output = Variable(values[4].squeeze()).type(
                            torch.LongTensor)
                        output = output.cuda()
            else:
                if (not three_convs):
                    output = Variable(values[1].squeeze()).type(
                        torch.LongTensor)
                    output = output.cuda()
                else:
                    output = Variable(values[3].squeeze()).type(
                        torch.LongTensor)
                    output = output.cuda()

            # a non-empty ``clf`` switches to SVM-on-features mode
            if (clf != []):
                usesvm = True
            else:
                usesvm = False

            # ---- forward pass ----
            if (exo):
                if (not three_convs):
                    predicted = model(inp, inp_exo)
                else:
                    predicted = model(inp_tank1, inp_tank2, inp_tank3, inp_exo,
                                      usesvm)
            elif (not three_convs):
                if (usesvm):
                    predicted = model(inp)
                else:
                    predicted = model(inp, usesvm)
            else:
                predicted = model(inp_tank1, inp_tank2, inp_tank3, svm_=usesvm)

            # ---- scoring: SVM path vs. plain network path ----
            if (clf != []):
                predicted_cpu = predicted.cpu().detach().numpy()
                output_cpu = output.cpu().detach().numpy()
                predicted = clf.predict(predicted_cpu)
                if (cm is not None):
                    # bare except: fallback for 0-d (single-sample) targets
                    try:
                        cm.add(
                            torch.tensor(predicted).data.squeeze(),
                            output.type(torch.LongTensor))
                    except:
                        cm.add(predicted.data,
                               output.unsqueeze(0).type(torch.LongTensor))
                # softmax to get the probability of the classes
                # predicted = F.softmax(predicted, dim=1)
                try:
                    # total_loss.append(loss(predicted, output))
                    # mean absolute error between SVM labels and the targets
                    computed_loss = np.abs(predicted - output_cpu)
                    computed_loss = np.asarray(
                        np.sum(computed_loss) / computed_loss.shape[0])
                    total_loss.append(computed_loss)
                except:
                    total_loss.append(loss(predicted, output.unsqueeze(0)))
                try:
                    total += output.size(0)
                except:
                    # 0-d target tensor: count as one sample
                    total += 1
                # correct += (labels == output).sum().item()
                correct += (predicted == output_cpu).sum().item()
            else:
                # class prediction = arg-max over the network output
                _, labels = torch.max(predicted.data, 1)
                if (cm is not None):
                    try:
                        cm.add(predicted.data.squeeze(),
                               output.type(torch.LongTensor))
                    except:
                        cm.add(predicted.data,
                               output.unsqueeze(0).type(torch.LongTensor))
                try:
                    total_loss.append(loss(predicted, output))
                except:
                    total_loss.append(loss(predicted, output.unsqueeze(0)))
                try:
                    total += output.size(0)
                except:
                    total += 1
                correct += (labels == output).sum().item()
    if (verbose):
        print(
            'Mean and standard deviation in dataset with size {} are: {} +- {}'
            .format(total_size, np.mean(total_loss), np.std(total_loss)))
    # np.mean works when losses are numpy values; torch tensors fall through
    # to the stacked-mean variant.
    try:
        return np.mean(total_loss), (100 * correct) / total, cm
    except:
        return torch.mean(torch.stack(total_loss),
                          dim=0), (100 * correct) / total, cm
コード例 #11
0
def train(model, training_generator, n_epochs, total_size, loss, optimizer,
          val_generator, val_size, conv, one_trace, verbose, elec, output_size,
          exo, three_convs, input_channels, usesvm):
    """Train ``model`` for ``n_epochs`` epochs, validating after each epoch.

    The batch layout of ``training_generator`` depends on the
    ``conv`` / ``one_trace`` / ``three_convs`` / ``exo`` flags, mirroring
    the branches in ``test``.  When ``usesvm`` is true the network acts as
    a feature extractor and an SVM classifier (``clf``) is refitted on each
    batch instead of running backpropagation.

    Returns ``(model.eval(), clf, results)`` where ``results`` is a dict of
    per-epoch validation/training losses, accuracies and confusion
    matrices.

    NOTE(review): depends on module-level names ``validation``, ``svm``,
    ``GridSearchCV``, ``parameters``, ``meter`` and ``tqdm`` not visible in
    this chunk.
    """
    # Loss and optimizer
    best_model = model  # NOTE(review): assigned once and never updated/used
    i = 0
    validation_losses = []
    training_losses = []
    validation_accuracy = []
    training_accuracy = []
    cms = []
    clf = []
    for epoch in range(n_epochs):
        print("Epoch {}/{}".format(epoch, n_epochs))
        batch_loss = []
        correct = 0
        total = 0
        stop = 0
        for values in tqdm(training_generator):
            # ---- build the network input according to the layout flags ----
            if (conv and one_trace and not three_convs):
                if (elec):
                    inp = Variable(values[0]).view(values[0].size(0), 1,
                                                   values[0].size(1),
                                                   values[0].size(2))
                    # inp = Variable(values[0]).view(values[0].size(0), 1, values[0].size(1))
                    inp = inp.cuda()
                else:
                    # NOTE(review): this branch never moves ``inp`` to the
                    # GPU, unlike its siblings — confirm intended.
                    inp = Variable(values[0]).view(values[0].size(0), 1,
                                                   values[0].size(1))
            elif (conv and not three_convs):
                inp = Variable(values[0]).view(values[0].size(0),
                                               values[0].size(1),
                                               values[0].size(2))
                inp = inp.cuda()
            elif (conv and three_convs):
                # three separate tank inputs; NOTE(review): the second axis
                # always comes from values[0].size(1) — confirm all three
                # tanks share that dimension.
                inp_tank1 = Variable(values[0]).view(values[0].size(0), 1,
                                                     values[0].size(1),
                                                     values[0].size(2))
                inp_tank2 = Variable(values[1]).view(values[1].size(0), 1,
                                                     values[0].size(1),
                                                     values[1].size(2))
                inp_tank3 = Variable(values[2]).view(values[2].size(0), 1,
                                                     values[0].size(1),
                                                     values[2].size(2))
                inp_tank1 = inp_tank1.cuda()
                inp_tank2 = inp_tank2.cuda()
                inp_tank3 = inp_tank3.cuda()
            else:
                inp = Variable(values[0])

            # ---- exogenous inputs and targets ----
            if (exo):
                if (not three_convs):
                    inp_exo = Variable(values[1]).view(values[1].size(0),
                                                       values[1].size(1))
                    output = Variable(values[2].squeeze()).type(
                        torch.LongTensor)
                    inp_exo = inp_exo.cuda()
                    output = output.cuda()
                else:
                    inp_exo = []
                    for value in values[3]:
                        aux = Variable(value).view(value.size(0),
                                                   value.size(1))
                        aux = aux.cuda()
                        inp_exo.append(aux)
                    output = Variable(values[4].squeeze()).type(
                        torch.LongTensor)
                    output = output.cuda()
            else:
                if (not three_convs):
                    output = Variable(values[1].squeeze()).type(
                        torch.LongTensor)
                    output = output.cuda()
                else:
                    output = Variable(values[3].squeeze()).type(
                        torch.LongTensor)
                    output = output.cuda()

            # Forward pass
            if (exo):
                if (not three_convs):
                    predicted = model(inp, inp_exo)
                else:
                    predicted = model(inp_tank1, inp_tank2, inp_tank3, inp_exo,
                                      usesvm)
            elif (not three_convs):
                if (usesvm):
                    predicted = model(inp)
                else:
                    predicted = model(inp, usesvm)
            else:
                predicted = model(inp_tank1, inp_tank2, inp_tank3, svm_=usesvm)

            if (usesvm):
                # SVM mode: fit a classifier on this batch's features; no
                # backpropagation happens on this path.
                predicted_cpu = predicted.cpu().detach().numpy()
                output_cpu = output.cpu().detach().numpy()

                # SVC needs at least two classes present in the batch.
                if (np.unique(output_cpu).shape[0] > 1):
                    svm_ = svm.SVC(kernel='rbf', gamma='scale')
                    if (not one_trace):
                        clf = GridSearchCV(svm_, parameters, cv=5)
                    else:
                        clf = svm_
                    clf.fit(predicted_cpu, output_cpu)
                    predicted = clf.predict(predicted_cpu)
                    # softmax to get the probability of the classes
                    # predicted = F.softmax(predicted, dim=1)
                    # _, labels = torch.max(predicted.data, 1)

                    try:
                        total += output.size(0)
                    except:
                        # 0-d target tensor: count as one sample
                        total += 1
                    correct += (predicted == output_cpu).sum().item()
                    # correct += (labels == output).sum().item()
                    try:
                        # mean absolute error between SVM labels and targets
                        computed_loss = np.abs(predicted - output_cpu)
                        computed_loss = np.asarray(
                            np.sum(computed_loss) / computed_loss.shape[0])
                        computed_loss = torch.tensor(computed_loss,
                                                     requires_grad=True)
                        # computed_loss = loss(predicted, output)
                    except:
                        computed_loss = loss(predicted, output.unsqueeze(0))
                    batch_loss.append(computed_loss.item())
                # NOTE(review): if every batch is single-class,
                # ``computed_loss`` is never bound and the epoch summary
                # print below raises NameError — confirm acceptable.
            else:
                # softmax to get the probability of the classes
                # predicted = F.softmax(predicted, dim=1)
                _, labels = torch.max(predicted.data, 1)
                try:
                    total += output.size(0)
                except:
                    total += 1
                correct += (labels == output).sum().item()
                try:
                    computed_loss = loss(predicted, output)
                except:
                    computed_loss = loss(predicted, output.unsqueeze(0))

                batch_loss.append(computed_loss.item())
                # Backward and optimize
                optimizer.zero_grad()
                computed_loss.backward()
                optimizer.step()
                stop += 1

        training_losses.append(np.mean(batch_loss))

        # ---- per-epoch validation ----
        model = model.eval()
        i += 1
        confusion_matrix = meter.ConfusionMeter(
            output_size)  # I have 5 classes here
        val_loss, accuracy, confusion_matrix = validation(
            model,
            val_generator,
            val_size,
            loss,
            confusion_matrix=confusion_matrix,
            conv=conv,
            one_trace=one_trace,
            verbose=verbose,
            elec=elec,
            exo=exo,
            three_convs=three_convs,
            input_channels=input_channels,
            clf=clf)
        model = model.train()
        validation_losses.append(val_loss)
        print('Training: Epoch [{}/{}], Loss: {:.4f}, Acc: {:.4f}'.format(
            epoch + 1, n_epochs, computed_loss.item(),
            (100 * correct) / total))
        print('Val: Epoch [{}/{}], Loss: {:.4f}, Acc: {:.4f}'.format(
            epoch + 1, n_epochs, val_loss, accuracy))

        validation_accuracy.append(accuracy)
        cms.append(confusion_matrix)
        training_accuracy.append((100 * correct) / total)

    results = {
        "validation_losses": validation_losses,
        "training_losses": training_losses,
        "validation_accuracy": validation_accuracy,
        "training_accuracy": training_accuracy,
        "confusion_matrix": cms
    }
    return model.eval(), clf, results
コード例 #12
0
ファイル: train_synthetic.py プロジェクト: jtpils/CSGNet
                # Move this length-bucket's batch to the GPU and run the
                # imitation network.
                one_hot_labels = Variable(
                    torch.from_numpy(one_hot_labels)).cuda()
                data = Variable(torch.from_numpy(data)).cuda()
                labels = Variable(torch.from_numpy(labels)).cuda()
                outputs = imitate_net([data, one_hot_labels, k])

                # Normalise the per-bucket loss by sequence length, number of
                # buckets and trajectories so each contributes evenly.
                loss_k = (losses_joint(outputs, labels, time_steps=k + 1) / (
                    k + 1)) / len(data_labels_paths.keys()) / config.num_traj
                loss_k.backward()
                loss += loss_k.data
                # free the graph-bearing tensor before the next bucket
                del loss_k

        optimizer.step()
        train_loss += loss
        log_value('train_loss_batch',
                  loss.cpu().numpy(),
                  epoch * (config.train_size //
                           (config.batch_size * config.num_traj)) + batch_idx)

    mean_train_loss = train_loss / (config.train_size // (config.batch_size))
    log_value('train_loss', mean_train_loss.cpu().numpy(), epoch)
    # ---- evaluation phase ----
    imitate_net.eval()
    loss = Variable(torch.zeros(1)).cuda()
    metrics = {"cos": 0, "iou": 0, "cd": 0}
    IOU = 0
    COS = 0
    CD = 0
    for batch_idx in range(config.test_size // (config.batch_size)):
        parser = ParseModelOutput(generator.unique_draw, max_len // 2 + 1, max_len,
                          config.canvas_shape)
        for k in data_labels_paths.keys():
コード例 #13
0
ファイル: denoising_gan.py プロジェクト: kiharalab/ContactGAN
                    # Wrap the ground-truth map as a single-sample batch on
                    # the GPU.
                    val_target_batched = [hr.detach().tolist()]
                    val_target_batched=np.array(val_target_batched)
                    val_target_batched=torch.from_numpy(val_target_batched).float()
                    val_target=Variable(val_target_batched)
                    val_target=val_target.cuda()
                    # Mean discriminator scores for real and generated maps.
                    hr_test=D(val_target).mean()
                    hr_fake=D(sr_test).mean()

                    G_loss_valid=G_criterionLoss(hr_fake, sr_test, val_target)
                    # NOTE(review): score-difference critic objective; the
                    # BCE variant below is kept commented for reference.
                    D_loss_valid=1 - hr_test + hr_fake
                    # D_loss_valid = D_criterionloss(hr_fake,hr_test)
    #                 print(torch.cuda.memory_allocated())


                    true_map=val_target.cpu().data.numpy()
                    output_map=sr_test.cpu().data.numpy()
                    ccm_map=val_z.cpu().data.numpy()
                    # Accuracy of the GAN output and of the raw input map,
                    # both against the ground truth.
                    gan_accuracy_info=evaluate(true_map[0],output_map[0])
                    ccm_accuracy_info=evaluate(true_map[0],ccm_map[0])
                    gan_accuracy[ix] = gan_accuracy_info
                    ccm_accuracy[ix] = ccm_accuracy_info

                for r in [10, 5, 2, 1]:
                    for type in ['short', 'medium', 'long']:
                        # Average top-L/r score across validation samples.
                        top_l_r = str('L/{}'.format(r))
                        score = np.average([info[top_l_r][type] for info in gan_accuracy.values()])
                        ccmScore=np.average([info[top_l_r][type] for info in ccm_accuracy.values()])

                        print('ContactGAN - For {} and {}-range contacts: {}'.format(top_l_r, type, score),flush=True)
                        print('METHOD - For {} and {}-range contacts: {}'.format(top_l_r, type, ccmScore),flush=True)
コード例 #14
0
def val_epoch(cfg, epoch, model, writer, use_cuda, args):
    """Run one validation epoch and log the metrics to ``writer``.

    Features for each video in ``cfg.test_list`` are read from the HDF5
    file selected by ``args.input_type`` ('rgb', 'flow' or 'combined'),
    optionally regrouped into fixed-size clips according to
    ``args.eval_mode`` ('truncate', 'pad' or 'slide'), and pushed through
    ``model``; F1, precision, recall and average precision are reported.

    ``use_cuda`` is accepted for interface compatibility; inputs are moved
    with ``.cuda()`` directly.

    Returns
    -------
    float
        Mean average precision (nanmean across classes).
    """
    print('validation at epoch {}'.format(epoch))

    num_gpus = len(args.gpu.split(','))  # kept for parity; not used below

    model.eval()

    video_list = [line.rstrip().replace('.txt', '') for line in open(cfg.test_list, 'r').readlines()]

    if args.input_type == 'rgb':
        assert os.path.exists(cfg.rgb_test_file)
        data = h5.File(cfg.rgb_test_file, 'r')
    elif args.input_type == 'flow':
        assert os.path.exists(cfg.flow_test_file)
        data = h5.File(cfg.flow_test_file, 'r')
    elif args.input_type == 'combined':
        assert os.path.exists(cfg.combined_test_file)
        data = h5.File(cfg.combined_test_file, 'r')
    else:
        # Fail fast: previously an unknown input_type surfaced later as an
        # UnboundLocalError on ``data``.
        raise ValueError('unknown input_type: {}'.format(args.input_type))

    initial_predictions, predictions, ground_truth = [], [], []
    for i, video in tqdm(enumerate(video_list)):
        features = data[video]
        labels = build_labels(video, cfg.annotations_file, len(features), cfg.num_classes, args.add_background)

        # An optional background class is appended as the last column.
        if args.add_background:
            num_classes = cfg.num_classes + 1
        else:
            num_classes = cfg.num_classes

        features = np.array(features)
        labels = np.array(labels)
        features = Variable(torch.from_numpy(features).type(torch.FloatTensor))
        labels =  Variable(torch.from_numpy(labels).type(torch.FloatTensor))
        assert len(features) == len(labels)

        with torch.no_grad():
            if args.num_clips > 0:
                eval_mode = args.eval_mode
                # A video shorter than one clip can only be padded.
                if len(features) < args.num_clips:
                    eval_mode = 'pad'
                if eval_mode == 'truncate':
                    # Drop the tail so length is a multiple of num_clips,
                    # then regroup into (windows, num_clips, dim).
                    features = features[0:len(features) - (len(features) % args.num_clips)]
                    labels = labels[0:len(labels) - (len(labels) % args.num_clips)]
                    features = torch.stack([features[i:i + args.num_clips] for i in range(0, len(features), args.num_clips)])
                    labels = torch.stack([labels[i:i + args.num_clips] for i in range(0, len(labels), args.num_clips)])
                elif eval_mode == 'pad':
                    # Zero-pad the tail up to a multiple of num_clips.
                    features_to_append = torch.zeros(args.num_clips - len(features) % args.num_clips, features.shape[1])
                    labels_to_append = torch.zeros(args.num_clips - len(labels) % args.num_clips, labels.shape[1])
                    features = torch.cat((features, features_to_append), 0)
                    labels = torch.cat((labels, labels_to_append), 0)
                    features = torch.stack([features[i:i + args.num_clips] for i in range(0, len(features), args.num_clips)])
                    labels = torch.stack([labels[i:i + args.num_clips] for i in range(0, len(labels), args.num_clips)])
                elif eval_mode == 'slide':
                    # Overlapping windows advanced by slide_rate frames.
                    slide_rate = 16
                    features_to_append = torch.zeros(slide_rate - len(features) % slide_rate, features.shape[-1])
                    labels_to_append = torch.zeros(slide_rate - len(labels) % slide_rate, labels.shape[-1])
                    features = torch.cat((features, features_to_append), 0)
                    labels = torch.cat((labels, labels_to_append), 0)
                    features = torch.stack([features[i:i + args.num_clips] for i in range(0, len(features) - args.num_clips + 1, slide_rate)])
                    labels = torch.stack([labels[i:i + args.num_clips] for i in range(0, len(labels) - args.num_clips + 1, slide_rate)])
                assert len(features) > 0
            else:
                features = torch.unsqueeze(features, 0)
                labels = torch.unsqueeze(labels, 0)

            features = features.cuda()
            labels = labels.cuda()

            out = model(features)
            outputs = out['final_output']
            initial = out['init_output']

            # Multi-label setting: independent sigmoid per class.
            outputs = nn.Sigmoid()(outputs)
            initial = nn.Sigmoid()(initial)

            outputs = outputs.reshape(-1, num_classes)
            initial = initial.reshape(-1, num_classes)
            labels = labels.reshape(-1, num_classes)
            outputs = outputs.cpu().data.numpy()
            initial = initial.cpu().data.numpy()
            labels = labels.cpu().data.numpy()

        assert len(outputs) == len(labels)
        predictions.extend(outputs)
        ground_truth.extend(labels)
        initial_predictions.extend(initial)

    ground_truth = np.array(ground_truth)
    predictions = np.array(predictions)
    initial_predictions = np.array(initial_predictions)

    avg_precision_score = average_precision_score(ground_truth, predictions, average=None)
    initial_avg_precision_score = average_precision_score(ground_truth, initial_predictions, average=None)
    predictions = (np.array(predictions) > args.f1_threshold).astype(int)
    ground_truth = (np.array(ground_truth) > args.f1_threshold).astype(int)
    results_actions = precision_recall_fscore_support(np.array(ground_truth), np.array(predictions), average=None)
    f1_scores, precision, recall = results_actions[2], results_actions[0], results_actions[1]

    if args.add_background:
        # Drop the background class from the reported per-class scores.
        avg_precision_score = avg_precision_score[:-1]
        initial_avg_precision_score = initial_avg_precision_score[:-1]
        # Fixed: the original wrote ``f1_score[:-1]``, slicing the
        # (imported or undefined) ``f1_score`` function instead of the
        # local ``f1_scores`` array.
        f1_scores = f1_scores[:-1]
        # NOTE(review): precision/recall are not trimmed here — confirm
        # whether the background class should also be dropped from them.
    print('Validation Epoch: %d, F1-Score: %s' % (epoch, str(f1_scores)), flush=True)
    print('Validation Epoch: %d, Average Precision: %s' % (epoch, str(avg_precision_score)), flush=True)
    print('Validation Epoch: %d, F1-Score: %4f, mAP: %4f, initial mAP: %4f' 
                                                        % (epoch, np.nanmean(f1_scores), np.nanmean(avg_precision_score), np.nanmean(initial_avg_precision_score)), flush=True)

    writer.add_scalar('Validation F1 Score', np.nanmean(f1_scores), epoch)
    writer.add_scalar('Validation Precision', np.nanmean(precision), epoch)
    writer.add_scalar('Validation Recall', np.nanmean(recall), epoch)
    writer.add_scalar('Validation AP', np.nanmean(avg_precision_score), epoch)
    return np.nanmean(avg_precision_score)