def train(epochs):
    for epoch in range(epochs):
        logger.Print(f"|~~~~~~~~~~~~~~~~~~~~~~~~Training epoch:{epoch}~~~~~~~~~~~~~~~~~~~~~~~~~~~|")     
        model.train()     
        scheduler.step()
        print(epoch, scheduler.get_lr()[0])
        for itr, data in enumerate(train_data,1):
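            # each batch yields five views of the same sample (RGB, depth, IR, HSV, YCbCr) plus the label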
            rgb_img = data[0]
            depth_img = data[1]
            ir_img = data[2]
            hsv_img = data[3]
            ycb_img = data[4]
            labels = data[5]

            if use_cuda:
                rgb_img = rgb_img.cuda()
                depth_img = depth_img.cuda()
                ir_img = ir_img.cuda()
                hsv_img = hsv_img.cuda()
                ycb_img = ycb_img.cuda()
                labels = labels.cuda()

            optimizer.zero_grad()
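            # this variant feeds only the HSV and YCbCr streams to the model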
            outputs = model(hsv_img, ycb_img)
            # class-1 softmax probability, used both for the 0.5 threshold and as the score passed to eval_fun
            prob_outputs = torch.softmax(outputs.data, 1)[:, 1]
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            if itr % 200 == 0:
                message = f'|epoch:{epoch}-iter:{itr}|loss:{loss:.6f}|'
                logger.Print(message)
                y_prob_list = []
                y_pLabel_list = []
                y_label_list = []
                for i in range(len(labels)):
                    if prob_outputs[i] > 0.5: 
                        y_pLabel_list.append(1)
                    else:
                        y_pLabel_list.append(0)
                y_prob_list = prob_outputs.data.cpu().numpy()
                y_label_list = labels.data.cpu().numpy()
                eval_result = eval_fun(y_prob_list,y_pLabel_list,y_label_list)
                logger.Print(eval_result)
                loss_history.append(loss.item())

        if epoch % 5 == 0:
            logger.Print(f"|~~~~~~~~~~~~~~~~~~~~~~~~val epoch:{epoch}~~~~~~~~~~~~~~~~~~~~~~~~~~~|")
            val(epoch, val_data)
            logger.Print(f"|~~~~~~~~~~~~~~~~~~~~~~~~test epoch:{epoch}~~~~~~~~~~~~~~~~~~~~~~~~~~~|")
            val(epoch, test_data)
 

    plot_figure(save_path, loss_history)
    loss_np = np.array(loss_history)
    np.save(save_path + '/loss.npy', loss_np)
Example 2
def val(epoch=0):
    logger.Print(
        f"|~~~~~~~~~~~~~~~~~~~~~~~~val epoch:{epoch}~~~~~~~~~~~~~~~~~~~~~~~~~~~|"
    )
    model.eval()
    y_prob_list = []
    y_pLabel_list = []
    y_label_list = []
    for itr, data in enumerate(val_data, 1):
        rgb_img = data[0]
        depth_img = data[1]
        ir_img = data[2]
        hsv_img = data[3]
        YCbCr_img = data[4]
        labels = data[5]
        if use_cuda:
            rgb_img = rgb_img.cuda()
            depth_img = depth_img.cuda()
            ir_img = ir_img.cuda()
            hsv_img = hsv_img.cuda()
            YCbCr_img = YCbCr_img.cuda()
            labels = labels.cuda()

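        # crop patches from the IR image and feed all five modality streams plus the crops to the model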
        img_crops = image_crop_f(ir_img)
        features, outputs = model(rgb_img, depth_img, ir_img, hsv_img,
                                  YCbCr_img, img_crops)
        prob_outputs = F.softmax(outputs, 1)[:, 1]  # probability of predicting class 1

        for i in range(len(labels)):
            if prob_outputs[i] > 0.5:
                y_pLabel_list.append(1)
            else:
                y_pLabel_list.append(0)
            y_prob_list.append(prob_outputs[i].data.cpu().numpy())
            y_label_list.append(labels[i].data.cpu().numpy())

    eval_fun(y_prob_list, y_pLabel_list, y_label_list, logger)
Example 3
def val(epoch=0, data_set=val_data):
    y_prob_list = []
    y_pLabel_list = []
    y_label_list = []
    model.eval()
    with open(save_path + f'/prob_{epoch}.txt', 'w') as fb:
        with torch.no_grad():
            for itr, data in enumerate(data_set, 1):
                rgb_img = data[0]
                depth_img = data[1]
                ir_img = data[2]
                hsv_img = data[3]
                ycb_img = data[4]
                labels = data[5]
                if use_cuda:
                    rgb_img = rgb_img.cuda()
                    depth_img = depth_img.cuda()
                    ir_img = ir_img.cuda()
                    hsv_img = hsv_img.cuda()
                    ycb_img = ycb_img.cuda()
                    labels = labels.cuda()

                outputs = model(ir_img)
                prob_outputs = F.softmax(outputs, 1)[:, 1]  # probability of predicting class 1
                for i in range(len(labels)):
                    if prob_outputs[i] > 0.5:
                        y_pLabel_list.append(1)
                    else:
                        y_pLabel_list.append(0)
                    message = f'{prob_outputs[i]:0.6f},{labels[i]}'
                    fb.write(message)
                    fb.write('\n')
                    y_prob_list.append(prob_outputs[i].data.cpu().numpy())
                    y_label_list.append(labels[i].data.cpu().numpy())

    # pdb.set_trace()
    with open(save_path + f'/val_{epoch}.txt', 'w') as f:
        for i in range(len(y_label_list)):
            message = f'{y_prob_list[i]:.6f} {y_pLabel_list[i]} {y_label_list[i]}'
            f.write(message)
            f.write('\n')

    eval_result = eval_fun(y_prob_list, y_pLabel_list, y_label_list, logger)
    eval_history.append(eval_result)
    logger.Print(eval_result)
def train(epochs):
    for epoch in range(epochs):
        logger.Print(
            f"|~~~~~~~~~~~~~~~~~~~~~~~~Training epoch:{epoch}~~~~~~~~~~~~~~~~~~~~~~~~~~~|"
        )
        model.train()
        scheduler.step()
        y_prob_list = []
        y_pLabel_list = []
        y_label_list = []
        total_loss = 0
        total_itr = 0

        for itr, data in enumerate(train_data, 1):
            rgb_img = data[0]
            depth_img = data[1]
            ir_img = data[2]
            hsv_img = data[3]
            ycb_img = data[4]
            labels = data[5]

            if use_cuda:
                rgb_img = rgb_img.cuda()
                depth_img = depth_img.cuda()
                ir_img = ir_img.cuda()
                hsv_img = hsv_img.cuda()
                ycb_img = ycb_img.cuda()
                labels = labels.cuda()

            optimizer.zero_grad()
            outputs = model(rgb_img, depth_img, ir_img, hsv_img, ycb_img)
            prob_outputs = torch.softmax(outputs.data, 1)[:, 1]  # class-1 probability for thresholding and eval_fun

            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            total_itr = itr

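            # accumulate predictions for the epoch-level training metrics only after the first 150 iterations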
            if itr > 150:
                for i in range(len(labels)):
                    if prob_outputs[i] > 0.5:
                        y_pLabel_list.append(1)
                    else:
                        y_pLabel_list.append(0)
                y_prob_list.extend(prob_outputs.data.cpu().numpy())
                y_label_list.extend(labels.data.cpu().numpy())

        eval_result, score = eval_fun(y_prob_list, y_pLabel_list, y_label_list)
        train_score.append(score)
        logger.Print(eval_result)
        avg_loss = total_loss / total_itr
        train_loss.append(avg_loss)
        message = f'|epoch:{epoch}|loss:{avg_loss:.6f}|'
        logger.Print(message)

        logger.Print(
            f"|~~~~~~~~~~~~~~~~~~~~~~~~val epoch:{epoch}~~~~~~~~~~~~~~~~~~~~~~~~~~~|"
        )
        val(epoch, val_data, 0)

        if (epoch + 1) % 5 == 0:
            logger.Print(
                f"|~~~~~~~~~~~~~~~~~~~~~~~~test epoch:{epoch}~~~~~~~~~~~~~~~~~~~~~~~~~~~|"
            )
            val(epoch, test_data, 1)

        if (epoch + 1) % 50 == 0:
            plot_curve(save_path, train_score, eval_score, epoch * 5)
            plot_figure(save_path, train_loss, eval_loss, epoch * 5)

    for i in range(len(eval_history)):
        logger.Print(eval_history[i])

    train_loss_np = np.array(train_loss)
    eval_loss_np = np.array(eval_loss)
    np.save(save_path + '/train_loss_np.npy', train_loss_np)
    np.save(save_path + '/eval_loss_np.npy', eval_loss_np)
    train_np = np.array(train_score)
    eval_np = np.array(eval_score)
    test_np = np.array(test_score)
    np.save(save_path + f'/train.npy', train_np)
    np.save(save_path + f'/eval.npy', eval_np)
    np.save(save_path + f'/test.npy', test_np)
def val(epoch=0, data_set=val_data, flag=0):
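    # flag == 0 logs results as validation (eval_score / eval_loss); any other value logs them as test (test_score)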
    y_prob_list = []
    y_pLabel_list = []
    y_label_list = []
    model.eval()
    total_loss = 0
    total_itr = 0
    with open(save_path + f'/prob_{epoch}.txt', 'w') as fb:
        with torch.no_grad():
            for itr, data in enumerate(data_set, 1):
                rgb_img = data[0]
                depth_img = data[1]
                ir_img = data[2]
                hsv_img = data[3]
                ycb_img = data[4]
                labels = data[5]
                if use_cuda:
                    rgb_img = rgb_img.cuda()
                    depth_img = depth_img.cuda()
                    ir_img = ir_img.cuda()
                    hsv_img = hsv_img.cuda()
                    ycb_img = ycb_img.cuda()
                    labels = labels.cuda()

                outputs = model(rgb_img, depth_img, ir_img, hsv_img, ycb_img)
                prob_outputs = F.softmax(outputs, 1)[:, 1]  # probability of predicting class 1
                loss = criterion(outputs, labels)

                total_loss += loss.item()
                total_itr = itr

                for i in range(len(labels)):
                    if prob_outputs[i] > 0.5:
                        y_pLabel_list.append(1)
                    else:
                        y_pLabel_list.append(0)
                    message = f'{prob_outputs[i]:0.6f},{labels[i]}'
                    fb.write(message)
                    fb.write('\n')
                    y_prob_list.append(prob_outputs[i].data.cpu().numpy())
                    y_label_list.append(labels[i].data.cpu().numpy())

    eval_result, score = eval_fun(y_prob_list, y_pLabel_list, y_label_list,
                                  logger)
    eval_history.append(eval_result)
    logger.Print(eval_result)

    if flag == 0:
        eval_score.append(score)
        avg_loss = total_loss / total_itr
        eval_loss.append(avg_loss)
        message = f'|eval|loss:{avg_loss:.6f}|'
        logger.Print(message)
    else:
        test_score.append(score)

    with open(save_path + f'/val_{epoch}.txt', 'w') as f:
        for i in range(len(y_label_list)):
            message = f'{y_prob_list[i]:.6f} {y_pLabel_list[i]} {y_label_list[i]}'
            f.write(message)
            f.write('\n')
Example 6
def train(epochs):
    loss_history = []
    data_loader = train_data
    for epoch in range(epochs):
        model.train(True)
        # logger.Print(f"|~~~~~~~~~~~~~~~~~~~~~~~~Training epoch:{epoch}~~~~~~~~~~~~~~~~~~~~~~~~~~~|")
        # pdb.set_trace()
        for itr, data in enumerate(data_loader, 1):
            rgb_img = data[0]
            depth_img = data[1]
            ir_img = data[2]
            hsv_img = data[3]
            YCbCr_img = data[4]
            labels = data[5]
            if use_cuda:
                rgb_img = rgb_img.cuda()
                depth_img = depth_img.cuda()
                ir_img = ir_img.cuda()
                hsv_img = hsv_img.cuda()
                YCbCr_img = YCbCr_img.cuda()
                labels = labels.cuda()

            optimizer.zero_grad()
            img_crops = image_crop_f(rgb_img)
            features, outputs = model(rgb_img, depth_img, ir_img, hsv_img,
                                      YCbCr_img, img_crops)
            prob_outputs = F.softmax(outputs, 1)[:, 1]  # probability of predicting class 1
            score, preds_outputs = torch.max(outputs.data, 1)

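            # batch-mean feature vectors masked by the predicted class: class 1 for Live_centers, class 0 for Spoof_centers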
            mask_live = preds_outputs.view(-1, 1).float()
            global Live_centers
            Live_centers = torch.mean(features * mask_live, 0)
            mask_spoof = torch.abs(preds_outputs.view(-1, 1) - 1).float()
            global Spoof_centers
            Spoof_centers = torch.mean(features * (mask_spoof), 0)

            loss_anti = criterion(outputs, labels)
            loss_feat = criterion_mse(Live_centers, Spoof_centers)
            loss_feat = torch.exp(-loss_feat * 0.001)
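            # loss_feat shrinks as the two centers move apart; a and b weight the cross-entropy and separation terms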
            a = 1
            b = 0.1
            loss = loss_anti * a + loss_feat * b
            loss.backward()
            optimizer.step()
            loss_history.append(loss.item())

            if itr % 100 == 0:
                # logger.Print(f"|~~~~~~~~~~~~~~~~~~~~~~~~Training~~~~~~~~~~~~~~~~~~~~~~~~~~~|")
                message = f'|epoch:{epoch}-iter:{itr}|loss:{loss:.6f} |loss_feat:{loss_feat:.6f} |loss_anti:{loss_anti:0.6f}|'
                logger.Print(message)
                y_prob_list = []
                y_pLabel_list = []
                y_label_list = []
                for i in range(len(labels)):
                    if prob_outputs[i] > 0.5:
                        y_pLabel_list.append(1)
                    else:
                        y_pLabel_list.append(0)
                y_prob_list = prob_outputs.data.cpu().numpy()
                y_label_list = labels.data.cpu().numpy()
                eval_fun(y_prob_list, y_pLabel_list, y_label_list, logger)
            # with open(save_path+f'/val_{epoch}.txt', 'w') as f:
            #     for i in range(len(y_prob_list)):
            #         message = f'{y_prob_list[i]:.6f} {y_pLabel_list[i]} {y_label_list[i]}'
            #         f.write(message)
            #         f.write('\n')
            #     f.close()

        if epoch % 2 == 0:
            val(epoch)
            # with open(save_path+f'/val_{epoch}.txt', 'w') as f:
        plot(loss_history)
        if (epoch + 1) % 49 == 0:
            test(epoch)
            torch.save(model.state_dict(), save_path + f'/model_new.tar')