def main():

    # --------- 1. get image path and name ---------
    model_name = 'u2net_portrait'  #u2netp

    image_dir = './test_data/test_portrait_images/portrait_im'
    prediction_dir = './test_data/test_portrait_images/portrait_results'
    if (not os.path.exists(prediction_dir)):
        os.mkdir(prediction_dir)

    model_dir = './saved_models/u2net_portrait/u2net_portrait.pth'

    img_name_list = glob.glob(image_dir + '/*')
    print("Number of images: ", len(img_name_list))

    # --------- 2. dataloader ---------
    #1. dataloader
    test_salobj_dataset = SalObjDataset(
        img_name_list=img_name_list,
        lbl_name_list=[],
        transform=transforms.Compose([RescaleT(512),
                                      ToTensorLab(flag=0)]))
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)

    # --------- 3. model define ---------

    print("...load U2NET---173.6 MB")
    net = U2NET(3, 1)

    net.load_state_dict(torch.load(model_dir))
    if torch.cuda.is_available():
        net.cuda()
    net.eval()

    # --------- 4. inference for each image ---------
    for i_test, data_test in enumerate(test_salobj_dataloader):

        print("inferencing:", img_name_list[i_test].split(os.sep)[-1])

        inputs_test = data_test['image']
        inputs_test = inputs_test.type(torch.FloatTensor)

        if torch.cuda.is_available():
            inputs_test = Variable(inputs_test.cuda())
        else:
            inputs_test = Variable(inputs_test)

        d1, d2, d3, d4, d5, d6, d7 = net(inputs_test)

        # normalization
        pred = 1.0 - d1[:, 0, :, :]
        pred = normPRED(pred)

        # save results to test_results folder
        save_output(img_name_list[i_test], pred, prediction_dir)

        del d1, d2, d3, d4, d5, d6, d7
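Every example below calls normPRED on a side output of the network, but the helper itself is not shown in these snippets. A minimal sketch, assuming it performs the min-max normalization used by the reference U-2-Net code:

import torch

def normPRED(d):
    # min-max normalize a prediction map to the range [0, 1]
    ma = torch.max(d)
    mi = torch.min(d)
    return (d - mi) / (ma - mi)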
Example #2
def preprocess(image):
    #print('Start image preprocess')
    label_3 = np.zeros(image.shape)
    label = np.zeros(label_3.shape[0:2])

    if (3 == len(label_3.shape)):
        label = label_3[:, :, 0]
    elif (2 == len(label_3.shape)):
        label = label_3

    if (3 == len(image.shape) and 2 == len(label.shape)):
        label = label[:, :, np.newaxis]
    elif (2 == len(image.shape) and 2 == len(label.shape)):
        image = image[:, :, np.newaxis]
        label = label[:, :, np.newaxis]

    transform = transforms.Compose([RescaleT(320), ToTensorLab(flag=0)])
    sample = transform({
        'imidx': np.array([0]),
        'image': image,
        'label': label
    })
    #print('Preprocess completed')

    return sample
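The returned sample can then be batched and fed to the network in the same way as the dataloader-based examples. A hedged usage sketch, assuming a loaded model net, the normPRED helper above, and an RGB numpy image img:

import torch

inputs = preprocess(img)['image'].unsqueeze(0).type(torch.FloatTensor)
if torch.cuda.is_available():
    inputs = inputs.cuda()
with torch.no_grad():
    d1, d2, d3, d4, d5, d6, d7 = net(inputs)
mask = normPRED(d1[:, 0, :, :])  # saliency mask in [0, 1]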
Example #3
    def __init__(self, model_name='u2net', cuda_mode=True, output_format='np'):
        self.model_name = model_name
        self.cuda_mode = cuda_mode and torch.cuda.is_available()  # Fallback to CPU mode, if cuda is not available
        self.trans = transforms.Compose([RescaleT(320), ToTensorLab(flag=0)])
        self.output_format = output_format

        # Validate
        if output_format not in self.FORMATS:
            raise AssertionError('Invalid "output_format"', 'Use "np" or "pil"')
        if model_name not in self.MODEL_NAMES:
            raise AssertionError('Invalid "model_name"', 'Use "u2net" or "u2netp"')

        if model_name == 'u2net':
            print("Model: U2NET (173.6 MB)")
            self.net = U2NET(3, 1)  # 173.6 MB
        elif model_name == 'u2netp':
            print("Model: U2NetP (4.7 MB)")
            self.net = U2NETP(3, 1)  # 4.7 MB
        else:
            raise AssertionError('Invalid "model_name"', 'Use "u2net" or "u2netp"')

        # Load network
        model_file = os.path.join(os.path.dirname(__file__), 'saved_models', model_name + '.pth')
        print("model_file:", model_file)

        if cuda_mode:
            print("CUDA mode")
            self.net.load_state_dict(torch.load(model_file))
            self.net.cuda()
        else:
            print("CPU mode")
            self.net.load_state_dict(torch.load(model_file, map_location=torch.device('cpu')))

        self.net.eval()
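The validation above references self.FORMATS and self.MODEL_NAMES, which are not part of this snippet; presumably they are class-level constants along these lines (the class name is hypothetical):

class U2NetWrapper:  # hypothetical name for the enclosing class
    FORMATS = ('np', 'pil')            # supported output formats
    MODEL_NAMES = ('u2net', 'u2netp')  # supported model variants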
Example #4
def main():

    # --------- 1. get image path and name ---------
    model_name = 'u2net'  #u2netp

    image_dir = './save_images/images/'
    prediction_dir = './save_images/' + model_name + '_result/'
    model_dir = './saved_models/' + model_name + '/' + model_name + '.pth'

    img_name_list = glob.glob(image_dir + '*')
    print(img_name_list)

    # --------- 2. dataloader ---------
    #1. dataloader
    test_salobj_dataset = SalObjDataset(
        img_name_list=img_name_list,
        lbl_name_list=[],
        transform=transforms.Compose([RescaleT(320),
                                      ToTensorLab(flag=0)]))
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)

    # --------- 3. model define ---------
    if (model_name == 'u2net'):
        print("...load U2NET---173.6 MB")
        net = U2NET(3, 1)
    elif (model_name == 'u2netp'):
        print("...load U2NEP---4.7 MB")
        net = U2NETP(3, 1)
    net.load_state_dict(torch.load(model_dir))
    if torch.cuda.is_available():
        net.cuda()
    net.eval()

    # --------- 4. inference for each image ---------
    for i_test, data_test in enumerate(test_salobj_dataloader):

        print("inferencing:", img_name_list[i_test].split("/")[-1])

        inputs_test = data_test['image']
        inputs_test = inputs_test.type(torch.FloatTensor)

        if torch.cuda.is_available():
            inputs_test = Variable(inputs_test.cuda())
        else:
            inputs_test = Variable(inputs_test)

        d1, d2, d3, d4, d5, d6, d7 = net(inputs_test)

        # normalization
        pred = d1[:, 0, :, :]
        pred = normPRED(pred)

        # save results to test_results folder
        save_output(img_name_list[i_test], pred, prediction_dir)

        del d1, d2, d3, d4, d5, d6, d7
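save_output is used by most of these snippets but never defined here. A minimal sketch of what it typically does, assuming it matches the helper in the reference repo (resize the mask back to the source resolution and write a PNG into the prediction directory):

import os
import numpy as np
from PIL import Image
from skimage import io

def save_output(image_name, pred, d_dir):
    # drop the batch dimension and move the prediction to the CPU
    predict_np = pred.squeeze().cpu().data.numpy()
    im = Image.fromarray((predict_np * 255).astype(np.uint8)).convert('RGB')
    # resize the mask back to the original image size
    image = io.imread(image_name)
    imo = im.resize((image.shape[1], image.shape[0]), resample=Image.BILINEAR)
    # save as <original-stem>.png in the output directory
    stem = os.path.splitext(os.path.basename(image_name))[0]
    imo.save(os.path.join(d_dir, stem + '.png'))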
Example #5
def run(orig_image):
    torch.cuda.empty_cache()

    image = np.array(orig_image)

    # Preprocess image
    label_3 = np.zeros(image.shape)

    label = np.zeros(label_3.shape[0:2])
    if (3 == len(label_3.shape)):
        label = label_3[:, :, 0]
    elif (2 == len(label_3.shape)):
        label = label_3

    if (3 == len(image.shape) and 2 == len(label.shape)):
        label = label[:, :, np.newaxis]
    elif (2 == len(image.shape) and 2 == len(label.shape)):
        image = image[:, :, np.newaxis]
        label = label[:, :, np.newaxis]

    transform = transforms.Compose([RescaleT(320), ToTensorLab(flag=0)])
    sample = transform({
        'imidx': np.array([0]),
        'image': image,
        'label': label
    })

    # Process image
    image_process = sample['image'].unsqueeze(0)
    image_process = image_process.type(torch.FloatTensor)

    if torch.cuda.is_available():
        image_process = Variable(image_process.cuda())
    else:
        image_process = Variable(image_process)

    d1, d2, d3, d4, d5, d6, d7 = net(image_process)

    # normalization
    pred = d1[:, 0, :, :]
    pred = normPRED(pred)

    # save mask
    predict = pred
    predict = predict.squeeze()
    predict_np = predict.cpu().data.numpy()

    im = Image.fromarray(predict_np * 255).convert('RGB').convert('L')
    imo = im.resize((image.shape[1], image.shape[0]), resample=Image.BILINEAR)

    pb_np = np.array(imo)

    del d1, d2, d3, d4, d5, d6, d7

    return imo
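A hedged usage sketch for run(): it assumes the model net has already been loaded at module level, and that the returned grayscale mask can be used directly as a PIL paste mask (the file names are placeholders):

from PIL import Image

orig = Image.open('portrait.jpg').convert('RGB')
mask = run(orig)                       # grayscale mask at the original resolution
cutout = Image.new('RGBA', orig.size)  # fully transparent canvas
cutout.paste(orig, mask=mask)          # keep the foreground, leave the rest transparent
cutout.save('cutout.png')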
Example #6
 def process_image(self, image):
     """
     :param image: image of type numpy array
     :return: processed image
     """
     transformer = transforms.Compose([RescaleT(256), ToTensorLab(flag=0)])
     image = transformer(image)
     image = image.unsqueeze(0)
     image = image.type(torch.FloatTensor)
     image = Variable(image)
     return image
Example #7
def main():
    image_dir = '/media/markytools/New Volume/Courses/EE298CompVis/finalproject/datasets/DUTS/DUTS-TE/DUTS-TE-Image/'
    prediction_dir = './predictionout/'
    model_dir = './saved_models/basnet_bsi_dataaugandarchi/basnet_bsi_epoch_207_itr_272000_train_9.345837_tar_1.082428.pth'

    img_name_list = glob.glob(image_dir + '*.jpg')

    # --------- 2. dataloader ---------
    #1. dataload
    test_salobj_dataset = SalObjDataset(
        img_name_list=img_name_list,
        lbl_name_list=[],
        transform=transforms.Compose([RescaleT(256),
                                      ToTensorLab(flag=0)]),
        category="test")
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)

    # --------- 3. model define ---------
    print("...load BASNet...")
    net = BASNet(3, 1)
    net.load_state_dict(torch.load(model_dir))
    if torch.cuda.is_available():
        net.cuda()
    net.eval()

    # --------- 4. inference for each image ---------
    for i_test, data_test in enumerate(test_salobj_dataloader):

        print("inferencing:", img_name_list[i_test].split("/")[-1])

        inputs_test = data_test['image']
        inputs_test = inputs_test.type(torch.FloatTensor)

        if torch.cuda.is_available():
            inputs_test = Variable(inputs_test.cuda())
        else:
            inputs_test = Variable(inputs_test)

        # d1,d2,d3,d4,d5,d6,d7,d8 = net(inputs_test)
        d1, d2, d3, d4, d5, d6, d7, d8, d1_struct, d2_struct, d3_struct, d4_struct, d5_struct, d6_struct, d7_struct = net(
            inputs_test)

        # normalization
        pred = d1[:, 0, :, :]
        pred = normPRED(pred)

        # save results to test_results folder
        save_output(img_name_list[i_test], pred, prediction_dir)

        del d1, d2, d3, d4, d5, d6, d7, d8
Example #8
def main(colored=False, imagepath=''):

    # --------- 1. get image path and name ---------
    model_name='u2net'#u2netp

    prediction_dir = './test_data/' + model_name + '_results/'
    model_dir = './saved_models/'+ model_name + '/' + model_name + '.pth'

    # --------- 2. dataloader ---------
    #1. dataloader
    test_salobj_dataset = SalObjDataset(img_name_list = [imagepath],
                                        lbl_name_list = [],
                                        transform=transforms.Compose([RescaleT(320),
                                                                      ToTensorLab(flag=0)])
                                        )
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)

    # --------- 3. model define ---------
    if(model_name=='u2net'):
        print("...load U2NET---173.6 MB")
        net = U2NET(3,1)
    elif(model_name=='u2netp'):
        print("...load U2NEP---4.7 MB")
        net = U2NETP(3,1)
    net.load_state_dict(torch.load(model_dir, map_location=torch.device('cpu')))
    if torch.cuda.is_available():
        net.cuda()
    net.eval()

    # --------- 4. inference for each image ---------
    for _, data_test in enumerate(test_salobj_dataloader):
        inputs_test = data_test['image']
        inputs_test = inputs_test.type(torch.FloatTensor)

        if torch.cuda.is_available():
            inputs_test = Variable(inputs_test.cuda())
        else:
            inputs_test = Variable(inputs_test)

        d1,d2,d3,d4,d5,d6,d7= net(inputs_test)

        # normalization
        pred = d1[:,0,:,:]
        pred = normPRED(pred)

        del d1,d2,d3,d4,d5,d6,d7
        # save results to test_results folder
        return save_output(imagepath, pred, prediction_dir, colored=colored)
Example #9
def main():
    # --------- 1. get image path and name ---------
    model_name = 'u2net'
    cwd = Path(os.getcwd())
    image_dir = cwd / 'test_data' / 'test_human_images'
    prediction_dir = cwd / 'test_data' / 'test_human_images_results'
    prediction_dir.mkdir(exist_ok=True)
    model_dir = cwd / 'saved_models' / (model_name + '_human_seg') / (model_name + '_human_seg.pth')

    img_name_list = list(image_dir.glob('*'))
    print("Images in test:", len(img_name_list))

    # --------- 2. dataloader ---------
    # 1. dataloader
    test_salobj_dataset = SalObjDataset(img_name_list=img_name_list,
                                        lbl_name_list=[],
                                        transform=transforms.Compose([RescaleT(320), ToTensorLab(flag=0)])
                                        )
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)

    # --------- 3. model define ---------
    print("...load U2NET---173.6 MB")
    net = U2NET(3, 1)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net.load_state_dict(torch.load(model_dir, map_location=device))
    net.to(device)
    net.eval()

    # --------- 4. inference for each image ---------
    for i_test, data_test in enumerate(test_salobj_dataloader):
        image_path = img_name_list[i_test]
        print("inferencing:", image_path.name)

        inputs_test = data_test['image']
        inputs_test = inputs_test.to(next(net.parameters()))
        with torch.no_grad():
            d1, d2, d3, d4, d5, d6, d7 = net(inputs_test)

            # normalization
            pred = d1[:, 0, :, :]
            pred = normPRED(pred)

            # save results to test_results folder
            save_output(image_path, pred, prediction_dir)

            del d1, d2, d3, d4, d5, d6, d7
Example #10
def infer(
    net,
    image_dir = os.path.join(os.getcwd(), 'test_data', 'test_images'),
    prediction_dir = os.path.join(os.getcwd(), 'test_data', 'u2net' + '_results')
    ):

    
    img_name_list = glob.glob(image_dir + os.sep + '*')
    prediction_dir = prediction_dir + os.sep

    # --------- 2. dataloader ---------
    #1. dataloader
    test_salobj_dataset = SalObjDataset(img_name_list = img_name_list,
                                        lbl_name_list = [],
                                        transform=transforms.Compose([RescaleT(320),
                                                                      ToTensorLab(flag=0)])
                                        )
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)

    # --------- 4. inference for each image ---------
    for i_test, data_test in enumerate(test_salobj_dataloader):

        print("Generating mask for:",img_name_list[i_test].split(os.sep)[-1])

        inputs_test = data_test['image']
        inputs_test = inputs_test.type(torch.FloatTensor)

        if torch.cuda.is_available():
            inputs_test = Variable(inputs_test.cuda())
        else:
            inputs_test = Variable(inputs_test)

        d1,d2,d3,d4,d5,d6,d7= net(inputs_test)

        # normalization
        pred = d1[:,0,:,:]
        pred = normPRED(pred)

        # save results to test_results folder
        if not os.path.exists(prediction_dir):
            os.makedirs(prediction_dir, exist_ok=True)
        save_images(img_name_list[i_test],pred,prediction_dir)

        del d1,d2,d3,d4,d5,d6,d7
Example #11
def main():
    # --------- 1. get image path and name ---------
    model_name = 'u2netp'  # u2netp

    image_dir = '../train2014'
    prediction_dir = os.path.join(os.getcwd(), 'test_data', model_name + '_results' + os.sep)
    model_dir = '../models/' + model_name + '.pth'

    img_name_list = glob.glob(image_dir + os.sep + '*')

    # --------- 2. dataloader ---------
    # 1. dataloader
    test_salobj_dataset = SalObjDataset(img_name_list=img_name_list,
                                        lbl_name_list=[],
                                        transform=transforms.Compose([RescaleT(320),
                                                                      ToTensorLab(flag=0)])
                                        )
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)
    if (model_name == 'u2net'):
        print("...load U2NET---173.6 MB")
        net = U2NET(3, 1)
    elif (model_name == 'u2netp'):
        print("...load U2NEP---4.7 MB")
        net = U2NETP(3, 1)
    net.load_state_dict(torch.load(model_dir))
    # if torch.cuda.is_available():
    #     net.cuda()
    net.eval()
    all_out = {}
    for i_test, data_test in tqdm(enumerate(test_salobj_dataloader)):
        sep_ = img_name_list[i_test].split(os.sep)[-1]
        inputs_test = data_test['image']
        inputs_test = inputs_test.type(torch.FloatTensor)
        #
        # if torch.cuda.is_available():
        #     inputs_test = Variable(inputs_test.cuda())
        # else:
        inputs_test = Variable(inputs_test)
        d = net(inputs_test)

        # the network returns a tuple of side outputs; use the fused output
        # d[0] and detach it so the prediction maps can be pickled below
        pred = normPRED(d[0]).detach().cpu()
        all_out[sep_] = pred

    pickle.dump(all_out, open("../data/coco_train_u2net.pik", "wb"), protocol=2)
Example #12
def run_prediction(files):
    img_name_list = files

    # --------- 2. dataloader ---------
    #1. dataload
    test_salobj_dataset = SalObjDataset(
        img_name_list=img_name_list,
        lbl_name_list=[],
        transform=transforms.Compose([RescaleT(256),
                                      ToTensorLab(flag=0)]))
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)

    # --------- 3. model define ---------
    print("...load BASNet...")
    net = BASNet(3, 1)
    net.load_state_dict(torch.load(model_dir))
    if torch.cuda.is_available():
        net.cuda()
    net.eval()

    # --------- 4. inference for each image ---------
    for i_test, data_test in enumerate(test_salobj_dataloader):

        print("inferencing:", img_name_list[i_test].split("/")[-1])

        inputs_test = data_test['image']
        inputs_test = inputs_test.type(torch.FloatTensor)

        if torch.cuda.is_available():
            inputs_test = Variable(inputs_test.cuda())
        else:
            inputs_test = Variable(inputs_test)

        d1, d2, d3, d4, d5, d6, d7, d8 = net(inputs_test)

        # normalization
        pred = d1[:, 0, :, :]
        pred = normPRED(pred)

        # save results to test_results folder
        save_output(img_name_list[i_test], pred, prediction_dir)

        del d1, d2, d3, d4, d5, d6, d7, d8
Example #13
def main():

    # --------- 1. get image path and name ---------
    model_name = 'u2net_portrait' #u2netp

    image_dir = './test_data/test_portrait_images/portrait_im'
    prediction_dir = './test_data/test_portrait_images/portrait_results'
    if(not os.path.exists(prediction_dir)):
        os.mkdir(prediction_dir)
    model_dir = './saved_models/u2net_portrait/u2net_portrait.pth'
    img_name_list = glob.glob(image_dir+'/*')
    print("Number of images: ", len(img_name_list))

    # --------- 2. dataloader ---------

    test_salobj_dataset = sal_generator(batch_size=1,
                                        img_name_list = img_name_list,
                                        lbl_name_list = [],
                                        transform=transforms.Compose([RescaleT(512),
                                                                      ToTensorLab(flag=0)])
                                        )

    # --------- 3. model define ---------

    print("...load U2NET---173.6 MB")
    net = U2NET(3,1)

    net.load(model_dir)

    # --------- 4. inference for each image ---------
    for i_test, data_test in enumerate(test_salobj_dataset):

        print("inferencing:", img_name_list[i_test].split(os.sep)[-1])

        d1,d2,d3,d4,d5,d6,d7= net(data_test, steps=1)

        # normalization
        pred = 1.0 - d1[:,0,:,:]
        pred = normPRED(pred)

        # save results to test_results folder
        save_output(img_name_list[i_test],pred,prediction_dir)

        del d1,d2,d3,d4,d5,d6,d7
Example #14
def remove_bg(images):
    dataset = SalObjDataset(img_name_list=images,
                            lbl_name_list=[],
                            transform=transforms.Compose(
                                [RescaleT(320),
                                 ToTensorLab(flag=0)]))
    dataloader = DataLoader(dataset,
                            batch_size=1,
                            shuffle=False,
                            num_workers=1)

    net = U2NET(3, 1)

    net.load_state_dict(torch.load(model_dir, map_location='cpu'))

    net.eval()

    outputs = []
    for i, data in enumerate(dataloader):

        inputs = data['image']
        inputs = inputs.type(torch.FloatTensor)
        inputs = Variable(inputs)

        d1, d2, d3, d4, d5, d6, d7 = net(inputs)

        pred = d1[:, 0, :, :]
        pred = normPRED(pred)

        filename = save_output(images[i], pred, output_dir)
        outputs.append(filename)

        img = cv2.imread(images[i])

        mask = cv2.imread(filename, 0)

        rgba = cv2.cvtColor(img, cv2.COLOR_RGB2RGBA)
        rgba[:, :, 3] = mask

        cv2.imwrite(filename, rgba)

        del d1, d2, d3, d4, d5, d6, d7

    return outputs
Example #15
def preprocess(image):
    label_3 = np.zeros(image.shape)
    label = np.zeros(label_3.shape[0:2])

    if (3 == len(label_3.shape)):
        label = label_3[:, :, 0]
    elif (2 == len(label_3.shape)):
        label = label_3

    if (3 == len(image.shape) and 2 == len(label.shape)):
        label = label[:, :, np.newaxis]
    elif (2 == len(image.shape) and 2 == len(label.shape)):
        image = image[:, :, np.newaxis]
        label = label[:, :, np.newaxis]

    transform = transforms.Compose([RescaleT(256), ToTensorLab(flag=0)])
    sample = transform({'image': image, 'label': label})

    return sample
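RescaleT and ToTensorLab appear in every snippet but are defined in the repo's data_loader module, not here. A simplified sketch of what they do, assuming the behavior of the reference implementation (square resize, then ImageNet-style normalization into CHW tensors) and an RGB HxWx3 image with an HxWx1 label:

import numpy as np
import torch
from skimage import transform

class RescaleT:
    def __init__(self, output_size):
        self.output_size = output_size

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        s = self.output_size
        image = transform.resize(image, (s, s), mode='constant')
        label = transform.resize(label, (s, s), mode='constant',
                                 order=0, preserve_range=True)
        return {**sample, 'image': image, 'label': label}

class ToTensorLab:
    def __init__(self, flag=0):
        self.flag = flag  # flag=0: plain RGB normalization

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        image = image / np.max(image) if np.max(image) > 0 else image
        tmp = np.zeros_like(image, dtype=np.float64)
        tmp[:, :, 0] = (image[:, :, 0] - 0.485) / 0.229
        tmp[:, :, 1] = (image[:, :, 1] - 0.456) / 0.224
        tmp[:, :, 2] = (image[:, :, 2] - 0.406) / 0.225
        return {**sample,
                'image': torch.from_numpy(tmp.transpose((2, 0, 1))),
                'label': torch.from_numpy(label.transpose((2, 0, 1)))}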
Example #16
def main():
    model_name='u2netp'

    model_dir = os.path.join(os.getcwd(), 'saved_models', model_name, model_name + '.pth')
    image_dir = os.path.join(os.getcwd(), 'test_data', 'test_images')
    img_name_list = glob.glob(image_dir + os.sep + '*')

    test_salobj_dataset = SalObjDataset(img_name_list = img_name_list,
                                        lbl_name_list = [],
                                        transform=transforms.Compose([RescaleT(320),
                                                                        ToTensorLab(flag=0)])
                                        )
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)

    if(model_name=='u2net'):
        print("...load U2NET---173.6 MB")
        net = U2NET(3,1)
    elif(model_name=='u2netp'):
        print("...load U2NEP---4.7 MB")
        net = U2NETP(3,1)

    net.load_state_dict(torch.load(model_dir))
    if torch.cuda.is_available():
        net.cuda()
    net.eval()

    for data_test in test_salobj_dataloader:
        inputs_test = data_test['image']
        inputs_test = inputs_test.type(torch.FloatTensor)
        if torch.cuda.is_available():
            inputs_test = Variable(inputs_test.cuda())
        else:
            inputs_test = Variable(inputs_test)

        dummy_input = inputs_test
        torch.onnx.export(net, dummy_input,"exported/onnx/{}.onnx".format(model_name),
        opset_version=10)
        
        break
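After exporting, the ONNX graph can be sanity-checked with onnxruntime. A minimal sketch, assuming the export above succeeded and that the model expects a 1x3x320x320 float32 input:

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("exported/onnx/u2netp.onnx")
input_name = sess.get_inputs()[0].name
dummy = np.random.rand(1, 3, 320, 320).astype(np.float32)
outputs = sess.run(None, {input_name: dummy})  # one array per side output
print([o.shape for o in outputs])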
Example #17
def get_foreground(img):

    global model_loaded, net
    if not model_loaded:
        basnet_model_file = downloads.download_from_gdrive(
            gdrive_fileid='1s52ek_4YTDRt_EOkx1FS53u-vJa0c4nu',
            output_path='BASNet/saved_models/basnet_bsi/basnet.pth')
        net = load_model(basnet_model_file)
        model_loaded = True

    img = np.array(img)
    size = image.get_size(img)

    label = np.zeros(img.shape)
    sample = {'image': img, 'label': label}

    transform = transforms.Compose([RescaleT(256), ToTensorLab(flag=0)])
    sample = transform(sample)

    inputs_test = sample['image']
    inputs_test = inputs_test.type(torch.FloatTensor)
    if torch.cuda.is_available():
        inputs_test = Variable(inputs_test.cuda())
    else:
        inputs_test = Variable(inputs_test)
    inputs_test = inputs_test.unsqueeze(0)

    d1, d2, d3, d4, d5, d6, d7, d8 = net(inputs_test)
    pred = d1[:, 0, :, :]
    pred = normPRED(pred)
    del d1, d2, d3, d4, d5, d6, d7, d8

    pred = pred.squeeze()
    pred_np = pred.cpu().data.numpy()

    im_mask = Image.fromarray(pred_np * 255).convert('RGB')
    im_mask = im_mask.resize(size, resample=Image.BILINEAR)
    im_mask = np.array(im_mask)

    return im_mask
Example #18
def main():

    # --------- 1. get image path and name ---------
    model_name = 'u2net'  # u2netp

    image_dir = os.path.join(os.getcwd(), 'test_data', 'test_images')
    prediction_dir = os.path.join(os.getcwd(), 'test_data',
                                  model_name + '_results' + os.sep)
    model_dir = os.path.join(os.getcwd(), 'saved_models', model_name,
                             model_name + '.pth')

    img_name_list = glob.glob(image_dir + os.sep + '*')
    print(img_name_list)

    # --------- 2. dataloader ---------

    test_salobj_dataset = SalObjDataset(
        img_name_list=img_name_list,
        lbl_name_list=[],
        transform=transforms.Compose([RescaleT(320),
                                      ToTensorLab(flag=0)]))

    if (model_name == 'u2net'):
        print("...load U2NET---173.6 MB")
        net = U2NET(3, 1)
    elif (model_name == 'u2netp'):
        print("...load U2NEP---4.7 MB")
        net = U2NETP(3, 1)

    for i_test in range(len(img_name_list)):
        print("inferencing:", img_name_list[i_test].split(os.sep)[-1])

        d1, d2, d3, d4, d5, d6, d7 = net.predict(test_salobj_dataset, steps=1)
        pred = d1[:, 0, :, :]
        pred = normPRED(pred)
        if not os.path.exists(prediction_dir):
            os.makedirs(prediction_dir, exist_ok=True)
        save_output(img_name_list[i_test], pred, prediction_dir)
        del d1, d2, d3, d4, d5, d6, d7
Example #19
def main():
    name = 'test'
    # please input the height  unit:M
    body_height = 1.63

    #"Input image" path
    image_dir = os.path.join(os.getcwd(), 'input')

    #"Output model" path
    outbody_filenames = './output/{}.obj'.format(name)

    #########################################################
    #this code used for image segmentation to remove the background to get Silhouettes
    # --------- 1. get image path and name ---------

    model_name='u2net'#u2net or u2netp

    #set orignal silhouette images path
    prediction_dir1 = os.path.join(os.getcwd(), 'Silhouette' + os.sep)

    #set the path of silhouette images after horizontal flippath
    prediction_dir = os.path.join(os.getcwd(), 'test_data' + os.sep)

    model_dir = os.path.join(os.getcwd(), 'saved_models', model_name, model_name + '.pth')
    img_name_list = glob.glob(image_dir + os.sep + '*')
    print(img_name_list)

    # --------- 2. dataloader ---------

    test_salobj_dataset = SalObjDataset(img_name_list = img_name_list,
                                        lbl_name_list = [],
                                        transform=transforms.Compose([RescaleT(320),
                                                                      ToTensorLab(flag=0)])
                                        )
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)

    # --------- 3. silhouette cutting model define ---------
    if(model_name=='u2net'):
        print("...load U2NET---173.6 MB")
        net = U2NET(3,1)
    elif(model_name=='u2netp'):
        print("...load U2NEP---4.7 MB")
        net = U2NETP(3,1)
    net.load_state_dict(torch.load(model_dir))
    if torch.cuda.is_available():
        net.cuda()
    net.eval()

    # --------- 4. inference for each image ---------
    for i_test, data_test in enumerate(test_salobj_dataloader):

        print("inferencing:",img_name_list[i_test].split(os.sep)[-1])

        inputs_test = data_test['image']
        inputs_test = inputs_test.type(torch.FloatTensor)

        if torch.cuda.is_available():
            inputs_test = Variable(inputs_test.cuda())
        else:
            inputs_test = Variable(inputs_test)

        d1,d2,d3,d4,d5,d6,d7= net(inputs_test)

        # normalization
        pred = d1[:,0,:,:]
        pred = normPRED(pred)

        # save results to test_results folder
        if not os.path.exists(prediction_dir):
            os.makedirs(prediction_dir, exist_ok=True)
        save_output(img_name_list[i_test],pred,prediction_dir,prediction_dir1)

        del d1,d2,d3,d4,d5,d6,d7


    ###########################################################
    #this code used for reconstruct 3d model:

   #--------5.get the silhouette images --------
    img_filenames = ['./test_data/front.png', './test_data/side.png']

    # img = cv2.imread(img_filenames[1])
    # cv2.flip(img,1)



    # -----------6.load input data---------
    sampling_num = 648
    data = np.zeros([2, 2, sampling_num])
    for i in np.arange(len(img_filenames)):
        img = img_filenames[i]
        im = getBinaryimage(img, 600)  # deal with white-black image simply
        sample_points = getSamplePoints(im, sampling_num, i)
        center_p = np.mean(sample_points, axis=0)
        sample_points = sample_points - center_p
        data[i, :, :] = sample_points.T

    data = repeat_data(data)

    #--------7 load CNN model----reconstruct 3d body shape
    print('==> beginning...')
    len_out = 22
    model_name = './Models/model.ckpt'
    ourModel = RegressionPCA(len_out)
    ourModel.load_state_dict(torch.load(model_name))
    ourModel.eval()

    #----------8 output results--------------
    save_obj(outbody_filenames, ourModel, body_height, data)
Example #20
		imidx = imidx + "." + bbb[i]

	tra_lbl_name_list.append(data_dir + tra_label_dir + imidx + label_ext)

print("---")
print("train images: ", len(tra_img_name_list))
print("train labels: ", len(tra_lbl_name_list))
print("---")

train_num = len(tra_img_name_list)

salobj_dataset = SalObjDataset(
    img_name_list=tra_img_name_list,
    lbl_name_list=tra_lbl_name_list,
    transform=transforms.Compose([
        RescaleT(320),
        RandomCrop(288),
        ToTensorLab(flag=0)]))
salobj_dataloader = DataLoader(salobj_dataset, batch_size=batch_size_train, shuffle=True
                               #shuffle=False
                               , num_workers=0)

# ------- 3. define model --------
# define the net
# select the model
if(model_name=='u2net'):
    net = U2NET(3, 1)
elif(model_name=='u2netp'):
    net = U2NETP(3,1)

if torch.cuda.is_available():
Example #21
    imo.save(d_dir+imidx+'.png')

# --------- 1. get image path and name ---------


#image_dir = './test_data/test_images/'
image_dir = '../SOD_datasets/DUTS-TE/img/'
prediction_dir = './test_data/test_results/'
model_dir = './saved_models/basnet_bsi_original/basnet_bsi_itr_574000_train_1.013724_tar_0.054404.pth'

img_name_list = glob.glob(image_dir + '*.jpg')

# --------- 2. dataloader ---------
# 1. dataload
test_salobj_dataset = SalObjDataset(img_name_list=img_name_list, lbl_name_list=[
], transform=transforms.Compose([RescaleT(256), ToTensorLab(flag=0)]))
test_salobj_dataloader = DataLoader(
    test_salobj_dataset, batch_size=1, shuffle=False, num_workers=1)

# --------- 3. model define ---------
print("...load BASNet...")
net = BASNet(3, 1)
net.load_state_dict(torch.load(model_dir))
if torch.cuda.is_available():
    net.cuda()
net.eval()

# --------- 4. inference for each image ---------
for i_test, data_test in enumerate(test_salobj_dataloader):

    print("inferencing:", img_name_list[i_test].split("/")[-1])
Example #22
def main():

    # --------- 1. get image path and name ---------
    model_name = 'u2netp'  # fixed as u2netp

    image_dir = os.path.join(
        os.getcwd(), 'input'
    )  # changed to 'images' directory which is populated while running the script
    prediction_dir = os.path.join(
        os.getcwd(), 'output/'
    )  # changed to 'results' directory which is populated after the predictions
    model_dir = os.path.join(os.getcwd(), model_name +
                             '.pth')  # path to u2netp pretrained weights

    img_name_list = glob.glob(image_dir + os.sep + '*')
    print(img_name_list)

    # --------- 2. dataloader ---------
    #1. dataloader
    test_salobj_dataset = SalObjDataset(
        img_name_list=img_name_list,
        lbl_name_list=[],
        transform=transforms.Compose([RescaleT(320),
                                      ToTensorLab(flag=0)]))
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)

    # --------- 3. model define ---------
    net = U2NETP(3, 1)
    if torch.cuda.is_available():
        net.load_state_dict(torch.load(model_dir))
        net.cuda()
    else:
        net.load_state_dict(
            torch.load(model_dir, map_location=torch.device('cpu')))

    net.eval()

    # --------- 4. inference for each image ---------
    for i_test, data_test in enumerate(test_salobj_dataloader):

        print("inferencing:", img_name_list[i_test].split(os.sep)[-1])

        inputs_test = data_test['image']
        inputs_test = inputs_test.type(torch.FloatTensor)

        if torch.cuda.is_available():
            inputs_test = Variable(inputs_test.cuda())
        else:
            inputs_test = Variable(inputs_test)

        d1, d2, d3, d4, d5, d6, d7 = net(inputs_test)

        # normalization
        pred = d1[:, 0, :, :]
        pred = normPRED(pred)

        # save results to test_results folder
        if not os.path.exists(prediction_dir):
            os.makedirs(prediction_dir, exist_ok=True)
        save_output(img_name_list[i_test], pred, prediction_dir)

        del d1, d2, d3, d4, d5, d6, d7
Example #23
def main():

    # --------- 1. get image path and name ---------
    #model_name='u2net'
    model_name = 'u2netp'

    image_dir = os.path.join(os.getcwd(), 'test_data', 'test_images')
    prediction_dir = os.path.join(os.getcwd(), 'test_data',
                                  model_name + '_results' + os.sep)
    model_dir = os.path.join(os.getcwd(), 'saved_models', model_name,
                             model_name + '.pth')

    img_name_list = glob.glob(image_dir + os.sep + '*')
    print(img_name_list)

    # --------- 2. dataloader ---------
    #1. dataloader
    test_salobj_dataset = SalObjDataset(
        img_name_list=img_name_list,
        lbl_name_list=[],
        transform=transforms.Compose([RescaleT(320),
                                      ToTensorLab(flag=0)]))
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)

    # --------- 3. model define ---------
    if (model_name == 'u2net'):
        print("...load U2NET---173.6 MB")
        net = U2NET(3, 1)
    elif (model_name == 'u2netp'):
        print("...load U2NEP---4.7 MB")
        net = U2NETP(3, 1)
    net.load_state_dict(torch.load(model_dir))
    if torch.cuda.is_available():
        net.cuda()
    net.eval()

    # --------- 4. inference for each image ---------
    for i_test, data_test in enumerate(test_salobj_dataloader):

        print("inferencing:", img_name_list[i_test].split(os.sep)[-1])

        inputs_test = data_test['image']
        #print("test", inputs_test.shape)
        inputs_test = inputs_test.type(torch.FloatTensor)

        if torch.cuda.is_available():
            inputs_test = Variable(inputs_test.cuda())
        else:
            inputs_test = Variable(inputs_test)

        d1, d2, d3, d4, d5, d6, d7 = net(inputs_test)

        # normalization
        pred = d1[:, 0, :, :]
        pred = normPRED(pred)
        #print("pred",pred.shape)
        # save results to test_results folder
        if not os.path.exists(prediction_dir):
            os.makedirs(prediction_dir, exist_ok=True)
        save_output(img_name_list[i_test], pred, prediction_dir)

        del d1, d2, d3, d4, d5, d6, d7
Example #24
def main():

    # --------- 1. get image path and name ---------
    model_name = 'u2net'  #u2netp
    image_dir = os.path.join(os.getcwd(), 'test_data', 'test_images')
    prediction_dir = os.path.join(os.getcwd(), 'test_data',
                                  model_name + '_results' + os.sep)
    # image_dir = os.path.join('/nfs/project/huxiaoliang/data/white_or_not/white_bg_image')
    # prediction_dir = os.path.join('/nfs/project/huxiaoliang/data/white_or_not/white_bg_image_pred'+ os.sep)
    model_dir = os.path.join('/nfs/private/modelfiles/u2net-saved_models',
                             model_name, model_name + '.pth')

    img_name_list = glob.glob(image_dir + os.sep + '*')
    print(img_name_list)

    # --------- 2. dataloader ---------
    #1. dataloader
    test_salobj_dataset = SalObjDataset(
        img_name_list=img_name_list,
        lbl_name_list=[],
        transform=transforms.Compose([RescaleT(320),
                                      ToTensorLab(flag=0)]))
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)

    # --------- 3. model define ---------
    if (model_name == 'u2net'):
        print("...load U2NET---173.6 MB")
        net = U2NET(3, 1)
    elif (model_name == 'u2netp'):
        print("...load U2NEP---4.7 MB")
        net = U2NETP(3, 1)
    net.load_state_dict(torch.load(model_dir))
    if torch.cuda.is_available():
        net.cuda()
    net.eval()

    # --------- 4. inference for each image ---------
    error = []
    for i_test, data_test in enumerate(test_salobj_dataloader):
        try:
            print("inferencing:", img_name_list[i_test].split(os.sep)[-1])

            inputs_test = data_test['image']
            inputs_test = inputs_test.type(torch.FloatTensor)

            if torch.cuda.is_available():
                inputs_test = Variable(inputs_test.cuda())
            else:
                inputs_test = Variable(inputs_test)

            d1, d2, d3, d4, d5, d6, d7 = net(inputs_test)

            # normalization
            pred = d1[:, 0, :, :]
            pred = normPRED(pred)

            # save results to test_results folder
            if not os.path.exists(prediction_dir):
                os.makedirs(prediction_dir, exist_ok=True)
            save_output(img_name_list[i_test], pred, prediction_dir)

            del d1, d2, d3, d4, d5, d6, d7
        except Exception as ex:
            traceback.print_exc()
            error.append(img_name_list[i_test].split(os.sep)[-1])
    print('Failed images:', error)
Example #25
    image_dir = os.path.join(os.getcwd(), 'test_data', 'test_images')
    prediction_dir = os.path.join(os.getcwd(), 'test_data',
                                  model_name + '_results' + os.sep)
    model_dir = os.path.join(os.getcwd(), 'saved_models', model_name,
                             model_name + '.pth')

    img_name_list = glob.glob(image_dir + os.sep + '*')
    print(img_name_list)

    # --------- 2. dataloader ---------
    #1. dataloader
    test_salobj_dataset = SalObjDataset(
        img_name_list=img_name_list,
        lbl_name_list=[],
        transform=transforms.Compose([RescaleT(320),
                                      ToTensorLab(flag=0)]))
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)

    # --------- 3. model define ---------
    if (model_name == 'u2net'):
        print("...load U2NET---173.6 MB")
        net = U2NET(3, 1)
    elif (model_name == 'u2netp'):
        print("...load U2NEP---4.7 MB")
        net = U2NETP(3, 1)
    criterion = torch.nn.CrossEntropyLoss()
    net.load_state_dict(torch.load(model_dir))
Example #26
def main():

    # --------- 1. get image path and name ---------
    model_name = 'u2net'  #u2netp

    image_dir = "/home/vybt/Downloads/U2_Net_Test"
    prediction_dir = "/home/vybt/Downloads/u-2--bps-net-prediction"
    model_dir = '/media/vybt/DATA/SmartFashion/deep-learning-projects/U-2-Net/saved_models/u2net/_bps_bce_itr_300000_train_0.107041_tar_0.011690.pth'

    img_name_list = glob.glob(image_dir + os.sep + '*')
    # print(img_name_list)

    # --------- 2. dataloader ---------
    #1. dataloader
    test_salobj_dataset = SalObjDataset(
        img_name_list=img_name_list,
        lbl_name_list=[],
        transform=transforms.Compose([RescaleT(320),
                                      ToTensorLab(flag=0)]))
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)

    # --------- 3. model define ---------
    if model_name == 'u2net':
        print("...load U2NET---173.6 MB")
        net = U2NET(3, 8)
    elif model_name == 'u2netp':
        print("...load U2NEP---4.7 MB")
        net = U2NETP(3, 8)

    net.load_state_dict(torch.load(model_dir))

    if torch.cuda.is_available():
        net.cuda()
    net.eval()

    # --------- 4. inference for each image ---------
    for i_test, data_test in enumerate(test_salobj_dataloader):

        print("Inference: ", img_name_list[i_test].split(os.sep)[-1])

        inputs_test = data_test['image']
        inputs_test = inputs_test.type(torch.FloatTensor)

        if torch.cuda.is_available():
            inputs_test = Variable(inputs_test.cuda())
        else:
            inputs_test = Variable(inputs_test)

        # print("inputs test: {}".format(inputs_test.shape))
        d1, d2, d3, d4, d5, d6, d7 = net(inputs_test)

        # normalization
        pred = d1
        pred = normPRED(pred)

        # save results to test_results folder
        if not os.path.exists(prediction_dir):
            os.makedirs(prediction_dir, exist_ok=True)
        save_output(img_name_list[i_test], pred, prediction_dir)

        del d1, d2, d3, d4, d5, d6, d7
Example #27
def main():
    # --------- 1. get image path and name ---------
    model_name = 'u2netp'  # u2net

    # image_dir = os.path.join(os.getcwd(), 'test_data', 'test_images')
    image_dir = '/home/hha/dataset/circle/circle'
    prediction_dir = '/home/hha/dataset/circle/circle_pred'
    model_dir = '/home/hha/pytorch_code/U-2-Net-master/saved_models/u2netp/u2netp.pthu2netp_bce_itr_2000_train_0.077763_tar_0.006976.pth'
    # model_dir = os.path.join(os.getcwd(), 'saved_models', model_name, model_name + '.pth')

    img_name_list = glob.glob(image_dir + os.sep + '*')

    img_name_list = list(filter(lambda f: f.find('_mask') < 0, img_name_list))

    # print(img_name_list)

    # --------- 2. dataloader ---------
    # 1. dataloader
    test_salobj_dataset = SalObjDataset(
        img_name_list=img_name_list,
        lbl_name_list=[],
        transform=transforms.Compose([
            RescaleT(320),  # 320
            ToTensorLab(flag=0)
        ]))
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=0)

    # --------- 3. model define ---------
    if (model_name == 'u2net'):
        print("...load U2NET---173.6 MB")
        net = U2NET(3, 1)
    elif (model_name == 'u2netp'):
        print("...load U2NEP---4.7 MB")
        net = U2NETP(3, 1)
    net.load_state_dict(torch.load(model_dir))
    if torch.cuda.is_available():
        net.cuda()
    net.eval()

    # --------- 4. inference for each image ---------
    for i_test, data_test in enumerate(test_salobj_dataloader):

        print("inferencing:", img_name_list[i_test].split(os.sep)[-1])

        inputs_test = data_test['image']
        inputs_test = inputs_test.type(torch.FloatTensor)

        if torch.cuda.is_available():
            inputs_test = inputs_test.cuda()
        else:
            inputs_test = inputs_test

        d1, d2, d3, d4, d5, d6, d7 = net(inputs_test)

        # normalization
        pred = d1[:, 0, :, :]
        # pred = normPRED(pred)

        # save results to test_results folder
        if not os.path.exists(prediction_dir):
            os.makedirs(prediction_dir, exist_ok=True)
        save_output(img_name_list[i_test], pred, prediction_dir)

        del d1, d2, d3, d4, d5, d6, d7
Example #28
def predict():

    # ensure an image was properly uploaded to our endpoint
    if flask.request.method == "POST":
        if flask.request.files.get("image"):

            file_image = flask.request.files["image"]
            #image = Image.open(image)
            # preprocess the image and prepare it for classification
            salobjdataset = SalObjDataset(img_name_list=[file_image],
                                          lbl_name_list=[],
                                          transform=transforms.Compose([
                                              RescaleT(256),
                                              ToTensorLab(flag=0)
                                          ]))
            saldataloader = DataLoader(salobjdataset,
                                       batch_size=1,
                                       shuffle=False,
                                       num_workers=1)

            for image in saldataloader:
                input_image = image['image']
                input_image = input_image.type(torch.FloatTensor)

            # classify the input image and then initialize the list
            # of predictions to return to the client
            #img = img.type(torch.FloatTensor)

            if torch.cuda.is_available():
                input_image = Variable(input_image.cuda())
            else:
                input_image = Variable(input_image)

            load_model()

            d1, d2, d3, d4, d5, d6, d7, d8 = net(input_image)
            pred = d1[:, 0, :, :]
            pred = normPRED(pred)

            predict = pred
            predict = predict.squeeze()
            predict_np = predict.cpu().data.numpy()

            im = Image.fromarray(predict_np * 255).convert('RGB')
            image = io.imread(file_image)
            imo = im.resize((image.shape[1], image.shape[0]),
                            resample=Image.BILINEAR)

            pb_np = np.array(imo)

            # reuse the arrays already in memory; cv2.imread() needs a file
            # path and cannot read the uploaded FileStorage or a PIL image
            in_img = image      # original upload as an RGB array
            result_img = pb_np  # resized mask as an RGB array

            # whiten the background: keep pixels where the mask is bright
            new_img = np.where(result_img < [250, 250, 250], [254, 254, 254],
                               in_img)
            new_img = new_img.astype('uint8')

            # BytesIO here is the standard library class (imported separately,
            # since `io` above refers to skimage.io)
            byte_io = BytesIO()
            Image.fromarray(new_img).save(byte_io, 'PNG')
            byte_io.seek(0)

    return flask.send_file(byte_io, mimetype='image/png')
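A hedged client-side sketch for calling this endpoint with the requests library (the URL, route, and file names are placeholders):

import requests

with open('photo.jpg', 'rb') as f:
    resp = requests.post('http://localhost:5000/predict', files={'image': f})
with open('white_background.png', 'wb') as out:
    out.write(resp.content)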
Example #29
def train():

    data_dir = './train_data/'
    tra_image_dir = 'DUTS-TR/DUTS-TR-Image/'
    tra_label_dir = 'DUTS-TR/DUTS-TR-Mask/'

    image_ext = '.jpg'
    label_ext = '.png'

    model_dir = "./saved_models/basnet_bsi/"

    epoch_num = 100
    batch_size_train = 1
    batch_size_val = 1
    train_num = 0
    val_num = 0

    tra_img_name_list = glob.glob(data_dir + tra_image_dir + '*' + image_ext)
    tra_lbl_name_list = glob.glob(data_dir + tra_label_dir + '*' + label_ext)
    #	tra_lbl_name_list = []
    #	for img_path in tra_img_name_list:
    #		img_name = img_path.split("/")[-1]

    #		aaa = img_name.split(".")
    #		print(aaa)
    #		bbb = aaa[0:-1]
    #		imidx = bbb[0]
    #		for i in range(1,len(bbb)):
    #			imidx = imidx + "." + bbb[i]
    #			print(imidx)
    #		tra_lbl_name_list.append(data_dir + tra_label_dir + '*' + label_ext)

    print("---")
    print("train images: ", len(tra_img_name_list))
    print("train labels: ", len(tra_lbl_name_list))
    print("---")

    train_num = len(tra_img_name_list)

    salobj_dataset = SalObjDataset(img_name_list=tra_img_name_list,
                                   lbl_name_list=tra_lbl_name_list,
                                   transform=transforms.Compose([
                                       RescaleT(256),
                                       RandomCrop(224),
                                       ToTensorLab(flag=0)
                                   ]))
    salobj_dataloader = DataLoader(salobj_dataset,
                                   batch_size=batch_size_train,
                                   shuffle=True,
                                   num_workers=1)

    # ------- 3. define model --------
    # define the net
    net = BASNet(3, 1)
    #if torch.cuda.is_available():
    #     print(torch.cuda.is_available())
    net.cuda()

    # ------- 4. define optimizer --------
    print("---define optimizer...")
    optimizer = optim.Adam(net.parameters(),
                           lr=0.001,
                           betas=(0.9, 0.999),
                           eps=1e-08,
                           weight_decay=0)

    # ------- 5. training process --------
    print("---start training...")
    ite_num = 0
    running_loss = 0.0
    running_tar_loss = 0.0
    ite_num4val = 0

    for epoch in range(0, epoch_num):
        net.train()

        for i, data in enumerate(salobj_dataloader):
            ite_num = ite_num + 1
            ite_num4val = ite_num4val + 1

            inputs, labels = data['image'], data['label']

            inputs = inputs.type(torch.FloatTensor)
            labels = labels.type(torch.FloatTensor)

            # wrap them in Variable
            #		if torch.cuda.is_available():
            inputs_v, labels_v = Variable(inputs.cuda(),
                                          requires_grad=False), Variable(
                                              labels.cuda(),
                                              requires_grad=False)
            #	else:
            #		inputs_v, labels_v = Variable(inputs, requires_grad=False), Variable(labels, requires_grad=False)

            # y zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            d0, d1, d2, d3, d4, d5, d6, d7 = net(inputs_v)
            loss2, loss = muti_bce_loss_fusion(d0, d1, d2, d3, d4, d5, d6, d7,
                                               labels_v)

            loss.backward()
            optimizer.step()

            # # print statistics
            running_loss += loss.data
            running_tar_loss += loss2.data

            # del temporary outputs and loss
            del d0, d1, d2, d3, d4, d5, d6, d7, loss2, loss

            print(
                "[epoch: %3d/%3d, batch: %5d/%5d, ite: %d] train loss: %3f, tar: %3f "
                % (epoch + 1, epoch_num,
                   (i + 1) * batch_size_train, train_num, ite_num,
                   running_loss / ite_num4val, running_tar_loss / ite_num4val))

            if ite_num % 2000 == 0:  # save model every 2000 iterations

                torch.save(
                    net.state_dict(),
                    model_dir + "basnet_bsi_itr_%d_train_%3f_tar_%3f.pth" %
                    (ite_num, running_loss / ite_num4val,
                     running_tar_loss / ite_num4val))
                running_loss = 0.0
                running_tar_loss = 0.0
                net.train()  # resume train
                ite_num4val = 0

    print('-------------Congratulations! Training Done!!!-------------')
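muti_bce_loss_fusion is not shown in this snippet. A minimal sketch, assuming a plain per-output BCE summed over the eight side outputs (the reference BASNet code additionally mixes in SSIM and IoU terms, omitted here):

import torch.nn as nn

bce_loss = nn.BCELoss(reduction='mean')

def muti_bce_loss_fusion(d0, d1, d2, d3, d4, d5, d6, d7, labels_v):
    # BCE of every side output against the ground-truth mask
    losses = [bce_loss(d, labels_v) for d in (d0, d1, d2, d3, d4, d5, d6, d7)]
    loss = sum(losses)          # total loss used for backpropagation
    return losses[0], loss      # (loss on the fused output, total loss)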
Example #30
def train():
    if os.name == 'nt':
        data_dir = 'C:/Users/marky/Documents/Courses/saliency/datasets/DUTS/'
    else:
        data_dir = os.getenv(
            "HOME") + '/Documents/Courses/EE298-CV/finalproj/datasets/DUTS/'
    tra_image_dir = 'DUTS-TR/DUTS-TR-Image/'
    tra_label_dir = 'DUTS-TR/DUTS-TR-Mask/'
    test_image_dir = 'DUTS-TE/DUTS-TE-Image/'
    test_label_dir = 'DUTS-TE/DUTS-TE-Mask/'

    image_ext = '.jpg'
    label_ext = '.png'

    model_dir = "./saved_models/basnet_bsi_aug/"
    resume_train = False
    resume_model_path = model_dir + "basnet_bsi_epoch_81_itr_106839_train_1.511335_tar_0.098392.pth"
    last_epoch = 1
    epoch_num = 100000
    batch_size_train = 8
    batch_size_val = 1
    train_num = 0
    val_num = 0
    enableInpaintAug = False
    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")

    # ------- 5. training process --------
    print("---start training...")
    test_increments = 6250
    ite_num = 0
    running_loss = 0.0
    running_tar_loss = 0.0
    ite_num4val = 1
    next_test = ite_num + 0
    visdom_tab_title = "StructArchWithoutStructImgs(WithHFlip)"
    ############
    ############
    ############
    ############

    tra_img_name_list = glob.glob(data_dir + tra_image_dir + '*' + image_ext)
    print("data_dir + tra_image_dir + '*' + image_ext: ",
          data_dir + tra_image_dir + '*' + image_ext)
    test_img_name_list = glob.glob(data_dir + test_image_dir + '*' + image_ext)
    print("data_dir + test_image_dir + '*' + image_ext: ",
          data_dir + test_image_dir + '*' + image_ext)

    tra_lbl_name_list = []
    for img_path in tra_img_name_list:
        img_name = img_path.split("/")[-1]
        aaa = img_name.split(".")
        bbb = aaa[0:-1]
        imidx = bbb[0]
        for i in range(1, len(bbb)):
            imidx = imidx + "." + bbb[i]
        tra_lbl_name_list.append(data_dir + tra_label_dir + imidx + label_ext)
    test_lbl_name_list = []
    for img_path in test_img_name_list:
        img_name = img_path.split("/")[-1]
        aaa = img_name.split(".")
        bbb = aaa[0:-1]
        imidx = bbb[0]
        for i in range(1, len(bbb)):
            imidx = imidx + "." + bbb[i]
        test_lbl_name_list.append(data_dir + test_label_dir + imidx +
                                  label_ext)

    print("---")
    print("train images: ", len(tra_img_name_list))
    print("train labels: ", len(tra_lbl_name_list))
    print("---")

    print("---")
    print("test images: ", len(test_img_name_list))
    print("test labels: ", len(test_lbl_name_list))
    print("---")

    train_num = len(tra_img_name_list)
    test_num = len(test_img_name_list)
    salobj_dataset = SalObjDataset(img_name_list=tra_img_name_list,
                                   lbl_name_list=tra_lbl_name_list,
                                   transform=transforms.Compose([
                                       RescaleT(256),
                                       RandomCrop(224),
                                       ToTensorLab(flag=0)
                                   ]),
                                   category="train",
                                   enableInpaintAug=enableInpaintAug)
    salobj_dataset_test = SalObjDataset(img_name_list=test_img_name_list,
                                        lbl_name_list=test_lbl_name_list,
                                        transform=transforms.Compose([
                                            RescaleT(256),
                                            RandomCrop(224),
                                            ToTensorLab(flag=0)
                                        ]),
                                        category="test",
                                        enableInpaintAug=enableInpaintAug)
    salobj_dataloader = DataLoader(salobj_dataset,
                                   batch_size=batch_size_train,
                                   shuffle=True,
                                   num_workers=1)
    salobj_dataloader_test = DataLoader(salobj_dataset_test,
                                        batch_size=batch_size_val,
                                        shuffle=False,  # keep order: the eval loop indexes test_img_name_list[i]
                                        num_workers=1)

    # ------- 3. define model --------
    # define the net
    net = BASNet(3, 1)
    if resume_train:
        # print("resume_model_path:", resume_model_path)
        checkpoint = torch.load(resume_model_path, map_location=device)  # map checkpoint tensors onto the training device
        net.load_state_dict(checkpoint)
    net.to(device)  # device already falls back to the CPU when CUDA is unavailable

    # ------- 4. define optimizer --------
    print("---define optimizer...")
    optimizer = optim.Adam(net.parameters(),
                           lr=0.001,
                           betas=(0.9, 0.999),
                           eps=1e-08,
                           weight_decay=0)

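    # VisdomLinePlotter pushes the curves plotted below to a Visdom server, so one
    # must be running (started with `python -m visdom.server`, port 8097 by default).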
    plotter = VisdomLinePlotter(env_name=visdom_tab_title)

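    # Best-so-far metric trackers; whenever one of them improves, the checkpoint
    # saved in that evaluation round is copied into a dedicated sub-folder
    # (bestMAE/, bestEpochMaxF/, bestAveMaxF/, ...).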
    best_ave_mae = 100000
    best_max_fmeasure = 0
    best_relaxed_fmeasure = 0
    best_ave_maxf = 0
    best_own_RelaxedFmeasure = 0
    # ------- 5. training process --------
    print("---start training...")
    for epoch in range(last_epoch - 1, epoch_num):
        ### Train network
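        # One AverageMeter per deep-supervision output (main losses 0-7 plus the
        # struct side losses), kept separately for the train and test passes, with
        # further meters for the evaluation metrics below.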
        train_loss0 = AverageMeter()
        train_loss1 = AverageMeter()
        train_loss2 = AverageMeter()
        train_loss3 = AverageMeter()
        train_loss4 = AverageMeter()
        train_loss5 = AverageMeter()
        train_loss6 = AverageMeter()
        train_loss7 = AverageMeter()
        train_struct_loss1 = AverageMeter()
        train_struct_loss2 = AverageMeter()
        train_struct_loss3 = AverageMeter()
        train_struct_loss4 = AverageMeter()
        train_struct_loss5 = AverageMeter()
        train_struct_loss6 = AverageMeter()
        train_struct_loss7 = AverageMeter()

        test_loss0 = AverageMeter()
        test_loss1 = AverageMeter()
        test_loss2 = AverageMeter()
        test_loss3 = AverageMeter()
        test_loss4 = AverageMeter()
        test_loss5 = AverageMeter()
        test_loss6 = AverageMeter()
        test_loss7 = AverageMeter()
        test_struct_loss1 = AverageMeter()
        test_struct_loss2 = AverageMeter()
        test_struct_loss3 = AverageMeter()
        test_struct_loss4 = AverageMeter()
        test_struct_loss5 = AverageMeter()
        test_struct_loss6 = AverageMeter()
        test_struct_loss7 = AverageMeter()

        average_mae = AverageMeter()
        average_maxf = AverageMeter()
        average_relaxedf = AverageMeter()
        average_own_RelaxedFMeasure = AverageMeter()
        net.train()
        for i, data in enumerate(salobj_dataloader):
            ite_num = ite_num + 1
            ite_num4val = ite_num4val + 1
            inputs, labels, labels_struct = data['image'], data['label'], data[
                'label2']

            inputs = inputs.type(torch.FloatTensor)
            labels = labels.type(torch.FloatTensor)
            labels_struct = labels_struct.type(torch.FloatTensor)

            # wrap them in Variable and move to the target device (a no-op on CPU)
            inputs_v = Variable(inputs.to(device), requires_grad=False)
            labels_v = Variable(labels.to(device), requires_grad=False)
            labels_struct_v = Variable(labels_struct.to(device), requires_grad=False)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            d0, d1, d2, d3, d4, d5, d6, d7, d1_struct, d2_struct, d3_struct, d4_struct, d5_struct, d6_struct, d7_struct = net(
                inputs_v)
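            # The network returns 15 maps: d0 is the final fused prediction, d1-d7
            # are side outputs, and the *_struct maps come from the structure branch.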
            loss2, loss = muti_bce_loss_fusion(
                d0, d1, d2, d3, d4, d5, d6, d7, d1_struct, d2_struct,
                d3_struct, d4_struct, d5_struct, d6_struct, d7_struct,
                labels_v, train_loss0, train_loss1, train_loss2, train_loss3,
                train_loss4, train_loss5, train_loss6, train_loss7,
                train_struct_loss1, train_struct_loss2, train_struct_loss3,
                train_struct_loss4, train_struct_loss5, train_struct_loss6,
                train_struct_loss7)
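            # muti_bce_loss_fusion also updates the AverageMeters passed to it; loss
            # sums all supervised terms and is the one backpropagated, while loss2 is
            # the component reported as "tar" in the log lines below.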
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.data
            running_tar_loss += loss2.data

            # del temporary outputs and loss
            del d0, d1, d2, d3, d4, d5, d6, d7, d1_struct, d2_struct, d3_struct, d4_struct, d5_struct, d6_struct, d7_struct, loss2, loss

            print(
                "[train epoch: %3d/%3d, batch: %5d/%5d, ite: %d] train loss: %3f, tar: %3f "
                % (epoch + 1, epoch_num,
                   (i + 1) * batch_size_train, train_num, ite_num,
                   running_loss / ite_num4val, running_tar_loss / ite_num4val))
        plotter.plot('loss0', 'train', 'Main Loss 0', epoch + 1,
                     float(train_loss0.avg))
        plotter.plot('loss1', 'train', 'Main Loss 1', epoch + 1,
                     float(train_loss1.avg))
        plotter.plot('loss2', 'train', 'Main Loss 2', epoch + 1,
                     float(train_loss2.avg))
        plotter.plot('loss3', 'train', 'Main Loss 3', epoch + 1,
                     float(train_loss3.avg))
        plotter.plot('loss4', 'train', 'Main Loss 4', epoch + 1,
                     float(train_loss4.avg))
        plotter.plot('loss5', 'train', 'Main Loss 5', epoch + 1,
                     float(train_loss5.avg))
        plotter.plot('loss6', 'train', 'Main Loss 6', epoch + 1,
                     float(train_loss6.avg))
        plotter.plot('loss7', 'train', 'Main Loss 7', epoch + 1,
                     float(train_loss7.avg))
        plotter.plot('structloss1', 'train', 'Struct Loss 1', epoch + 1,
                     float(train_struct_loss1.avg))
        plotter.plot('structloss2', 'train', 'Struct Loss 2', epoch + 1,
                     float(train_struct_loss2.avg))
        plotter.plot('structloss3', 'train', 'Struct Loss 3', epoch + 1,
                     float(train_struct_loss3.avg))
        plotter.plot('structloss4', 'train', 'Struct Loss 4', epoch + 1,
                     float(train_struct_loss4.avg))
        plotter.plot('structloss5', 'train', 'Struct Loss 5', epoch + 1,
                     float(train_struct_loss5.avg))
        plotter.plot('structloss6', 'train', 'Struct Loss 6', epoch + 1,
                     float(train_struct_loss6.avg))
        plotter.plot('structloss7', 'train', 'Struct Loss 7', epoch + 1,
                     float(train_struct_loss7.avg))

        ### Validate model
        print("---Evaluate model---")
        if ite_num >= next_test:  # evaluate and checkpoint every test_increments iterations (the test set is too large to evaluate every batch)
            next_test = ite_num + test_increments
            net.eval()
            max_epoch_fmeasure = 0
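            # Note: these evaluation forward passes are not wrapped in torch.no_grad(),
            # so autograd state is still built for them; wrapping the loop would lower
            # memory use without changing any of the metrics.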
            for i, data in enumerate(salobj_dataloader_test):
                inputs, labels = data['image'], data['label']
                inputs = inputs.type(torch.FloatTensor)
                labels = labels.type(torch.FloatTensor)
                inputs_v = Variable(inputs.to(device), requires_grad=False)
                labels_v = Variable(labels.to(device), requires_grad=False)
                d0, d1, d2, d3, d4, d5, d6, d7, d1_struct, d2_struct, d3_struct, d4_struct, d5_struct, d6_struct, d7_struct = net(
                    inputs_v)

                pred = d0[:, 0, :, :]
                pred = normPRED(pred)
                pred = pred.squeeze()
                predict_np = pred.cpu().data.numpy()
                im = Image.fromarray(predict_np * 255).convert('RGB')
                img_name = test_img_name_list[i]
                image = cv2.imread(img_name)
                imo = im.resize((image.shape[1], image.shape[0]),
                                resample=Image.BILINEAR)
                imo = imo.convert("L")  ###  Convert to grayscale 1-channel
                resizedImg_np = np.array(
                    imo)  ### Result is 2D numpy array predicted salient map
                img__lbl_name = test_lbl_name_list[i]
                gt_img = np.array(Image.open(img__lbl_name).convert(
                    "L"))  ### Ground truth salient map

                ### Compute metrics
                result_mae = getMAE(gt_img, resizedImg_np)
                average_mae.update(result_mae, 1)
                precision, recall = getPRCurve(gt_img, resizedImg_np)
                result_maxfmeasure = getMaxFMeasure(precision, recall)
                result_maxfmeasure = result_maxfmeasure.mean()
                average_maxf.update(result_maxfmeasure, 1)
                if (result_maxfmeasure > max_epoch_fmeasure):
                    max_epoch_fmeasure = result_maxfmeasure
                result_relaxedfmeasure = getRelaxedFMeasure(
                    gt_img, resizedImg_np)
                result_ownrelaxedfmeasure = own_RelaxedFMeasure(
                    gt_img, resizedImg_np)
                average_relaxedf.update(result_relaxedfmeasure, 1)
                average_own_RelaxedFMeasure.update(result_ownrelaxedfmeasure,
                                                   1)
                loss2, loss = muti_bce_loss_fusion(
                    d0, d1, d2, d3, d4, d5, d6, d7, d1_struct, d2_struct,
                    d3_struct, d4_struct, d5_struct, d6_struct, d7_struct,
                    labels_v, test_loss0, test_loss1, test_loss2, test_loss3,
                    test_loss4, test_loss5, test_loss6, test_loss7,
                    test_struct_loss1, test_struct_loss2, test_struct_loss3,
                    test_struct_loss4, test_struct_loss5, test_struct_loss6,
                    test_struct_loss7)
                del d0, d1, d2, d3, d4, d5, d6, d7, d1_struct, d2_struct, d3_struct, d4_struct, d5_struct, d6_struct, d7_struct, loss2, loss
                print(
                    "[test epoch: %3d/%3d, batch: %5d/%5d, ite: %d] test loss: %3f, tar: %3f "
                    % (epoch + 1, epoch_num, (i + 1) * batch_size_val,
                       test_num, ite_num, running_loss / ite_num4val,
                       running_tar_loss / ite_num4val))
            # Compute the epoch averages before resetting the accumulators, so the
            # best-model filenames below record the real values instead of zeros.
            avg_train_loss = running_loss / ite_num4val
            avg_tar_loss = running_tar_loss / ite_num4val
            model_name = model_dir + "basnet_bsi_epoch_%d_itr_%d_train_%3f_tar_%3f.pth" % (
                epoch + 1, ite_num, avg_train_loss, avg_tar_loss)
            torch.save(net.state_dict(), model_name)
            running_loss = 0.0
            running_tar_loss = 0.0
            net.train()  # resume train
            ite_num4val = 0
            if (average_mae.avg < best_ave_mae):
                best_ave_mae = average_mae.avg
                newname = model_dir + "bestMAE/basnet_bsi_epoch_%d_itr_%d_train_%3f_tar_%3f_mae_%3f.pth" % (
                    epoch + 1, ite_num, running_loss / ite_num4val,
                    running_tar_loss / ite_num4val, best_ave_mae)
                fold_dir = newname.rsplit("/", 1)
                if not os.path.isdir(fold_dir[0]): os.mkdir(fold_dir[0])
                copyfile(model_name, newname)
            if (max_epoch_fmeasure > best_max_fmeasure):
                best_max_fmeasure = max_epoch_fmeasure
                newname = model_dir + "bestEpochMaxF/basnet_bsi_epoch_%d_itr_%d_train_%3f_tar_%3f_maxfmeas_%3f.pth" % (
                    epoch + 1, ite_num, running_loss / ite_num4val,
                    running_tar_loss / ite_num4val, best_max_fmeasure)
                fold_dir = newname.rsplit("/", 1)
                if not os.path.isdir(fold_dir[0]): os.mkdir(fold_dir[0])
                copyfile(model_name, newname)
            if (average_maxf.avg > best_ave_maxf):
                best_ave_maxf = average_maxf.avg
                newname = model_dir + "bestAveMaxF/basnet_bsi_epoch_%d_itr_%d_train_%3f_tar_%3f_avemfmeas_%3f.pth" % (
                    epoch + 1, ite_num, running_loss / ite_num4val,
                    running_tar_loss / ite_num4val, best_ave_maxf)
                fold_dir = newname.rsplit("/", 1)
                if not os.path.isdir(fold_dir[0]): os.mkdir(fold_dir[0])
                copyfile(model_name, newname)
            if (average_relaxedf.avg > best_relaxed_fmeasure):
                best_relaxed_fmeasure = average_relaxedf.avg
                newname = model_dir + "bestAveRelaxF/basnet_bsi_epoch_%d_itr_%d_train_%3f_tar_%3f_averelaxfmeas_%3f.pth" % (
                    epoch + 1, ite_num, running_loss / ite_num4val,
                    running_tar_loss / ite_num4val, best_relaxed_fmeasure)
                fold_dir = newname.rsplit("/", 1)
                if not os.path.isdir(fold_dir[0]): os.mkdir(fold_dir[0])
                copyfile(model_name, newname)
            if (average_own_RelaxedFMeasure.avg > best_own_RelaxedFmeasure):
                best_own_RelaxedFmeasure = average_own_RelaxedFMeasure.avg
                newname = model_dir + "bestOwnRelaxedF/basnet_bsi_epoch_%d_itr_%d_train_%3f_tar_%3f_averelaxfmeas_%3f.pth" % (
                    epoch + 1, ite_num, running_loss / ite_num4val,
                    running_tar_loss / ite_num4val, best_own_RelaxedFmeasure)
                fold_dir = newname.rsplit("/", 1)
                if not os.path.isdir(fold_dir[0]): os.mkdir(fold_dir[0])
                copyfile(model_name, newname)
            plotter.plot('loss0', 'test', 'Main Loss 0', epoch + 1,
                         float(test_loss0.avg))
            plotter.plot('loss1', 'test', 'Main Loss 1', epoch + 1,
                         float(test_loss1.avg))
            plotter.plot('loss2', 'test', 'Main Loss 2', epoch + 1,
                         float(test_loss2.avg))
            plotter.plot('loss3', 'test', 'Main Loss 3', epoch + 1,
                         float(test_loss3.avg))
            plotter.plot('loss4', 'test', 'Main Loss 4', epoch + 1,
                         float(test_loss4.avg))
            plotter.plot('loss5', 'test', 'Main Loss 5', epoch + 1,
                         float(test_loss5.avg))
            plotter.plot('loss6', 'test', 'Main Loss 6', epoch + 1,
                         float(test_loss6.avg))
            plotter.plot('loss7', 'test', 'Main Loss 7', epoch + 1,
                         float(test_loss7.avg))
            plotter.plot('structloss1', 'test', 'Struct Loss 1', epoch + 1,
                         float(test_struct_loss1.avg))
            plotter.plot('structloss2', 'test', 'Struct Loss 2', epoch + 1,
                         float(test_struct_loss2.avg))
            plotter.plot('structloss3', 'test', 'Struct Loss 3', epoch + 1,
                         float(test_struct_loss3.avg))
            plotter.plot('structloss4', 'test', 'Struct Loss 4', epoch + 1,
                         float(test_struct_loss4.avg))
            plotter.plot('structloss5', 'test', 'Struct Loss 5', epoch + 1,
                         float(test_struct_loss5.avg))
            plotter.plot('structloss6', 'test', 'Struct Loss 6', epoch + 1,
                         float(test_struct_loss6.avg))
            plotter.plot('structloss7', 'test', 'Struct Loss 7', epoch + 1,
                         float(test_struct_loss7.avg))
            plotter.plot('mae', 'test', 'Average Epoch MAE', epoch + 1,
                         float(average_mae.avg))
            plotter.plot('max_maxf', 'test', 'Max Max Epoch F-Measure',
                         epoch + 1, float(max_epoch_fmeasure))
            plotter.plot('ave_maxf', 'test', 'Average Max F-Measure',
                         epoch + 1, float(average_maxf.avg))
            plotter.plot('ave_relaxedf', 'test', 'Average Relaxed F-Measure',
                         epoch + 1, float(average_relaxedf.avg))
            plotter.plot('own_RelaxedFMeasure', 'test',
                         'Own Average Relaxed F-Measure', epoch + 1,
                         float(average_own_RelaxedFMeasure.avg))
    print('-------------Congratulations! Training Done!!!-------------')