Example #1
0
 # NOTE(review): fragment of a larger script — `parser`, `os`, `cv`,
 # `ImagesLoader` and `ImageResizer` are defined outside this view.
 # Purpose (visible part): run MOG background subtraction over an image
 # directory and write the resulting foreground masks as numbered PNGs.
 parser.add_argument('-in_images',
                     type=str,
                     default='Contents/Images/',
                     help='input images directory')
 parser.add_argument('-alpha',
                     type=str,
                     default='Contents/Dataset/Alpha/',
                     help='images path')
 # fx/fy are the resize scale factors passed to ImageResizer below —
 # presumably relative factors (default 1 = unchanged); confirm against
 # ImageResizer's definition.
 parser.add_argument('-fx',
                     type=float,
                     default=1,
                     help='output size in x axis')
 parser.add_argument('-fy',
                     type=float,
                     default=1,
                     help='output size in y axis')
 args = parser.parse_args()
 # Ensure the output directory exists before writing into it.
 for directory in [args.alpha]:
     if not os.path.exists(directory):
         os.makedirs(directory)
 images_loader = ImagesLoader(args.in_images)
 images_loader = ImageResizer(images_loader, args.fx, args.fy)
 # MOG background subtractor: each apply() updates the background model
 # and returns a foreground mask for the frame.
 mog = cv.bgsegm.createBackgroundSubtractorMOG()
 index = 0
 for image in images_loader:
     image = mog.apply(image)
     # Zero-padded 4-digit file names: 0000.png, 0001.png, ...
     outputname = os.path.join(args.alpha,
                               str(index).rjust(4, '0') + '.png')
     cv.imwrite(outputname, image)
     index += 1
     print("Processed " + str(index) + " out of " + str(len(images_loader)))
Example #2
0
                        # NOTE(review): fragment — the opening
                        # parser.add_argument('-...', line of this call
                        # is outside this view.
                        type=int,
                        default=4,
                        help='number of filters in first layer')
    parser.add_argument('-model',
                        type=str,
                        default='Contents/Models/munet_sub.model',
                        help='path of segnet model')
    parser.add_argument('-epochs',
                        type=int,
                        default=100,
                        help='number of epochs')
    parser.add_argument('-batch', type=int, default=2, help='batch size')
    parser.add_argument('-lr', type=float, default=0.001, help='learning rate')
    args = parser.parse_args()

    # Build the dataset: images paired with grayscale alpha mattes form
    # the network input; precomputed .npy matrices are the targets.
    # (ImagesLoader/ImageConverter/Numpy/NumpyLoader/Loaders are project
    # classes defined outside this view.)
    images_loader = ImagesLoader(args.images)
    images_loader = ImageConverter(images_loader)
    alpha_loader = ImagesLoader(args.alpha, flag=cv.IMREAD_GRAYSCALE)
    alpha_loader = ImageConverter(alpha_loader)
    input_loader = Numpy((images_loader, alpha_loader))
    mask_loader = NumpyLoader(args.masks)
    loader = Loaders((input_loader, mask_loader))
    # Seeded shuffle so the train/test split is reproducible — confirm
    # exact semantics against divide_dataset's definition.
    train_loader, test_loader = divide_dataset(loader,
                                               ratio=args.ratio,
                                               seed=args.seed,
                                               shuffle=True)
    # Shuffle only the training set; 4 worker processes for loading.
    train_loader = DataLoader(train_loader, args.batch, True, num_workers=4)
    test_loader = DataLoader(test_loader, args.batch, False, num_workers=4)
    classes = getClasses(args.classes)
    weights = None
    loaded = False
Example #3
0
    # Purpose (visible part): compute the pixel-wise AND of two mask
    # directories and write the results as numbered PNGs.
    # NOTE(review): `Numpy` and `matrices_to_images` are imported but not
    # used in this visible fragment; `os`, `cv`, `argparse` and
    # `ImagesLoader` come from outside this view.
    from Loaders.Concatinators import Numpy
    from functions import matrices_to_images
    from Loaders.functions import concat_loaders
    parser = argparse.ArgumentParser()
    parser.add_argument('-in1',
                        type=str,
                        default='Contents/_Dataset/vLabels/',
                        help='first input mask')
    parser.add_argument('-in2',
                        type=str,
                        default='Contents/_Dataset/Alpha/',
                        help='second input mask')
    parser.add_argument('-out',
                        type=str,
                        default='Contents/_Dataset/AND/',
                        help='output')
    args = parser.parse_args()

    if not os.path.exists(args.out):
        os.makedirs(args.out)
    # Load both mask sets as single-channel (grayscale) images and
    # iterate them in lockstep via concat_loaders.
    masks1 = ImagesLoader(args.in1, flag=cv.IMREAD_GRAYSCALE)
    masks2 = ImagesLoader(args.in2, flag=cv.IMREAD_GRAYSCALE)
    masks = concat_loaders(masks1, masks2)
    index = 0
    for mask1, mask2 in masks:
        # Per-pixel intersection of the two masks.
        mask = cv.bitwise_and(mask1, mask2)
        # Zero-padded 4-digit names: 0000.png, 0001.png, ...
        outputname = os.path.join(args.out, str(index).rjust(4, '0') + '.png')
        cv.imwrite(outputname, mask)
        print(outputname)
        index += 1
Example #4
0
                     # NOTE(review): fragment — the opening
                     # parser.add_argument('-...', line of this call is
                     # outside this view, as are `parser`, `os`, `np`,
                     # `torch`, `args.masks`/`args.labels` etc.
                     type=str,
                     default='Contents/Dataset/weights.npy',
                     help='weights of each class')
 parser.add_argument('-fx',
                     type=float,
                     default=1,
                     help='output size in x axis')
 parser.add_argument('-fy',
                     type=float,
                     default=1,
                     help='output size in y axis')
 args = parser.parse_args()
 # Make sure both output directories exist before writing.
 for directory in [args.masks, args.labels]:
     if not os.path.exists(directory):
         os.makedirs(directory)
 masks_loader = ImagesLoader(args.in_masks)
 masks_loader = ImageResizer(masks_loader, args.fx, args.fy)
 classes = getClasses(args.classes)
 batch_loader = DataLoader(masks_loader, args.batch, False, num_workers=4)
 # Running per-class weight accumulator, one slot per class — exact
 # meaning depends on images_to_matrices; confirm there.
 weights = np.zeros(len(classes))
 index = 0
 for masks in batch_loader:
     masks = torch.ByteTensor(masks).to(args.device)
     # Convert color-coded mask images into per-class matrices plus
     # per-batch class weights (presumably one-hot — verify against
     # images_to_matrices), then render them back to images.
     matrices, _weights = images_to_matrices(masks, classes, args.device)
     masks = matrices_to_images(matrices, classes, args.device)
     weights += _weights
     for i in range(len(masks)):
         # Save each class-matrix as a zero-padded .npy file.
         # NOTE(review): `index` is not incremented in the visible part;
         # the increment presumably follows past this fragment's end.
         outputname = os.path.join(args.masks,
                                   str(index).rjust(4, '0') + '.npy')
         np.save(outputname, matrices[i].cpu().numpy())
         outputname = os.path.join(args.labels,
Example #5
0
             # NOTE(review): fragment — this is the tail of a list
             # comprehension whose `for directory in [` header opens
             # outside this view; `os`, `cv`, `np`, `torch`, `args`,
             # `directories` and the loader classes also come from there.
             os.path.join(parent_dir, _directory)
             for _directory in os.listdir(parent_dir)
     ]:
         # Keep only subdirectories that contain BOTH images/ and masks/;
         # assert that they never have one without the other.
         images = os.path.join(directory, 'images')
         masks = os.path.join(directory, 'masks')
         assert os.path.isdir(images) == os.path.isdir(masks)
         if os.path.isdir(images) and os.path.isdir(masks):
             directories.append((images, masks))
 print(directories)
 classes = getClasses(args.classes)
 # Running per-class weight accumulator across all directories.
 weights = np.zeros(len(classes))
 uindex = 0
 for images_dir, labels_dir in directories:
     print((images_dir, labels_dir))
     index = 0
     # Paired loaders: images and their label masks, resized with the
     # same fx/fy factors so they stay aligned.
     images_loader = ImagesLoader(images_dir)
     images_loader = ImageResizer(images_loader, args.fx, args.fy)
     masks_loader = ImagesLoader(labels_dir)
     masks_loader = ImageResizer(masks_loader, args.fx, args.fy)
     loader = concat_loaders(images_loader, masks_loader)
     batch_loader = DataLoader(loader, args.batch, False, num_workers=4)
     # Fresh MOG background model per directory, since each directory is
     # presumably an independent image sequence — confirm.
     mog = cv.bgsegm.createBackgroundSubtractorMOG()
     for images, masks in batch_loader:
         masks = torch.ByteTensor(masks).to(args.device)
         # Mask images -> per-class matrices + per-batch class weights,
         # then back to images (see images_to_matrices for semantics).
         matrices, _weights = images_to_matrices(masks, classes,
                                                 args.device)
         masks = matrices_to_images(matrices, classes, args.device)
         weights += _weights
         for i in range(len(images)):
             image = images[i].cpu().numpy()
             mogimg = mog.apply(image)