Example No. 1
0
def main():
    """Extract pretrained-convnet features for a COCO or Visual Genome split.

    Parses command-line arguments (module-level ``parser``), builds the
    model and the image pipeline, then runs ``extract`` and writes the
    features under ``<dir_data>/extract/<extract_name>/``.

    Raises:
        ValueError: if ``args.dataset`` is unknown, or if ``dir_data``
            does not match the chosen dataset, or if a non-train split is
            requested for Visual Genome.
    """
    global args
    args = parser.parse_args()

    print("=> using pre-trained model '{}'".format(args.arch))
    model = convnets.factory({'arch': args.arch},
                             cuda=True,
                             data_parallel=True)

    extract_name = 'arch,{}_size,{}'.format(args.arch, args.size)

    # ImageNet channel statistics expected by the pretrained convnets.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    if args.dataset == 'coco':
        if 'coco' not in args.dir_data:
            raise ValueError('"coco" string not in dir_data')
        dataset = datasets.COCOImages(args.data_split,
                                      dict(dir=args.dir_data),
                                      transform=transforms.Compose([
                                          # NOTE(review): transforms.Scale is a
                                          # deprecated alias of Resize in newer
                                          # torchvision — migrate when the
                                          # pinned version allows.
                                          transforms.Scale(args.size),
                                          transforms.CenterCrop(args.size),
                                          transforms.ToTensor(),
                                          normalize,
                                      ]))
    elif args.dataset == 'vgenome':
        if args.data_split != 'train':
            raise ValueError('train split is required for vgenome')
        if 'vgenome' not in args.dir_data:
            raise ValueError('"vgenome" string not in dir_data')
        dataset = datasets.VisualGenomeImages(args.data_split,
                                              dict(dir=args.dir_data),
                                              transform=transforms.Compose([
                                                  transforms.Scale(args.size),
                                                  transforms.CenterCrop(
                                                      args.size),
                                                  transforms.ToTensor(),
                                                  normalize,
                                              ]))
    else:
        # Fail fast: previously an unknown dataset fell through and
        # crashed later with a NameError on `dataset`.
        raise ValueError('unknown dataset: {}'.format(args.dataset))

    data_loader = DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.workers,
                             pin_memory=True)

    dir_extract = os.path.join(args.dir_data, 'extract', extract_name)
    path_file = os.path.join(dir_extract, args.data_split + 'set')
    # Portable and injection-safe replacement for
    # os.system('mkdir -p ' + dir_extract).
    os.makedirs(dir_extract, exist_ok=True)

    extract(data_loader, model, path_file, args.mode)
Example No. 2
0
def main():
    """Extract convnet features for one of several medical/vision datasets.

    Parses command-line arguments (module-level ``parser``), picks the
    dataset-specific convnet factory and image pipeline, then runs
    ``extract`` and writes the features under
    ``<dir_data>/extract/<extract_name>/``.

    Raises:
        ValueError: if ``args.dataset`` is unknown, or if ``dir_data``
            does not match the chosen dataset, or if a non-train split is
            requested for Visual Genome.
    """
    global args
    args = parser.parse_args()

    print("=> using pre-trained model '{}'".format(args.arch))

    # Each dataset family ships its own factory module.
    if args.dataset == "idrid":
        model = convnets_idrid.factory({'arch': args.arch},
                                       cuda=True,
                                       data_parallel=True)
    elif args.dataset == "tools":
        model = convnets_tools.factory({'arch': args.arch},
                                       cuda=True,
                                       data_parallel=True)
    elif args.dataset == "breast":
        model = convnets_breast.factory({'arch': args.arch},
                                        cuda=True,
                                        data_parallel=True)
    elif args.dataset == "med":
        model = convnets_med.factory({'arch': args.arch},
                                     cuda=True,
                                     data_parallel=True)
    elif args.dataset in ('coco', 'vgenome'):
        # These datasets are handled below but had no model factory here;
        # keep the original behavior explicit rather than a silent NameError.
        raise ValueError('no model factory for dataset: {}'.format(args.dataset))
    else:
        # Fail fast: previously an unknown dataset fell through and
        # crashed later with a NameError on `model`.
        raise ValueError('unknown dataset: {}'.format(args.dataset))

    extract_name = 'arch,{}_size,{}'.format(args.arch, args.size)

    # ImageNet channel statistics expected by the pretrained convnets.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    if args.dataset == 'coco':
        if 'coco' not in args.dir_data:
            raise ValueError('"coco" string not in dir_data')
        dataset = datasets.COCOImages(args.data_split,
                                      dict(dir=args.dir_data),
                                      transform=transforms.Compose([
                                          transforms.Resize(args.size),
                                          transforms.CenterCrop(args.size),
                                          # NOTE(review): this branch omits
                                          # `normalize`, unlike every other
                                          # dataset — confirm intentional.
                                          transforms.ToTensor()
                                      ]))
    elif args.dataset == 'vgenome':
        if args.data_split != 'train':
            raise ValueError('train split is required for vgenome')
        if 'vgenome' not in args.dir_data:
            raise ValueError('"vgenome" string not in dir_data')
        dataset = datasets.VisualGenomeImages(args.data_split,
                                              dict(dir=args.dir_data),
                                              transform=transforms.Compose([
                                                  transforms.Resize(args.size),
                                                  transforms.CenterCrop(
                                                      args.size),
                                                  transforms.ToTensor(),
                                                  normalize,
                                              ]))
    elif args.dataset == 'idrid':
        dataset = datasets.IDRIDImages(args.data_split,
                                       dict(dir=args.dir_data),
                                       transform=transforms.Compose([
                                           transforms.Resize(args.size),
                                           transforms.CenterCrop(args.size),
                                           transforms.ToTensor(),
                                           normalize,
                                       ]))
    elif args.dataset == 'tools':
        dataset = datasets.TOOLSImages(args.data_split,
                                       dict(dir=args.dir_data),
                                       transform=transforms.Compose([
                                           transforms.Resize(args.size),
                                           transforms.CenterCrop(args.size),
                                           transforms.ToTensor(),
                                           normalize,
                                       ]))
    elif args.dataset == 'breast':
        dataset = datasets.BREASTImages(args.data_split,
                                        dict(dir=args.dir_data),
                                        transform=transforms.Compose([
                                            transforms.Resize(args.size),
                                            transforms.CenterCrop(args.size),
                                            transforms.ToTensor(),
                                            normalize,
                                        ]))
    elif args.dataset == 'med':
        # Optional augmentation pipeline, toggled by a string flag.
        if gen_utils.str2bool(args.is_augment_image):
            transform = transforms.Compose([
                transforms.Resize(args.size),
                transforms.RandomRotation(degrees=(-20, 20)),
                augment_utils.PowerPILMed(),
                transforms.ToTensor(),
                normalize,
            ])
        else:
            transform = transforms.Compose([
                transforms.Resize(args.size),
                transforms.CenterCrop(args.size),
                transforms.ToTensor(),
                normalize,
            ])
        dataset = datasets.MEDImages(args.data_split,
                                     dict(dir=args.dir_data),
                                     transform=transform)

    data_loader = DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.workers,
                             pin_memory=True)

    dir_extract = os.path.join(args.dir_data, 'extract', extract_name)
    path_file = os.path.join(dir_extract, args.data_split + 'set')
    # Portable and injection-safe replacement for
    # os.system('mkdir -p ' + dir_extract).
    os.makedirs(dir_extract, exist_ok=True)

    extract(data_loader, model, path_file, args.mode, args.is_augment_image)