Example no. 1 (score: 0)
                        help='number of workers for dataloader')
    parser.add_argument('-b',
                        type=int,
                        default=16,
                        help='batch size for dataloader')
    # NOTE(review): type=bool is an argparse pitfall -- bool() of any
    # non-empty string (including "False") is True, so "-s False" still
    # yields True. Consider action='store_true' or a str-to-bool converter.
    parser.add_argument('-s',
                        type=bool,
                        default=True,
                        help='whether shuffle the dataset')

    # Parse CLI args into a plain dict and log them for reproducibility.
    args_dict = vars(parser.parse_args())
    logger.info(args_dict)

    # Build the network from the requested architecture name and GPU flag.
    net_type = args_dict['net']
    use_gpu = args_dict['gpu']
    net = build_network(archi=net_type, use_gpu=use_gpu)
    # logger.info(net)

    # NOTE(review): the second positional parameter of load_state_dict is
    # `strict`, not a device/GPU flag -- passing args_dict['gpu'] here almost
    # certainly maps a truthy gpu flag to strict=True by accident. Verify
    # the intent; map_location belongs on torch.load instead.
    net.load_state_dict(torch.load(args_dict['weights']), args_dict['gpu'])
    net.eval()  # inference mode: freezes dropout / batch-norm statistics

    # ImageFolder is built only to recover the index -> class-name mapping.
    example_image_dir = '../datasets/Image_Search_Dataset/train'
    dataset = datasets.ImageFolder(example_image_dir, transform=None)
    idx_to_class = {v: k for k, v in dataset.class_to_idx.items()}

    # Class names are the immediate sub-directories of the training folder.
    original_path = "../datasets/Image_Search_Dataset/train"
    list_classes = next(os.walk(original_path))[1]
    # Reuse a previously pickled LSH index and feature dict when available.
    # NOTE(review): pickle.load is unsafe on untrusted files (assumed locally
    # produced here), and the open() handles are never closed -- prefer
    # `with open(...) as f:` blocks.
    if os.path.isfile('lsh.p'):
        logger.info("load indexed dict")
        lsh = pickle.load(open('lsh.p', 'rb'))
        feature_dict = pickle.load(open('feature_dict.p', 'rb'))
Example no. 2 (score: 0)
    # NOTE(review): type=bool is an argparse pitfall -- any non-empty string
    # (including "False") parses as True. Prefer action='store_true'.
    parser.add_argument('-s',
                        type=bool,
                        default=True,
                        help='whether shuffle the dataset')
    # NOTE(review): parse_args() runs twice here (args and args_dict);
    # one parse plus vars(args) would suffice and avoids re-parsing argv.
    args = parser.parse_args()
    args_dict = vars(parser.parse_args())
    logger.info(args_dict)

    ###         Initialize the model
    net_type = args_dict['net']
    use_gpu = args_dict['gpu']
    # Number of output classes = number of sub-directories (one per author)
    # in the training folder.
    standard_folder = global_settings.TRAIN_FOLDER
    list_author = next(os.walk(standard_folder))[1]
    num_classes = len(list_author)
    net = build_network(archi=net_type,
                        use_gpu=use_gpu,
                        num_classes=num_classes)
    # NOTE(review): unconditional .cuda() ignores the use_gpu flag parsed
    # above and will raise on CPU-only machines -- confirm intent.
    net.cuda()

    # net_type = args_dict['net']
    # use_gpu = args_dict['gpu']
    # standard_folder = global_settings.TRAIN_FOLDER
    # list_author = next(os.walk(standard_folder))[1]
    # num_class = len(list_author)
    # net = build_network(archi = net_type, use_gpu=use_gpu)  # , num_class=num_class
    logger.info(net)

    # NOTE(review): load_state_dict's second positional parameter is
    # `strict`, not a GPU flag -- passing args.gpu here is suspect; verify.
    net.load_state_dict(torch.load(args.weights), args.gpu)
    net.eval()  # inference mode: freezes dropout / batch-norm statistics

    # Directory holding the held-out test images.
    test_image_dir = global_settings.TEST_FOLDER
Example no. 3 (score: 0)
    ###         Datasets loader
    # Both loaders normalize with the TRAINING set's mean/std (standard
    # practice: test data must not leak its own statistics).
    stamp_training_loader = get_training_dataloader(global_settings.TRAIN_MEAN,
                                                    global_settings.TRAIN_STD,
                                                    num_workers=args.w,
                                                    batch_size=args.b,
                                                    shuffle=args.s)
    # The test loader also returns the index -> class-name mapping.
    stamp_test_loader, idx_to_class = get_test_dataloader(
        global_settings.TRAIN_MEAN,
        global_settings.TRAIN_STD,
        num_workers=args.w,
        batch_size=args.b,
        shuffle=args.s)

    ###         Initialize the model
    # build_network takes the full args namespace here (unlike the keyword
    # form in the commented-out variant below).
    net = build_network(args)
    # net_type = args_dict['net']
    # use_gpu = args_dict['gpu']
    # standard_folder = global_settings.TRAIN_FOLDER
    # list_author = next(os.walk(standard_folder))[1]
    # num_classes = len(list_author)
    # net = build_network(archi=net_type, use_gpu=use_gpu, num_classes=num_classes)

    ###         Optimizer & compute loss function
    loss_function = nn.CrossEntropyLoss()
    # SGD with momentum 0.9 and weight decay 5e-4; learning rate from CLI.
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=5e-4)

    #   Log infor modularity to Pytorch models & optimizer