# ---- Example 1 ----
 def __init__(self, username):
     """Set up a Douban posting session for *username*.

     Side effects: configures a webdriver, per-account logging and cookies,
     then immediately calls ``self.up_posts()`` before returning.
     """
     self.driver = config_driver()
     self.username = str(username)
     # Per-account identifier reused for the log file and the cookie store.
     self.name = 'douban_' + self.username
     config_log(self.name)
     load_cookies(self.driver, self.name)
     # Posts configured for this account; None when the username is absent
     # from the ``douban['posts']`` mapping (dict.get default).
     self.posts = douban['posts'].get(self.username)
     # NOTE(review): launching work from __init__ makes construction have
     # heavy side effects — consider calling up_posts() from the caller.
     self.up_posts()
# ---- Example 2 ----
 def __init__(self, username):
     """Set up a Hupu commenting session for *username*.

     Side effects: configures a webdriver, per-account logging and cookies,
     then immediately calls ``self.comment_posts()`` before returning.
     """
     self.driver = config_driver()
     # Configuration pulled from the module-level ``hupu`` mapping.
     self.post_count = hupu['posts_count']
     self.user_id = hupu['user_id']
     self.commentaries = hupu['commentaries']
     # Running total of comments made so far in this session.
     self.comment_count = 0
     # Work queue of posts to comment on (filled elsewhere — TODO confirm).
     self.posts = Queue()
     # Give up after this many consecutive errors.
     self.max_error_num = 5
     self.username = str(username)
     # Per-account identifier reused for the log file and the cookie store.
     self.name = 'hupu_' + self.username
     config_log(self.name)
     load_cookies(self.driver, self.name)
     # NOTE(review): launching work from __init__ makes construction have
     # heavy side effects — consider calling comment_posts() from the caller.
     self.comment_posts()
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent(
            """\
            Fashion Hash Net Training Script
            --------------------------------
            Actions:
                1. train: train fashion net.
                2. evaluate: evaluate NDCG and accuracy.
                3. retrieval: retrieval for items.
                """
        ),
    )
    # Available sub-commands (train / evaluate / retrieval) mapped to handlers.
    actions = ACTION_FUNS.keys()
    parser.add_argument("action", help="|".join(sorted(actions)))
    parser.add_argument("--cfg", help="configuration file.")
    args = parser.parse_args()
    # Load the YAML config into keyword arguments for FashionParam.
    # FullLoader avoids the arbitrary-object construction of the legacy
    # default loader, but the config file is still trusted input.
    with open(args.cfg, "r") as f:
        kwargs = yaml.load(f, Loader=yaml.FullLoader)
    config = polyvore.param.FashionParam(**kwargs)
    # config.add_timestamp()
    logfile = utils.config_log(stream_level=config.log_level, log_file=config.log_file)
    LOGGER.info("Logging to file %s", logfile)
    LOGGER.info("Fashion param : %s", config)

    # Dispatch to the requested action; a non-zero exit signals a bad name.
    if args.action in actions:
        ACTION_FUNS[args.action](config)
        exit(0)
    else:
        LOGGER.info("Action %s is not in %s", args.action, "|".join(actions))
        exit(1)
# ---- Example 4 ----
    # sequences = test_set.get_all_sequences()
    # logger.debug("Sequences {}".format(sequences))
    # logger.debug("Test words {}".format(test_set.wordlist))
    # logger.debug("Sentences {}".format(test_set._load_sentence_word_indices()))

    # Collect the word-index list of every sentence in the test set.
    # NOTE(review): _load_sentence_word_indices is a private member of
    # test_set — confirm it is stable API before relying on it here.
    sentences = test_set._load_sentence_word_indices()
    sentences_indexes = []
    for k, v in sentences.items():
        sentences_indexes.append(v)
    # NOTE(review): the bare expression below is a no-op — looks like a
    # leftover from notebook debugging and can be deleted.
    sentences_indexes

    # for test_word_index, test_word in enumerate(test_set.wordlist):
    # for test_word_index in range(test_set.num_items):
    # Score each sentence; get_sentence_score presumably fills
    # probabilities/guesses as a side effect, since they are returned
    # unchanged below — TODO confirm against its definition.
    for sentence_indexes in sentences_indexes:
        sentence_score = get_sentence_score(sentence_indexes, models, test_set, probabilities, guesses, alpha_start, alpha_transition)
        logger.debug("Sentence indexes {}".format(sentence_indexes))
        logger.debug("Sentence score {}".format(sentence_score))
    return probabilities, guesses

if __name__ == "__main__":
    # Run the recognizer test case directly, without a unittest runner.
    from  asl_test_recognizer import TestRecognize
    from utils import config_log

    config_log()
    logger = logging.getLogger('recognizer')
    test_model = TestRecognize()
    # setUp() is invoked manually because no test framework drives this.
    test_model.setUp()
    test_model.test_recognize_probabilities_interface()
    test_model.test_recognize_guesses_interface()

# ---- Example 5 ----
# Training configuration for the IJB-A set-based model.
set_per_sub = 3  # number of image set per subject within a batch
pooling_type = 'NAN'
# Feature dimensionality depends on the backbone that produced the features.
if feat_type == 'casianet':
    feat_dim = 320
else:
    feat_dim = 512

max_iter = 51000
test_iter = 50

# Model snapshots go under a directory keyed by feature type, set size
# and pooling type.
save_dir = './data/IJBA/model/model_{}_s{}_{}'.format(feat_type, set_size,
                                                      pooling_type)
if not os.path.exists(save_dir):
    os.makedirs(save_dir)

logger = utils.config_log(save_dir,
                          'train_s{}b{}'.format(set_size, batch_size))
meta_dir = './data/IJBA/{}'.format(feat_type)
# NOTE(review): this handle is never closed in the visible code — wrap it
# in a with-statement or close it explicitly when evaluation finishes.
logger_result = open(os.path.join(save_dir, 'eval_result.txt'), 'w')
# Accumulators shaped (10 splits, 4 operating points) — presumably TAR
# values per split; TODO confirm the meaning of the 4 columns.
NAN_split_tars = np.zeros((10, 4), dtype=np.float32)
AVE_split_tars = np.zeros((10, 4), dtype=np.float32)
# Iterate the 10 IJB-A splits (1-based indices).
for idx_split in range(1, 11):
    # lst_sub_faces = pickle.load(open(os.path.join(meta_dir, 'train_subject_{}.bin'.format(idx_split)), 'rb'))
    # NOTE(review): pickle.load(open(...)) leaks the file handles — prefer
    # with-statements. The timestamped paths are hard-coded snapshots and
    # do not vary with idx_split — TODO confirm that is intentional.
    label_qry = pickle.load(open('label_qry20200114145205rpc.p', 'rb'))
    label_ref = pickle.load(open('label_ref20200115092149rpc.p', 'rb'))
    feature_qry = torch.load('feature_qry20200114145205rpc.p')
    feature_ref = torch.load('feature_ref20200115092149rpc.p')

    # Stack query and reference features along dim 0; labels concatenated
    # in the same order.
    lst_sub_faces = torch.cat((feature_qry, feature_ref), 0)
    label = label_qry + label_ref
    # Items per subject in the reference set — assumes every subject has
    # the same number of reference items; TODO confirm.
    set_size = len(label_ref) // len(set(label_ref))
    count_qry = len(label_qry)
# ---- Example 6 ----
# Unpack CLI options parsed upstream into module-level configuration.
num_epoch = parsed.num_epoch
save_dir = parsed.save_dir
train_filelist = parsed.train_filelist
root_dir = parsed.img_root_dir
pre_trained_model = parsed.pre_trained_model
gpus = parsed.gpus
train_batch_size = parsed.train_batch_size
step = parsed.step

input_image_size = 224
# WebFace has 10575 identities; 25 appear to be held out — TODO confirm.
num_classes = 10575 - 25
# str.split already returns a list; the previous list() wrapper was redundant.
lst_gpu = gpus.split(',')
os.environ['CUDA_VISIBLE_DEVICES'] = gpus
# exist_ok avoids the check-then-create race of os.path.exists + makedirs.
os.makedirs(save_dir, exist_ok=True)
logger = utils.config_log(log_dir=save_dir,
                          fname='webface_resnet34_coso_train')
logger.info(parsed)

# image loader
train_transforms = read_utils.webface_train_transforms
train_img_loader = ImageFilelist(root_dir=root_dir,
                                 fname=train_filelist,
                                 transform=train_transforms)
train_loader = torch.utils.data.DataLoader(train_img_loader,
                                           batch_size=train_batch_size,
                                           shuffle=True,
                                           num_workers=12)

# network
conv_net, spd_net = resnet.resnet34_cov(num_classes=num_classes,
                                        norm_type='T',
# ---- Example 7 ----
def create_lmdb(dataset, image_dir):
    """Pack every image file under *image_dir* into an LMDB at *dataset*.

    Each record is keyed by the image file name (ASCII-encoded) and holds
    the raw file bytes. The environment is sized at 1 TiB (2**40 bytes).
    All writes happen in a single transaction.
    """
    LOGGER.info("Creating LMDB to %s", dataset)
    image_list = utils.check.list_files(image_dir)
    env = lmdb.open(dataset, map_size=2**40)
    try:
        # open json file
        with env.begin(write=True) as txn:
            for image_name in tqdm(image_list):
                fn = os.path.join(image_dir, image_name)
                with open(fn, "rb") as f:
                    img_data = f.read()
                    txn.put(image_name.encode("ascii"), img_data)
    finally:
        # Close the environment even when a read or put raises — the
        # original leaked it on error.
        env.close()
    # Fixed log-message typo: "LDMB" -> "LMDB".
    LOGGER.info("Converted Polyvore to LMDB")


if __name__ == "__main__":

    # Configure logging first so the conversion progress is captured.
    LOGGER.info("Log to file %s", utils.config_log())
    parser = argparse.ArgumentParser(description="Create LMDB")
    # NOTE(review): `default=` on a required positional is ignored by
    # argparse unless nargs='?' is also given — confirm whether "src"
    # was meant to be optional.
    parser.add_argument(
        "src",
        default="data/polyvore/images/291x291",
        type=str,
        help="image folder for polyvore dataset",
    )
    parser.add_argument("dst", type=str, help="folder to save lmdb")

    args = parser.parse_args()
    # Argument order: destination LMDB first, source image folder second.
    create_lmdb(args.dst, args.src)
    exit(0)
# Unpack CLI options parsed upstream into module-level configuration.
num_epoch = parsed.num_epoch
save_dir = parsed.save_dir
train_filelist = parsed.train_filelist
root_dir = parsed.img_root_dir
pre_trained_model = parsed.pre_trained_model
gpus = parsed.gpus
train_batch_size = parsed.train_batch_size
step = parsed.step

input_image_size = 224
# WebFace has 10575 identities; 25 appear to be held out — TODO confirm.
num_classes = 10575 - 25

os.environ['CUDA_VISIBLE_DEVICES'] = gpus
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
logger = utils.config_log(log_dir=save_dir, fname='train')
logger.info(parsed)

# Training pipeline: file-list backed image dataset with WebFace
# augmentations, shuffled mini-batches.
train_transforms = read_utils.webface_train_transforms
train_img_loader = ImageFilelist(root_dir=root_dir,
                                 fname=train_filelist,
                                 transform=train_transforms)
train_loader = torch.utils.data.DataLoader(train_img_loader,
                                           batch_size=train_batch_size,
                                           shuffle=True,
                                           num_workers=8)

# Covariance-pooling ResNet-34; norm_type/num_iter semantics are defined
# in the project's resnet module — TODO confirm before changing.
conv_net, spd_net = resnet.resnet34_cov(num_classes=num_classes,
                                        norm_type='T',
                                        num_iter=5)
# ring_weight is defined outside the visible chunk.
r_loss = layer_utils.RingLoss(loss_weight=ring_weight)