Example #1
def main():
    # parse options
    op = Options()
    opt = op.parse()

    # initialize train or test working directory
    opt.model_dir = os.path.join("results", opt.name)
    logging.info("model directory %s" % opt.model_dir)
    if not os.path.exists(opt.model_dir):
        os.makedirs(opt.model_dir)
    log_dir = opt.model_dir
    log_path = log_dir + "/train.log"

    # save options to disk
    util.opt2file(opt, log_dir + "/opt.txt")
    # log setting
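    # (a file handler and a console handler both receive every record below)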
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(log_path, 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    log_level = logging.INFO
    logging.getLogger().setLevel(log_level)

    # define dataset
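    # a fixed RandomState(0) keeps the train/valid split reproducible across runs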
    indices = list(range(opt.num_example))
    rand_indices = np.random.RandomState(0)
    rand_indices.shuffle(indices)
    train_idx = indices[0:int(0.9 * len(indices))]
    valid_idx = indices[int(0.9 * len(indices)):]
    ds_train = Try_On_dataset(root=opt.data_dir,
                              indices=train_idx,
                              data_aug=opt.data_aug,
                              img_size=opt.img_size,
                              crop_size=opt.crop_size)
    ds_valid = Try_On_dataset(root=opt.data_dir,
                              indices=valid_idx,
                              data_aug=opt.data_aug,
                              img_size=opt.img_size,
                              crop_size=opt.crop_size)
    loader_train = DataLoader(ds_train,
                              shuffle=True,
                              batch_size=opt.batch_size,
                              num_workers=opt.num_workers)
    loader_valid = DataLoader(ds_valid,
                              shuffle=False,
                              batch_size=opt.batch_size,
                              num_workers=opt.num_workers)
Example #2
def main():
    # parse options
    op = Options()
    opt = op.parse()

    # initialize train or test working dir
    trainer_dir = "trainer_" + opt.name
    opt.model_dir = os.path.join(opt.dir, trainer_dir, "Train")
    opt.data_dir = os.path.join(opt.dir, trainer_dir, "Data")
    opt.test_dir = os.path.join(opt.dir, trainer_dir, "Test")

    if not os.path.exists(opt.data_dir):
        os.makedirs(opt.data_dir)
    if opt.mode == "Train":
        if not os.path.exists(opt.model_dir):
            os.makedirs(opt.model_dir)
        log_dir = opt.model_dir
        log_path = log_dir + "/train.log"
    if opt.mode == "Test":
        if not os.path.exists(opt.test_dir):
            os.makedirs(opt.test_dir)
        log_dir = opt.test_dir
        log_path = log_dir + "/train_Epoch2.log"

    # save options to disk
    util.opt2file(opt, log_dir + "/opt.txt")

    # log setting
    #log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    log_format = '%(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(log_path, 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    log_level = logging.INFO
    logging.getLogger().setLevel(log_level)

    # load train or test data
    data_loader = MultiLabelDataLoader(opt)
    if opt.mode == "Train":
        train_set = data_loader.GetTrainSet()
        val_set = data_loader.GetValSet()
    elif opt.mode == "Test":
        test_set = data_loader.GetTestSet()
    elif opt.mode == "Test-Train":
        # "Test-Train" fine-tunes on the test split, so it needs both sets
        val_set = data_loader.GetValSet()
        test_set = data_loader.GetTestSet()

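    # class counts for each label, plus lookups between class ids, raw ids and readable names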
    num_classes = data_loader.GetNumClasses()
    rid2name = data_loader.GetRID2Name()
    id2rid = data_loader.GetID2RID()
    opt.class_num = len(num_classes)

    # load model
    model = load_model(opt, num_classes)

    # define loss function
    criterion = nn.CrossEntropyLoss(weight=opt.loss_weight)

    # use cuda
    if opt.cuda:
        model = model.cuda(opt.devices[0])
        criterion = criterion.cuda(opt.devices[0])
        cudnn.benchmark = True

    # Train model
    if opt.mode == "Train":
        train(model, criterion, train_set, val_set, opt, (rid2name, id2rid))
    elif opt.mode == "Test-Train":
        train(model, criterion, test_set, val_set, opt, (rid2name, id2rid))
    # Test model
    elif opt.mode == "Test":
        test(model, criterion, test_set, opt)
Example #3
def main():
    # parse options 
    op = Options()
    opt = op.parse()

    # special setting
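    # deploy classifies one image at a time, so shuffling and extra loader threads are disabled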
    opt.shuffle = False
    opt.batch_size = 1
    opt.load_thread = 1

    # initialize train or test working dir
    test_dir = os.path.join(opt.classify_dir , opt.name)
    opt.model_dir = opt.dir + "/trainer_" + opt.name + "/Train/"
    if not os.path.exists(test_dir):
        os.mkdir(test_dir)

    # save options to disk
    opt2file(opt, os.path.join(test_dir, "opt.txt"))
    
    # log setting 
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(test_dir + "/deploy.log", 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    logging.getLogger().setLevel(logging.INFO)
    
    # load label  
    if opt.label_file == "":
        opt.label_file = opt.dir + "/label.txt"
    rid2name, id2rid, rid2id = load_label(opt.label_file)
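    # the label file appears to reserve two non-class rows per block, hence the "-2"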
    num_classes = [len(rid2name[index])-2 for index in range(len(rid2name))]
        
    # load transformer
    transformer = get_transformer(opt) 

    # load model
    model = load_model(opt, num_classes)
    model.eval()
    
    # use cuda
    if opt.cuda:
        model = model.cuda(opt.devices[0])
        cudnn.benchmark = True
    
    l = open(test_dir + "/classify_res_data.txt", 'w')
    with open(opt.classify_dir + "/data.txt") as data:
        for num, line in enumerate(data):
            logging.info(str(num+1))
            line = json.loads(line)
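            # load the image, crop it to the annotated box, and apply the test-time transformer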
            input_tensor = load_image(line["image_file"], line["box"], opt, transformer) 
            input_tensor = input_tensor.unsqueeze(0)
            if opt.cuda:
                input_tensor = input_tensor.cuda(opt.devices[0])
            outputs = model(Variable(input_tensor, volatile=True)) 
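            # a single-head model returns a bare tensor; wrap it so the per-head loop below works uniformly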
            if not isinstance(outputs, list):
                outputs = [outputs]
            line["classify_res"] = list() 
            for index, out in enumerate(outputs):
                out = out.cpu()
                #print "out:", out
                softmax = F.softmax(out, dim=1).data.squeeze()
                #print "softmax:", softmax 
                probs, ids = softmax.sort(0, True)
                classify_res = {}
                for i in range(len(probs)):
                    classify_res[rid2name[index][id2rid[index][ids[i]]]] = probs[i]
                classify_res["max_score"] = probs[0]
                classify_res["best_label"] = rid2name[index][id2rid[index][ids[0]]]
                line["classify_res"].append(classify_res)
            l.write(json.dumps(line, separators=(',', ':'))+'\n')
    l.close()
    logging.info("classification done")
Example #4
def main():
    # parse options
    op = Options()
    opt = op.parse()

    # initialize train or test working dir
    trainer_dir = "trainer_" + opt.name
    opt.model_dir = os.path.join(opt.dir, trainer_dir, "Train")
    opt.data_dir = os.path.join(opt.dir, trainer_dir, "Data")
    opt.test_dir = os.path.join(opt.dir, trainer_dir, "Test")

    if not os.path.exists(opt.data_dir):
        os.makedirs(opt.data_dir)
    if opt.mode == "Train":
        if not os.path.exists(opt.model_dir):
            os.makedirs(opt.model_dir)
        log_dir = opt.model_dir
        log_path = log_dir + "/train.log"
    if opt.mode == "Test":
        if not os.path.exists(opt.test_dir):
            os.makedirs(opt.test_dir)
        log_dir = opt.test_dir
        log_path = log_dir + "/test.log"

    # save options to disk
    util.opt2file(opt, log_dir + "/opt.txt")

    # log setting
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(log_path, 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    log_level = logging.INFO
    logging.getLogger().setLevel(log_level)

    # load train or test data
    ds = DeepFashionDataset(opt)
    num_data = len(ds)
    indices = list(range(num_data))
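    # hold out ratio[1] + ratio[2] of the examples, then split the held-out part 50/50 into val and test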
    split = int((opt.ratio[1] + opt.ratio[2]) * num_data)
    validation_Test_idx = np.random.choice(indices, size=split, replace=False)
    train_idx = list(set(indices) - set(validation_Test_idx))
    train_sampler = SubsetRandomSampler(train_idx)
    # validation Set
    split = int(round(0.5 * len(validation_Test_idx)))
    validation_idx = np.random.choice(validation_Test_idx, size=split, replace=False)
    validation_sampler = SubsetRandomSampler(validation_idx)
    # Test set
    test_idx = list(set(validation_Test_idx) - set(validation_idx))
    test_sampler = SubsetRandomSampler(test_idx)

    train_set = DataLoader(ds, batch_size=opt.batch_size, shuffle=False, sampler=train_sampler)
    val_set = DataLoader(ds, batch_size=opt.batch_size, shuffle=False, sampler=validation_sampler)
    test_set = DataLoader(ds, batch_size=opt.batch_size, shuffle=False, sampler=test_sampler)

    num_classes = [opt.numctg, opt.numattri]  # temporary: class counts per head [categories, attributes]
    opt.class_num = len(num_classes)

    # load model
    model = load_model(opt, num_classes)

    # define loss function
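    # cross-entropy for the single-label category head, BCE for the multi-label attribute head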
    criterion_softmax = nn.CrossEntropyLoss(weight=opt.loss_weight)
    criterion_binary = torch.nn.BCELoss()

    # use cuda
    if opt.cuda:
        model = model.cuda(opt.devices[0])
        criterion_softmax = criterion_softmax.cuda(opt.devices[0])
        criterion_binary = criterion_binary.cuda(opt.devices[0])
        cudnn.benchmark = True

    # Train model
    if opt.mode == "Train":
        train(model, criterion_softmax, criterion_binary, train_set, val_set, opt)
    # Test model
    elif opt.mode == "Test":
        test(model, criterion_softmax, criterion_binary, test_set, opt)
Example #5
def main():
    # parse options
    op = Options()
    opt = op.parse()

    # save log to disk
    if opt.mode == "Train":
        log_path = opt.out_dir + "/train.log"
    else:
        # fallback so log_path is always defined (only "Train" is handled below)
        log_path = opt.out_dir + "/run.log"

    # save options to disk
    util.opt2file(opt, opt.out_dir + "/opt.txt")

    # log setting
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(log_path, 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    log_level = logging.INFO
    logging.getLogger().setLevel(log_level)

    # load train or test data
    data_loader = PoseDataLoader(opt)
    if opt.mode == "Train":
        train_set = data_loader.GetTrainSet()
        val_set = data_loader.GetValSet()

    # load model
    model = load_model(opt)

    # define loss function
    criterion = JointsMSELoss(opt)

    # define optimizer
    if opt.optim == 'Adam':
        optimizer = optim.Adam(model.parameters(),
                               opt.lr,
                               betas=(0.9, 0.999),
                               eps=1e-08,
                               weight_decay=opt.weight_decay,
                               amsgrad=False)
    else:
        optimizer = optim.SGD(model.parameters(),
                              opt.lr,
                              momentum=opt.momentum,
                              weight_decay=opt.weight_decay)
    # define learning rate scheduler
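    # StepLR multiplies the learning rate by opt.gamma every opt.lr_decay_in_epoch epochs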
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=opt.lr_decay_in_epoch,
                                          gamma=opt.gamma)

    # use cuda
    if len(opt.device_ids) == 1:
        model = model.cuda(opt.device_ids[0])
        cudnn.benchmark = True
    elif len(opt.device_ids) > 1:
        model = nn.DataParallel(model.cuda(opt.device_ids[0]),
                                device_ids=opt.device_ids)
        cudnn.benchmark = True

    # Train model
    if opt.mode == "Train":
        train(model, criterion, train_set, val_set, optimizer, scheduler, opt)
Example #6
def main():

    print("parse opt...")

    # parse options
    op = Options()
    opt = op.parse()

    # initialize train or test working dir
    opt.model_dir = os.path.join("results", opt.name)
    logging.info("Model directory: %s" % opt.model_dir)

    if not os.path.exists(opt.model_dir):
        os.makedirs(opt.model_dir)
    log_dir = opt.model_dir
    log_path = log_dir + "/train.log"

    # save options to disk
    util.opt2file(opt, log_dir + "/opt.txt")

    # log setting
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(log_path, 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    log_level = logging.INFO
    logging.getLogger().setLevel(log_level)
    '''
    pkl_file = "%s/metadata.pkl" % opt.data_dir
    if not os.path.exists(pkl_file):
        # If metadata file does not exist, manually create it
        # from the txt files and save a pkl.
        filenames, attrs = get_list_attr_img(opt.data_dir)
        categories = get_list_category_img(opt.data_dir)
        with open(pkl_file, "wb") as f:
            pickle.dump({'filenames': filenames,
                         'attrs': attrs,
                         'categories': categories}, f)
    else:
        logging.info("Found %s..." % pkl_file)
        with open(pkl_file, "rb") as f:
            dat = pickle.load(f)
            filenames = dat['filenames']
            attrs = dat['attrs']
            categories = dat['categories']
    '''

    attrs = get_list_attr_img(opt.data_dir)
    categories = get_list_category_img(opt.data_dir)
    bboxes = get_bboxes(opt.data_dir)

    indices = list(range(len(attrs.keys())))
    rnd_state = np.random.RandomState(0)
    rnd_state.shuffle(indices)
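    # deterministic 90/5/5 split into train / valid / test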
    train_idx = indices[0:int(0.9 * len(indices))]
    valid_idx = indices[int(0.9 * len(indices)):int(0.95 * len(indices))]
    test_idx = indices[int(0.95 * len(indices))::]

    # Define datasets.
    ds_train = DeepFashionDataset(root=opt.data_dir,
                                  indices=train_idx,
                                  attrs=attrs,
                                  categories=categories,
                                  bboxes=bboxes,
                                  data_aug=opt.data_aug,
                                  img_size=opt.img_size,
                                  crop_size=opt.crop_size)
    ds_valid = DeepFashionDataset(root=opt.data_dir,
                                  indices=valid_idx,
                                  attrs=attrs,
                                  categories=categories,
                                  bboxes=bboxes,
                                  data_aug=opt.data_aug,
                                  img_size=opt.img_size,
                                  crop_size=opt.crop_size)
    '''
    ds_test = DeepFashionDataset(root=opt.data_dir,
                                 indices=test_idx,
                                 img_size=opt.img_size,
                                 crop_size=opt.crop_size)
    '''
    # Define data loaders.
    loader_train = DataLoader(ds_train,
                              shuffle=True,
                              batch_size=opt.batch_size,
                              num_workers=opt.num_workers)
    loader_valid = DataLoader(ds_valid,
                              shuffle=False,
                              batch_size=opt.batch_size,
                              num_workers=opt.num_workers)
    '''
    loader_test = DataLoader(ds_test,
                             shuffle=False,
                             batch_size=opt.batch_size,
                             num_workers=1)
    '''

    # load model
    model = FashionResnet(50, 1000, opt.resnet_type)
    logging.info(model)

    if opt.optimizer == 'adam':
        optimizer = optim.Adam(model.parameters(), lr=opt.lr, eps=opt.eps)
    else:
        optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=0.9)

    # load existing model
    last_epoch = 0
    if opt.resume is not None:
        if opt.resume == 'auto':
            import glob
            # List all the pkl files.
            files = glob.glob("%s/*.pth" % opt.model_dir)
            # Make them absolute paths.
            files = [os.path.abspath(key) for key in files]
            if len(files) > 0:
                # Get creation time and use that.
                latest_chkpt = max(files, key=os.path.getctime)
                logging.info("Auto-resume mode found latest checkpoint: %s" %
                             latest_chkpt)
                last_epoch = load_model(model,
                                        latest_chkpt,
                                        optimizer,
                                        devices=opt.devices)
        else:
            logging.info("Loading checkpoint: %s" % opt.resume)
            last_epoch = load_model(model,
                                    opt.resume,
                                    optimizer,
                                    devices=opt.devices)

    # Weight_attribute = get_weight_attr_img(opt)
    # print(len(Weight_attribute))

    # define loss function
    criterion_softmax = nn.CrossEntropyLoss()  # weight=opt.loss_weight
    if opt.loss == 'bce':
        if opt.pos_weights:
            logging.info("Using pos_weights...")
            pos_weights = (1 - attrs).sum(dim=0) / attrs.sum(dim=0)
            # Scale pos_weights such that its maximum value will be == pos_weights_scale.
            # This is in case pos_weights has too big of a range.
            pos_weights = pos_weights / (pos_weights.max() /
                                         opt.pos_weights_scale)
            criterion_binary = torch.nn.BCEWithLogitsLoss(
                pos_weight=pos_weights, reduction='none')
        else:
            criterion_binary = torch.nn.BCEWithLogitsLoss(reduction='none')
    else:
        if opt.pos_weights:
            raise Exception("`pos_weights` only works with BCE loss!")
        criterion_binary = HingeLoss()

    # use cuda
    if opt.cuda:
        model = model.cuda(opt.devices[0])
        criterion_softmax = criterion_softmax.cuda(opt.devices[0])
        criterion_binary = criterion_binary.cuda(opt.devices[0])

    # float16
    if opt.fp16:
        if not amp_imported:
            raise Exception("""Was not able to import apex library. This is
                required for float16 mode.""")
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          enabled=True,
                                          opt_level='O1')

    # Train model
    if opt.mode == "train":
        logging.info("Running in train mode")
        train(model=model,
              optimizer=optimizer,
              criterion_softmax=criterion_softmax,
              criterion_binary=criterion_binary,
              train_loader=loader_train,
              val_loader=loader_valid,
              opt=opt,
              epoch=last_epoch)
    # Test model
    elif opt.mode == "validate":
        logging.info("Running in validate mode")
        accs = forward_dataset(model, criterion_softmax, criterion_binary,
                               loader_valid, opt)
        for key in accs:
            print("%s --> %.4f +/- %.4f" %
                  (key, np.mean(accs[key]), np.std(accs[key])))
    elif opt.mode == "test":
        logging.info("Running in test mode")
        ds_test = DeepFashionDataset(root=opt.data_dir,
                                     indices=test_idx,
                                     attrs=attrs,
                                     categories=categories,
                                     bboxes=bboxes,
                                     img_size=opt.img_size,
                                     crop_size=opt.crop_size)
        loader_test = DataLoader(ds_test,
                                 shuffle=False,
                                 batch_size=opt.batch_size,
                                 num_workers=opt.num_workers)
        accs = forward_dataset(model, criterion_softmax, criterion_binary,
                               loader_test, opt)
        for key in accs:
            print("%s --> %.4f +/- %.4f" %
                  (key, np.mean(accs[key]), np.std(accs[key])))
Example #7
def main():
    # parse options 
    op = Options()
    opt = op.parse()

    # initialize train or test working dir
    trainer_dir = "trainer_" + opt.name
    opt.model_dir = os.path.join(opt.dir, trainer_dir, "Train") 
    opt.data_dir = os.path.join(opt.dir, trainer_dir, "Data") 
    opt.test_dir = os.path.join(opt.dir, trainer_dir, "Test") 
    
    if not os.path.exists(opt.data_dir):
        os.makedirs(opt.data_dir)
    if opt.mode == "Train":
        if not os.path.exists(opt.model_dir):        
            os.makedirs(opt.model_dir)
        log_dir = opt.model_dir 
        log_path = log_dir + "/train.log"
    if opt.mode == "Test":
        if not os.path.exists(opt.test_dir):
            os.makedirs(opt.test_dir)
        log_dir = opt.test_dir
        log_path = log_dir + "/test.log"

    # save options to disk
    util.opt2file(opt, log_dir+"/opt.txt")
    
    # log setting 
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(log_path, 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    log_level = logging.INFO
    logging.getLogger().setLevel(log_level)
    
    # load train or test data
    data_loader = MultiLabelDataLoader(opt)
    if opt.mode == "Train":
        train_set = data_loader.GetTrainSet()
        val_set = data_loader.GetValSet()
    elif opt.mode == "Test":
        test_set = data_loader.GetTestSet()

    num_classes = data_loader.GetNumClasses()
    rid2name = data_loader.GetRID2Name()
    id2rid = data_loader.GetID2RID()
    opt.class_num = len(num_classes)

    # load model
    model = load_model(opt, num_classes)

    # define loss function
    criterion = nn.CrossEntropyLoss(weight=opt.loss_weight) 
    
    # use cuda
    if opt.cuda:
        model = model.cuda(opt.devices[0])
        criterion = criterion.cuda(opt.devices[0])
        cudnn.benchmark = True
    
    # Train model
    if opt.mode == "Train":
        train(model, criterion, train_set, val_set, opt, (rid2name, id2rid))
    # Test model
    elif opt.mode == "Test":
        test(model, criterion, test_set, opt)
Example #8
def main():
    # parse options
    op = Options()
    opt = op.parse()

    # special setting
    opt.shuffle = False
    opt.batch_size = 1
    opt.load_thread = 1

    # initialize train or test working dir
    test_dir = os.path.join(opt.classify_dir, opt.name)
    opt.model_dir = opt.dir + "/trainer_" + opt.name + "/Train/"
    if not os.path.exists(test_dir):
        os.mkdir(test_dir)

    # save options to disk
    opt2file(opt, os.path.join(test_dir, "opt.txt"))

    # log setting
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(test_dir + "/deploy.log", 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    logging.getLogger().setLevel(logging.INFO)

    # load label
    if opt.label_file == "":
        opt.label_file = opt.dir + "/label.txt"
    rid2name, id2rid, rid2id = load_label(opt.label_file)
    num_classes = [len(rid2name[index]) - 2 for index in range(len(rid2name))]

    # load transformer
    transformer = get_transformer(opt)

    # load model
    model = load_model(opt, num_classes)
    model.eval()

    # use cuda
    if opt.cuda:
        model = model.cuda(opt.devices[0])
        cudnn.benchmark = True

    l = open(test_dir + "/classify_res_data.txt", 'w')
    with open(opt.classify_dir + "/data.txt") as data:
        for num, line in enumerate(data):
            logging.info(str(num + 1))
            line = json.loads(line)
            input_tensor = load_image(line["image_file"], line["box"], opt,
                                      transformer)
            input_tensor = input_tensor.unsqueeze(0)
            if opt.cuda:
                input_tensor = input_tensor.cuda(opt.devices[0])
            outputs = model(Variable(input_tensor, volatile=True))
            if not isinstance(outputs, list):
                outputs = [outputs]
            line["classify_res"] = list()
            for index, out in enumerate(outputs):
                out = out.cpu()
                #print "out:", out
                softmax = F.softmax(out, dim=1).data.squeeze()
                #print "softmax:", softmax
                probs, ids = softmax.sort(0, True)
                classify_res = {}
                for i in range(len(probs)):
                    classify_res[rid2name[index][id2rid[index][
                        ids[i]]]] = probs[i]
                classify_res["max_score"] = probs[0]
                classify_res["best_label"] = rid2name[index][id2rid[index][
                    ids[0]]]
                line["classify_res"].append(classify_res)
            l.write(json.dumps(line, separators=(',', ':')) + '\n')
    l.close()
    logging.info("classification done")