def exp_draw():
    config = Config()
    dataloader = DataLoader()
    # read a
    with open(sys.path[0] + "/../a") as a_in:
        aid = a_in.read().split('\n')[0]

    # read n
    with open(sys.path[0] + "/../n") as n_in:
        nids = n_in.read().split('\n')

    #nids.remove('None')
    if '' in nids:
        nids.remove('')

    cate_trees = generate_category_tree(dataloader)
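    # one list of anchor-to-neighbour distances per smoothing value sigma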
    rlt_dist = {1.:[], 0.1:[], 0.01:[], 0.001:[], 0.0001:[]}
    x_axis = ['0%', '20%', '40%', '60%', '80%', '100%']

    fig, axs = plt.subplots(ncols=5)
    sigmas = [1., 0.1, 0.01, 0.001, 0.0001]
    for row, sigma in enumerate(sigmas):
        ax = axs[row]
        data = dataloader.load(vectorized_convertor, pivots=cate_trees, sigma=sigma, valid_uid=[aid] + nids)
        avec = data[0]
        nvecs = data[1:]
        for nv in nvecs:
            rlt_dist[sigma].append(vectorized_dist_calculator(np.array(avec), np.array(nv)))
        print(rlt_dist[sigma])
        ax.bar(x_axis, rlt_dist[sigma] + [0.0], width=0.4)
        for a, b in zip(x_axis, rlt_dist[sigma] + [0.0]):
            ax.text(a, b + 1e-9, '%f' % b, ha='center', va='bottom', fontsize=7)
        ax.set_title(str(sigma))
    plt.show()
def generate_eighty_percent():
    dataloader = DataLoader()
    # read a
    with open(sys.path[0] + "/../a") as a_in:
        aid = a_in.read().split('\n')[0]

    # read n
    with open(sys.path[0] + "/../n") as n_in:
        nids = n_in.read().split('\n')

    #nids.remove('None')
    if '' in nids:
        nids.remove('')
    cate_trees = generate_category_tree(dataloader)
    def cluster_convertor(uid, bus_cate_dict, kwargs):
        return [uid, list(bus_cate_dict.keys())]
    data = dataloader.load(cluster_convertor, valid_uid=[aid] + nids)
    a_data = data[0]
    u_data = data[-1]
    print('===============')
    print(a_data)

    for u_data in data[1:]:
        print('==================')
        print(u_data)
        a_set = set(a_data[1])
        u_set = set(u_data[1])
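        # Jaccard similarity: |intersection| / |union| of the two users' key sets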
        print(float(len(a_set.intersection(u_set))) / float(len(a_set.union(u_set))))
Example #3
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    # create_dirs([config.summary_dir, config.checkpoint_dir, config.visual_dir])

    print('Create the data generator.')
    data_generator = DataLoader(config)

    print('Create the model.')
    models = BlurEnsemble(config)

    print('Create the trainer')
    trainer = BlurEnsembleTrainer(models.models,
                                  data_generator.get_train_data(),
                                  data_generator.get_test_data(), config)

    print('Start training the model.')
    trainer.train_gen()

    print('Visualize the losses')
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir, config.visual_dir])

    models = {
        'noise_model1': NoiseModel.build_model1(config),
        'noise_model2': NoiseModel.build_model2(config),
        'noise_model3': NoiseModel.build_model3(config)
    }

    print('Create the data generator.')
    data_generator = DataLoader(config)

    print('Create the model.')
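    # select the noise-model variant to train by the experiment name in the config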
    model = models[config.exp_name]

    print('Create the trainer')
    trainer = NoiseModelTrainer(model, data_generator.get_train_data(), config)

    print('Start training the model.')
    trainer.train()

    print('Visualize the losses')
    trainer.visualize()
def main():
    ctpn = CTPN(cfg)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    ctpn.load_ckpt(sess) # ctpn load
    
    
    if cfg.ADJUST_ANGLE:
        angle_detector = VGG(cfg) # vgg load
        angle_detector.load_weights()
        
    data = DataLoader(cfg)
    text = TextGenerator(cfg)
    
    densenet = DenseNet(cfg,text.nrof_classes)
    densenet.load()
    
#    image_path = raw_input("please input your image path and name:") # get image path
    image_path = '/home/jwm/Desktop/OCR-standard/images/xuanye.jpg'
    img = data.load_data(image_path)
    t = time.time()    
    if cfg.ADJUST_ANGLE:
        img = rotate(img, angle_detector) # rotate image if necessary
        
#    img = cv2.resize(img, (2000,3000),interpolation=cv2.INTER_CUBIC)
    
    text_recs, detected_img, img = detect(img, data, ctpn, sess) # detect text
    results = recognize(img, text_recs, densenet, text, adjust=False) # recognize text
    print("It takes time:{}s".format(time.time() - t))
    for key in results:
        print(results[key][1])
    def test_load(self):
        loader = DataLoader()

        def convert_func(uid, business_dict, arg_dict):
            return {uid: list(business_dict.values())}

        data = loader.load(convert_func, arg1=1)
        print('load done')
def train():
    data_loader = DataLoader(data_dir='datasets/hymenoptera_data',
                             image_size=IMAGE_SIZE,
                             batch_size=4)
    inputs, classes = next(iter(data_loader.load_data()))
    out = torchvision.utils.make_grid(inputs)
    data_loader.show_image(
        out, title=[data_loader.data_classes[c] for c in classes])
def predict():
    if len(sys.argv) > 1:
        print('predict image from : {}'.format(sys.argv[1]))
        data_loader = DataLoader(data_dir='datasets/hymenoptera_data', image_size=IMAGE_SIZE)
        if os.path.exists(sys.argv[1]):
            inputs = data_loader.make_predict_inputs(sys.argv[1])
            predict_single_image(inputs, data_loader.data_classes)
    else:
        print('must specify an image file path.')
Example #9
def predict():
    if len(sys.argv) > 1:
        print('predict image from : {}'.format(sys.argv[1]))
        data_loader = DataLoader(data_dir='datasets/hymenoptera_data',
                                 image_size=IMAGE_SIZE)
        if os.path.exists(sys.argv[1]):
            inputs = data_loader.make_predict_inputs(sys.argv[1])
            predict_single_image(inputs, data_loader.data_classes)
    else:
        print('must specify an image file path.')
Example #10
def reset_session(save_path, model, vggface, train_batchA, train_batchB):
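    # NOTE: the assignments below rebind the local parameter names only; unlike
    # the global-based reset_session in Example #15 below, the caller never
    # sees the rebuilt model or data loaders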
    model.save_weights(path=save_path)
    K.clear_session()
    model = FaceswapGANModel(**arch_config)
    model.load_weights(path=save_path)
    #vggface = VGGFace(include_top=False, model='resnet50', input_shape=(224, 224, 3))
    vggface = RESNET50(include_top=False, weights=None, input_shape=(224, 224, 3))
    vggface.load_weights("rcmalli_vggface_tf_notop_resnet50.h5")
    model.build_pl_model(vggface_model=vggface, before_activ=loss_config["PL_before_activ"])
    train_batchA = DataLoader(train_A, train_AnB, batchSize, img_dirA_bm_eyes,
                              RESOLUTION, num_cpus, K.get_session(), **da_config)
    train_batchB = DataLoader(train_B, train_AnB, batchSize, img_dirB_bm_eyes,
                              RESOLUTION, num_cpus, K.get_session(), **da_config)
def main():
    # get json configuration filepath from the run argument
    # process the json configuration file
    args = get_args()
    config = process_config(args.config)

    print('Create the data generator')
    data_loader = DataLoader(config)

    if config['all_weights_in_folder']:
        weights = np.array(glob(os.path.dirname(os.path.abspath(config['weights_path'])) + '/*.hdf5'))
    else:
        weights = np.array([config['weights_path']])  

    for weight in weights:
        weightnum = int(os.path.basename(weight).split('-')[-1][:-5])
        print('Create the model for weight #%s' % (weightnum))
        model = CycleGANAttrModel(config, weight, is_train=False)
        predict_set = config['predict_set']  # either a, b, both
        model.build_predict_model(predict_set)
        print('model ready loading data now')

        os.makedirs('images/%s' % config['dataset_name'], exist_ok=True)

        if predict_set=='both' or predict_set=='a':
            testA_datagen = DataGenerator(img_filenames=data_loader.get_testA_data(), batch_size=1, target_size=(config['predict_img_height'], config['predict_img_width']))
            testA_generator = iter(testA_datagen)

            num_images = len(testA_datagen)
            for i in range(num_images):
                imgs_A = next(testA_generator)
                fake_B = model.predict_g_AB.predict(imgs_A)
                imageio.imwrite("images/%s/%i_a_transl_%i.png" % (config['dataset_name'], weightnum, i), ((fake_B[0]+1)*127.5).astype(np.uint8))

                if predict_set=='both':
                    reconstr_A = model.predict_g_BA.predict(fake_B)
                    imageio.imwrite("images/%s/%i_a_recon_%i.png" % (config['dataset_name'], weightnum, i), ((reconstr_A[0]+1)*127.5).astype(np.uint8))

        if predict_set=='both' or predict_set=='b':
            testB_datagen = DataGenerator(img_filenames=data_loader.get_testB_data(), batch_size=1, target_size=(config['predict_img_height'], config['predict_img_width']))
            testB_generator = iter(testB_datagen)

            num_images = len(testB_datagen)
            for i in range(num_images):
                imgs_B = next(testB_generator)    
                fake_A = model.predict_g_BA.predict(imgs_B)
                imageio.imwrite("images/%s/%i_b_transl_%i.png" % (config['dataset_name'], weightnum, i), ((fake_A[0]+1)*127.5).astype(np.uint8))

                if predict_set=='both':
                    reconstr_B = model.predict_g_AB.predict(fake_A)
                    imageio.imwrite("images/%s/%i_b_recon_%i.png" % (config['dataset_name'], weightnum, i), ((reconstr_B[0]+1)*127.5).astype(np.uint8))
Example #12
    def reset_session(save_path, model, person='A'):
        model.save_weights(path=save_path)
        K.clear_session()
        model = FaceswapGANModel(**arch_config)
        model.load_weights(path=save_path)
        vggface = VGGFace(include_top=False, model='resnet50', input_shape=(224, 224, 3))
        model.build_pl_model(vggface_model=vggface, before_activ=loss_config["PL_before_activ"])
        if person == 'A':
            train_batch = DataLoader(gen_person_img, all_img, batchSize, gen_img_dir_bm_eyes,
                                      RESOLUTION, num_cpus, K.get_session(), **da_config)
        else:
            train_batch = DataLoader(person_img, all_img, batchSize, img_dir_bm_eyes,
                                      RESOLUTION, num_cpus, K.get_session(), **da_config)

        return model, vggface, train_batch
Example #13
class Generator:
    def __init__(self, config):
        self.config = config
        self.reviews_data_loader = DataLoader(config.reviews.raw_data_path)
        self.profiles_data_loader = DataLoader(config.profiles.raw_data_path)
        self.products_data_loader = DataLoader(config.products.raw_data_path)
        self.reviews_data = None
        self.profiles_data = None
        self.products_data = None
        self.preprocessor = None

    def load_data(self):
        self.reviews_data = self.get_reviews_data()
        self.profiles_data = self.get_profiles_data()
        self.products_data = self.get_products_data()

    def load_preprocessor(self):
        self.preprocessor = Preprocessor(self.config, self.reviews_data,
                                         self.profiles_data,
                                         self.products_data)

    def preprocess_reviews(self):
        self.reviews_data = self.preprocessor.preprocess_reviews()

    def preprocess_profiles(self):
        self.profiles_data = self.preprocessor.preprocess_profiles()

    def preprocess_products(self):
        self.products_data = self.preprocessor.preprocess_products()

    def get_reviews_data(self):
        self.reviews_data_loader.load_data()
        return self.reviews_data_loader.get_data()

    def get_profiles_data(self):
        self.profiles_data_loader.load_data()
        return self.profiles_data_loader.get_data()

    def get_products_data(self):
        self.products_data_loader.load_data()
        return self.products_data_loader.get_data()

    def save_reviews_data(self):
        self.reviews_data.to_csv(self.config.reviews.save_data_path,
                                 index=False)

    def save_profiles_data(self):
        self.profiles_data.to_csv(self.config.profiles.save_data_path,
                                  index=False)

    def save_products_data(self):
        self.products_data.to_csv(self.config.products.save_data_path,
                                  index=False)
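
A minimal usage sketch for the Generator class above (the config shape is an assumption: config.reviews, config.profiles and config.products must each expose raw_data_path and save_data_path):

gen = Generator(config)
gen.load_data()            # pull the three raw files through their DataLoaders
gen.load_preprocessor()    # wire the loaded data into a Preprocessor
gen.preprocess_reviews()
gen.save_reviews_data()    # write the cleaned reviews to config.reviews.save_data_path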
Example #14
def train():
    data_loader = DataLoader(data_dir='datasets/hymenoptera_data', image_size=IMG_SIZE, batch_size=4)
    inputs, classes = next(iter(data_loader.load_data()))
    out = torchvision.utils.make_grid(inputs)
    data_loader.show_image(out, title=[data_loader.data_classes[c] for c in classes])

    model = fine_tune_model()
    
    criterion = nn.CrossEntropyLoss()
    optimizer_ft = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    try:
        model = train_model(data_loader, model, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25)
        save_torch_model(model, MODEL_SAVE_FILE)
    except KeyboardInterrupt:
        print('manually interrupted, trying to save the model now...')
        save_torch_model(model, MODEL_SAVE_FILE)
        print('model saved.')
Example #15
def reset_session(save_path):
    global model, vggface
    global train_batchA, train_batchB
    model.save_weights(path=save_path)
    del model
    del vggface
    del train_batchA
    del train_batchB
    K.clear_session()
    model = FaceswapGANModel(**arch_config)
    model.load_weights(path=save_path)
    vggface = VGGFace(include_top=False, model='resnet50', input_shape=(224, 224, 3))
    model.build_pl_model(vggface_model=vggface, before_activ=loss_config["PL_before_activ"])
    train_batchA = DataLoader(train_A, train_AnB, batchSize, img_dirA_bm_eyes,
                              RESOLUTION, num_cpus, K.get_session(), **da_config)
    train_batchB = DataLoader(train_B, train_AnB, batchSize, img_dirB_bm_eyes,
                              RESOLUTION, num_cpus, K.get_session(), **da_config)
Example #16
def main():
    ctpn = CTPN(cfg)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    ctpn.load_ckpt(sess)

    if cfg.ADJUST_ANGLE:
        angle_detector = VGG(cfg)
        angle_detector.load_weights()

    data = DataLoader(cfg)
    img = data.load_data('images/xuanye.jpg')

    t = time.time()

    if cfg.ADJUST_ANGLE:
        angle = angle_detector.predict(img=np.copy(img))
        print('The angle of this character is:', angle)
        im = Image.fromarray(img)
        print('Rotate the array of this img!')
        if angle == 90:
            im = im.transpose(Image.ROTATE_270)
        elif angle == 180:
            im = im.transpose(Image.ROTATE_180)
        elif angle == 270:
            im = im.transpose(Image.ROTATE_90)
        img = np.array(im)


#    img = cv2.resize(img, (2000,3000),interpolation=cv2.INTER_CUBIC)
    blobs, im_scales, resized_img, scale = data.get_blobs(img, None)
    boxes, scores = ctpn.predict(blobs, im_scales, resized_img, sess)
    boxes = ctpn.detect(boxes, scores[:, np.newaxis], resized_img.shape[:2])
    text_recs, im = draw_boxes(resized_img,
                               boxes,
                               caption='im_name',
                               wait=True,
                               is_display=True)
    #    text_recs = sort_box(text_recs)
    print("It takes time:{}s".format(time.time() - t))
    #    cv2.imshow('img',im)
    #    cv2.waitKey(0)
    cv2.imwrite('images/result.jpg', im)
class EditDistTest(unittest.TestCase):
    def setUp(self):
        self.data_loader = DataLoader()
        self.data = self.data_loader.load(bottomup_edit_dist_converter)
        config = Config().config
        self.data_file_name = config['data_file_name']
        self.cate_file_name = config['cate_file_name']

    def deep_copy_tree(self, t):
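        # "deep copy" by rebuilding the tree from the raw data file instead of cloning in memory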
        uid = t.root.label
        with open(self.data_file_name) as user_data_f:
            user_data = json.load(user_data_f)[uid]
        bus_dict = {}
        for bid in user_data:
            bus_dict[bid] = self.data_loader.get_business_cate_path(bid)
        return bottomup_edit_dist_converter(uid, bus_dict, {})

    def test_edit_dist(self):

        random_index = int(random.random() * len(self.data))
        base_tree = self.data[random_index]

        # 1.dist(t1,t2) == dist(t2,t1)
        random_index_2 = int(random.random() * len(self.data))
        sec_tree = self.data[random_index_2]
        assert bottomup_edit_dist_calculator(
            sec_tree,
            base_tree) == bottomup_edit_dist_calculator(base_tree, sec_tree)

        # 2.same tree => dist==0
        mirror_tree = self.deep_copy_tree(base_tree)
        assert bottomup_edit_dist_calculator(mirror_tree, base_tree) == 0.0

        # 3.dist(t1,t2) + dist(t2,t3) >= dist(t1,t3)
        random_index_3 = int(random.random() * len(self.data))
        thd_tree = self.data[random_index_3]
        assert bottomup_edit_dist_calculator(
            base_tree, sec_tree) + bottomup_edit_dist_calculator(
                sec_tree, thd_tree) >= bottomup_edit_dist_calculator(
                    base_tree, thd_tree)

        # 4.one empty and another not empty => dist == size(not empty tree)-1
        empty_tree = BUEditTree('empty')
        d = bottomup_edit_dist_calculator(empty_tree, base_tree)
        assert base_tree.size - 1 == d
Example #18
def main():
    config = process_config(CONFIG_FILE)

    # create the experiments dirs
    create_dirs([config.tensorboard_log_dir, config.checkpoint_dir])

    print('Create the data generator.')
    data_loader = DataLoader(config)

    print('Create the model.')
    model = LSTMChem(config)

    print('Create the trainer')
    trainer = LSTMChemTrainer(model.model, data_loader.get_train_data(),
                              config)

    print('Start training the model.')
    trainer.train()
Example #19
def getData(mypath, config):

    # get list of filepaths
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    file_paths = [join(mypath, f) for f in onlyfiles]

    # create numpy datasets for each stock
    data = []
    for fname in file_paths:
        data.append(
            DataLoader(fname,
                       window=config.experiment.window,
                       threshold=config.experiment.threshold))

    # initialize numpy arrays for training and test data
    X_train = data[0].X_train_std
    Y_train = data[0].Y_train
    X_val = data[0].X_val_std
    Y_val = data[0].Y_val
    X_test = data[0].X_test_std
    Y_test = data[0].Y_test

    # add other stocks to previously initialized numpy arrays
    for i in range(1, len(data)):
        X_train = np.concatenate((X_train, data[i].X_train_std), axis=0)
        Y_train = np.concatenate((Y_train, data[i].Y_train), axis=0)
        X_val = np.concatenate((X_val, data[i].X_val_std), axis=0)
        Y_val = np.concatenate((Y_val, data[i].Y_val), axis=0)
        X_test = np.concatenate((X_test, data[i].X_test_std), axis=0)
        Y_test = np.concatenate((Y_test, data[i].Y_test), axis=0)

    # Save number of features and samples
    num_train_samples = X_train.shape[0]
    num_val_samples = X_val.shape[0]
    num_test_samples = X_test.shape[0]
    num_train_features = X_train.shape[1]

    # Generate TF dataset for Keras model
    logging.info('------Final Training and Test Datasets------')
    logging.info('Size of X_Train: %s', X_train.shape)
    logging.info('Size of Y_Train: %s', Y_train.shape)
    logging.info('Size of X_val: %s', X_val.shape)
    logging.info('Size of Y_val: %s', Y_val.shape)
    logging.info('Size of X_Test: %s', X_test.shape)
    logging.info('Size of Y_Test: %s', Y_test.shape)
    train_dataset = Dataset.from_tensor_slices((X_train, Y_train))
    train_dataset = train_dataset.shuffle(config.model.shuffle).batch(
        config.model.batch_size).repeat()
    val_dataset = Dataset.from_tensor_slices((X_val, Y_val))
    val_dataset = val_dataset.shuffle(config.model.shuffle).batch(
        config.model.batch_size).repeat()
    test_dataset = Dataset.from_tensor_slices((X_test, Y_test))
    test_dataset = test_dataset.shuffle(config.model.shuffle).batch(
        config.model.batch_size).repeat()

    return train_dataset, val_dataset, test_dataset, num_train_features, num_train_samples, num_val_samples, num_test_samples
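
A minimal usage sketch (the stock-data directory is a placeholder, not from the source):

(train_ds, val_ds, test_ds,
 num_features, n_train, n_val, n_test) = getData(r'C:\data\stocks', config)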
Example #20
def main():
    try:
        args = get_args()
        config = process_config(args.config)

    except:
        print("Missing or invalid arguments")
        exit(0)

    create_dirs([config.summary_dir, config.checkpoint_dir])
    sess = tf.Session()
    print("Loading dataset")
    data = DataLoader(config, args.preprocess)
    print("Finished loading dataset")
    data.next_batch()
    model = Biaxial(config)
    logger = Logger(sess, config)
    trainer = Trainer(sess, model, data, config, logger)
    model.load(sess)
    trainer.train()
Example #21
def train():
    model = CTPN(cfg)
    data = DataLoader(cfg)
    #    imdb = data.load_imdb('voc_2007_trainval')
    #    roidb = data.get_training_roidb(imdb)
    sess = get_sess()
    logger = Logger(sess, cfg)
    trainer = CTPNTrainer(sess, model, data, logger)
    print('Solving...')
    trainer.train(cfg.TRAIN.MAX_ITERS, restore=False)
    print('done solving')
Example #22
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([
        config.callbacks.tensorboard_log_dir, config.callbacks.checkpoint_dir
    ])

    print('Create the data generator.')
    data_loader = DataLoader(config)

    while True:
        images = next(data_loader.get_train_generator())
        print(images)
def main():
    # get json configuration filepath from the run argument
    # process the json configuration file
    args = get_args()
    config = process_config(args.config)

    # create the experiment directories
    log_dir, checkpoint_dir = create_dirs(config)

    print('Create the data generator')
    data_loader = DataLoader(config)

    print('Create the model')
    model = CycleGANAttrModel(config, config['weights_path'])
    model.build_model()
    print('model ready loading data now')

    print('Create the trainer')
    trainer = CycleGANModelTrainer(model, data_loader.get_trainA_data(),
                                   data_loader.get_trainB_data(),
                                   data_loader.get_testA_data(),
                                   data_loader.get_testB_data(), config,
                                   log_dir, checkpoint_dir)

    # print('Start training the model.')
    trainer.train()
def main():
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("Missing Arguments")
        exit(0)
    sess = tf.Session()
    data = DataLoader("./data/celebA/images/", "./data/celebA/list_attr_celeba.txt", 178, 128, 16, "train")
    model = StarGAN(config, data)
    logger = Logger(sess, config)
    trainer = StarGANTrainer(sess, model, data, config, logger)
    trainer.train()
Example #25
    def __init__(self, config):
        self.config = config
        self.reviews_data_loader = DataLoader(config.reviews.raw_data_path)
        self.profiles_data_loader = DataLoader(config.profiles.raw_data_path)
        self.products_data_loader = DataLoader(config.products.raw_data_path)
        self.reviews_data = None
        self.profiles_data = None
        self.products_data = None
        self.preprocessor = None
Example #26
def main():
    ctpn = CTPN(cfg)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    ctpn.load_ckpt(sess)  # ctpn load

    if torch.cuda.is_available() and cfg.ALL_GPU:
        crnn = CRNN(cfg, 32, 1, len(cfg.KEYS) + 1, 256, 1).cuda()
    else:
        crnn = CRNN(cfg, 32, 1, len(cfg.KEYS) + 1, 256, 1).cpu()
    crnn.eval()
    crnn.load_state_dict(torch.load(cfg.CRNN_MODEL))  # crnn load

    if cfg.ADJUST_ANGLE:
        angle_detector = VGG(cfg)  # vgg load
        angle_detector.load_weights()

    data = DataLoader(cfg)
    text = TextGenerator(cfg)

    #        image_path = raw_input("please input your image path and name:") # get image path
    image_path = '/home/jwm/Desktop/OCR-standard/images/xuanye.jpg'
    img = data.load_data(image_path)
    t = time.time()
    if cfg.ADJUST_ANGLE:
        img = rotate(img, angle_detector)  # rotate image if necessary


#    img = cv2.resize(img, (2000,3000),interpolation=cv2.INTER_CUBIC)
    text_recs, detected_img, img = detect(img, data, ctpn, sess)  # detect text
    results = recognize(img, text_recs, crnn, text,
                        adjust=True)  # recognize text
    print("It takes time:{}s".format(time.time() - t))
    for key in results:
        print(results[key][1])
Example #27
class ClusteringTest(unittest.TestCase):
    def setUp(self):
        self.data_loader = DataLoader()
        with open('testData1000','r') as valid_uid_f:
            self.valid_uid = valid_uid_f.read().split('\n')
    
    def test_vec_ctc(self):
        pivots = generate_category_tree(self.data_loader)
        data = self.data_loader.load(vectorized_convertor, pivots=pivots, sigma=0.0001, valid_uid=self.valid_uid)
        dct = DensityCoverTree(vectorized_dist_calculator, 3)
        for i, d in enumerate(data):
            dct.insert(Node(val=d, index=i))
        
        for cls in covertree_clustering(dct, 4):
            print('stub')
class VectorizedUserDistTester(unittest.TestCase):
    def setUp(self):
        self.data_loader = DataLoader()
        self.pivots = generate_category_tree(self.data_loader)
        self.data = self.data_loader.load(vectorized_convertor,
                                          pivots=self.pivots,
                                          sigma=0.0001)

    def test_dist(self):
        random_index = int(len(self.data) * random.random())
        for d_1 in self.data:
            d_2 = self.data[random_index]
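            # non-negative and, assuming vector components lie in [0, 1], at most sqrt(dim)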
            dist = vectorized_dist_calculator(d_1, d_2)
            assert 0 <= dist <= np.sqrt(len(d_1))
Example #29
def main():
    # capture the config path from the run arguments
    args = get_args()
    # process the json configuration file
    config = process_config(args.config)

    # configure devices
    os.environ["CUDA_VISIBLE_DEVICES"] = config.gpus

    import tensorflow as tf
    from data_loader.data_loader import DataLoader
    from models.gan_model import GANModel

    # set GPUS configuration
    gpuconfig = tf.ConfigProto(allow_soft_placement=True,
                               log_device_placement=False)
    gpuconfig.gpu_options.visible_device_list = config.gpus
    gpuconfig.gpu_options.allow_growth = True

    # create tensorflow session
    sess = tf.Session(config=gpuconfig)
    # create your data generator
    data = DataLoader(config)

    # create an instance of the model
    model = GANModel(data, config)
    # load model
    model.load(sess)

    # generate a random noise vector (could be replaced by a specified noise)
    noise = tf.random_normal([1, config.latent_vec_dim])
    noise = tf.tile(noise, [config.batch_size, 1])
    label = np.zeros((config.batch_size, 1))
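    # conditioning labels swept linearly from -1 up to (but not including) 1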
    for i in range(config.batch_size):
        label[i] = -1 + 2 * i / config.batch_size
    noise = sess.run(noise)
    generated_charts = sess.run(
        model.generator(tf.convert_to_tensor(noise),
                        tf.cast(tf.convert_to_tensor(label), tf.float32)))
    # create generations folder
    generations_path = os.path.join('experiments', config.exp_name,
                                    'generations')
    if not os.path.isdir(generations_path):
        os.mkdir(generations_path)
    sio.savemat(os.path.join(generations_path, 'generated_charts'),
                {'generated_charts': generated_charts})

    print('done')
Example #30
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    args = get_args()
    config = process_config(args.config)

    data_loader = DataLoader(data_dir, config)
    data_loader.load_directory('.tif')
    data_loader.create_np_arrays()
    data_loader.create_data_label_pairs()

    preptt = PrepTrainTest(config, data_loader)

    for data_label_pair in data_loader.data_label_pairs:
        x_data = data_label_pair[0]
        y_true = data_label_pair[1][:, :, 0]

        preptt.add_data(x_data, y_true)

    # Create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir, config.input_dir])

    # Create tensorflow session
    sess = tf.Session()

    # Create instance of the model you want
    model = PopModel(config)

    # Load the model if it exists
    model.load(sess)

    # Create Tensorboard logger
    logger = Logger(sess, config)
    logger.log_config()

    # Create your data generator
    data = DataGenerator(config, preptraintest=preptt)

    data.create_traintest_data()

    # Create trainer and pass all previous components to it
    trainer = PopTrainer(sess, model, data, config, logger)

    # Train model
    trainer.train()
def train():
    data_loader = DataLoader(data_dir='datasets/hymenoptera_data', image_size=IMAGE_SIZE, batch_size=4)
    inputs, classes = next(iter(data_loader.load_data()))
    out = torchvision.utils.make_grid(inputs)
    data_loader.show_image(out, title=[data_loader.data_classes[c] for c in classes])