Example #1
def text_preprocess(x_train):
    """This is the text preprocess main method.

    It takes an raw string, clean it and processing it into tokenlized numpy array.
    """
    if Constant.STORE_PATH == '':
        temp_path = temp_path_generator()
        path = temp_path + '_store'
    else:
        path = Constant.STORE_PATH

    ensure_dir(path)

    x_train = [clean_str(x) for x in x_train]
    x_train, word_index = tokenlize_text(
        max_seq_length=Constant.MAX_SEQUENCE_LENGTH,
        max_num_words=Constant.MAX_NB_WORDS,
        x_train=x_train)

    print("generating preprocessing model...")
    x_train = processing(path=path,
                         word_index=word_index,
                         input_length=Constant.MAX_SEQUENCE_LENGTH,
                         x_train=x_train)
    return x_train
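For reference, a minimal usage sketch of text_preprocess as defined above; the sample sentences are placeholders, and the autokeras-style imports and Constant settings are assumed to already be in scope:

# Minimal usage sketch; the raw sentences below are placeholders.
raw_texts = [
    "This movie was surprisingly good!",
    "Terrible plot, would not recommend.",
]

# Cleans each string and turns the batch into a tokenized numpy array
# (per the docstring above), ready to be fed to a model.
processed = text_preprocess(raw_texts)
print(processed.shape)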
Example #2
    def __init__(self, model_path=None, overwrite=False):
        super(VoiceGenerator, self).__init__()
        self.model_path = model_path if model_path is not None else temp_path_generator()
        ensure_dir(self.model_path)
        self.checkpoint_path = os.path.join(self.model_path, Constant.PRE_TRAIN_VOICE_GENERATOR_MODEL_NAME)
        self.sample_rate = 0
        self.hop_length = 0
        self.overwrite = overwrite
        self.device = get_device()
        self.load()
Example #3
    def __init__(self, model_path=None, overwrite=False):
        super(VoiceGenerator, self).__init__()
        if model_path is None:
            model_path = temp_path_generator()
        self.model_path = model_path
        ensure_dir(self.model_path)
        self.checkpoint_path = os.path.join(self.model_path, Constant.PRE_TRAIN_VOICE_GENERATOR_MODEL_NAME)
        self.sample_rate = 0
        self.hop_length = 0
        self.overwrite = overwrite
        self.load()
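Both constructor variants above do the same thing from the caller's side: if no model_path is given, the checkpoint directory defaults to the path returned by temp_path_generator(). A brief, hedged usage sketch (the explicit directory below is illustrative):

# Default: the pretrained checkpoint is cached under temp_path_generator().
generator = VoiceGenerator()

# Explicit cache directory; overwrite is stored on the instance and is
# presumably consulted by self.load() when a checkpoint already exists.
generator = VoiceGenerator(model_path='/tmp/voice_models', overwrite=True)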
Example #4
    def load(self, model_path=None):
        temp_path = temp_path_generator()
        ensure_dir(temp_path)
        model_paths = [
            f'{temp_path}/{file_name}'
            for file_name in Constant.FACE_DETECTOR['MODEL_NAMES']
        ]
        for google_id, file_name in zip(
                Constant.FACE_DETECTOR['MODEL_GOOGLE_ID'],
                Constant.FACE_DETECTOR['MODEL_NAMES']):
            download_file_from_google_drive(
                file_id=google_id, dest_path=f'{temp_path}/{file_name}')
        return model_paths
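A hedged caller-side sketch for the method above; detector is hypothetical here, standing in for an instance of whatever class defines this load():

# Hypothetical usage: 'detector' is an instance of the (unnamed) class that
# defines the load() method shown above.
model_paths = detector.load()

# load() returns the destination paths, so the caller can verify the files
# actually landed in the shared temp directory.
for path in model_paths:
    print(path, os.path.exists(path))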
Example #5
    def __init__(self, verbose=True, model_path=None):
        """Initialize the instance."""
        self.verbose = verbose
        self.model = None
        self.device = get_device()
        self.model_path = model_path if model_path is not None else temp_path_generator()
        ensure_dir(self.model_path)
        self.local_paths = [
            os.path.join(self.model_path, x.local_name)
            for x in self._google_drive_files
        ]
        for path, x in zip(self.local_paths, self._google_drive_files):
            if not os.path.exists(path):
                download_file_from_google_drive(file_id=x.google_drive_id,
                                                dest_path=path,
                                                verbose=True)
Example #6
    def load(self, model_path=None):

        if model_path is None:
            model_file_name = Constant.OBJECT_DETECTOR['MODEL_NAME']
            temp_path = temp_path_generator()
            ensure_dir(temp_path)
            model_path = f'{temp_path}/{model_file_name}'
            download_file_from_google_drive(file_id=Constant.OBJECT_DETECTOR['MODEL_GOOGLE_ID'], dest_path=model_path)
        # load net
        num_classes = len(VOC_CLASSES) + 1  # +1 for background
        self.model = self._build_ssd('test', 300, num_classes)  # initialize SSD
        if self.device.startswith("cuda"):
            self.model.load_state_dict(torch.load(model_path))
        else:
            self.model.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
        self.model.eval()
        print('Finished loading model!')

        self.model = self.model.to(self.device)
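The cuda/cpu branch above is the standard PyTorch pattern for loading a GPU-saved checkpoint on a CPU-only machine. A self-contained sketch of just that step, using a placeholder model and checkpoint path (both are assumptions, not part of the snippet above):

import torch
import torch.nn as nn

# Placeholder model and checkpoint path; the real snippet builds an SSD via
# self._build_ssd and loads a pretrained detection checkpoint instead.
model = nn.Linear(10, 2)
model_path = "checkpoint.pth"
torch.save(model.state_dict(), model_path)  # stand-in for the downloaded file

device = "cuda" if torch.cuda.is_available() else "cpu"
if device.startswith("cuda"):
    state_dict = torch.load(model_path)
else:
    # Remap CUDA tensor storages onto the CPU so the file loads without a GPU.
    state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)

model.load_state_dict(state_dict)
model.eval()
model = model.to(device)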
Example #7
def test_text_classifier():
    model_file = os.path.join(temp_path_generator(),
                              'bert_classifier/pytorch_model.bin')
    if os.path.exists(model_file):
        os.remove(model_file)

    file_path1 = "examples/task_modules/text/train_data.tsv"
    file_path2 = "examples/task_modules/text/test_data.tsv"
    x_train, y_train = read_tsv_file(input_file=file_path1)
    x_train, y_train = x_train[:1], y_train[:1]
    x_test, y_test = read_tsv_file(input_file=file_path2)
    x_test, y_test = x_test[:1], y_test[:1]

    Constant.BERT_TRAINER_BATCH_SIZE = 1
    Constant.BERT_TRAINER_EPOCHS = 1

    clf = TextClassifier(verbose=True)
    clf.fit(x=x_train, y=y_train, time_limit=12 * 60 * 60)
    y_pred = clf.predict(x_test)
    if len(y_pred) != len(y_test):
        raise AssertionError()
Example #8
def text_preprocess(x_train):
    """This is the text preprocess main method.

    It takes an raw string, clean it and processing it into tokenlized numpy array.
    """
    if Constant.STORE_PATH == '':
        temp_path = temp_path_generator()
        path = temp_path + '_store'
    else:
        path = Constant.STORE_PATH

    ensure_dir(path)

    x_train = [clean_str(x) for x in x_train]
    x_train, word_index = tokenlize_text(max_seq_length=Constant.MAX_SEQUENCE_LENGTH,
                                         max_num_words=Constant.MAX_NB_WORDS,
                                         x_train=x_train)

    print("generating preprocessing model...")
    x_train = processing(path=path, word_index=word_index, input_length=Constant.MAX_SEQUENCE_LENGTH, x_train=x_train)
    return x_train
Example #9
    def load(self, model_path=None):
        # https://s3.amazonaws.com/amdegroot-models/ssd300_mAP_77.43_v2.pth
        if model_path is None:
            file_link = Constant.PRE_TRAIN_DETECTION_FILE_LINK
            # model_path = os.path.join(temp_path_generator(), "object_detection_pretrained.pth")
            model_path = temp_path_generator() + '_object_detection_pretrained.pth'
            download_file(file_link, model_path)
        # load net
        num_classes = len(VOC_CLASSES) + 1  # +1 for background
        self.model = self._build_ssd('test', 300,
                                     num_classes)  # initialize SSD
        if self.device.startswith("cuda"):
            self.model.load_state_dict(torch.load(model_path))
        else:
            self.model.load_state_dict(
                torch.load(model_path,
                           map_location=lambda storage, loc: storage))
        self.model.eval()
        print('Finished loading model!')

        self.model = self.model.to(self.device)
Example #10
    def load(self, model_path=None):
        temp_path = temp_path_generator()
        ensure_dir(temp_path)
        for model_link, file_path in zip(Constant.FACE_DETECTION_PRETRAINED['PRETRAINED_MODEL_LINKS'],
                                         Constant.FACE_DETECTION_PRETRAINED['FILE_NAMES']):
            download_file(model_link, f'{temp_path}/{file_path}')
Example #11
def test_temp_path_generator(_):
    path = temp_path_generator()
    assert path == TEST_TEMP_DIR + "/autokeras"
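Judging from the assertion in this test, temp_path_generator() returns a fixed "autokeras" directory under the system temp directory, and ensure_dir (used throughout the examples above) creates a directory if it is missing. A plausible sketch of those two helpers, inferred from the test rather than taken from the library source:

import os
import tempfile


def temp_path_generator():
    # Consistent with the test above: <system temp dir>/autokeras
    return os.path.join(tempfile.gettempdir(), 'autokeras')


def ensure_dir(directory):
    # Create the directory (and any parents) if it does not already exist.
    if not os.path.exists(directory):
        os.makedirs(directory)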