Ejemplo n.º 1
0
    def __process(self, *args):
        """Validate the form input, run the predictor and populate the UI.

        Fills the characteristic value fields and the top-25 method ranking;
        returns early (doing nothing) when input validation fails.
        """
        validation = self.__validate_input()
        if not validation:
            return

        filepath, target_i, header, index_col = validation
        predictor = Predictor(filepath, target_i, header, index_col)

        # Display only the file name, not the full path.
        self.char_values[0].set(filepath.split('/')[-1])

        # Slot 0 holds the file name, so characteristics start at slot 1.
        for slot, (char_name, char_value) in enumerate(
                predictor.extract().items(), start=1):
            self.char_values[slot].set(round(char_value, 3))

        readable = human_readable_methods()

        # Show at most the 25 best-ranked methods, 1-based positions.
        for rank, (method, f1) in enumerate(predictor.predict()[:25], start=1):
            self.rank_positions[rank - 1].set(str(rank) + '.')
            self.rank_methods[rank - 1].set(readable[method])
            self.rank_f1[rank - 1].set(round(f1, 2))
Ejemplo n.º 2
0
def main():
    """Interactive loop matching user sentences against Michael Scott quotes.

    Loads (or scrapes) the quotes, loads (or generates and caches) their
    embeddings, then repeatedly prompts for a sentence and prints the
    closest quote with its season and episode.
    """
    # Quotes map text to metadata; fall back to scraping on a cache miss.
    print("Attempting to load quotes from file")
    quotes = load_quotes()
    if quotes is None:
        print("Scraping the web for new quotes")
        quotes = scrape()

    print("Creating sentence encoder")
    encoder = Encoder()

    # Embeddings are cached on disk; regenerate and save when missing.
    print("Attempting to load quote embeddings from file")
    quote_embeddings = load_quote_embeddings()
    if quote_embeddings is None:
        print("Generating new quote embeddings")
        quote_embeddings = generate_quote_embeddings(encoder, quotes)
        print("Saving new quote embeddings to {0}".format(embeddings_file))
        save_pickle(quote_embeddings, embeddings_file)

    print("Creating predictor")
    predictor = Predictor(encoder, quote_embeddings)

    # Prediction is (quote, {'season': ..., 'episode': ...}).
    while True:
        sentence = query_input()
        prediction = predictor.predict_output(sentence)
        quote = prediction[0]
        episode_info = prediction[1]
        print("Michael says: \"{0}\" in season {1}, episode {2}".format(
            quote, episode_info['season'], episode_info['episode']))
Ejemplo n.º 3
0
def test_model(model_name):
    """Evaluate *model_name* sample-by-sample and print its accuracy.

    NOTE(review): the loop length comes from ``y_test`` but the samples are
    drawn from ``x_train``/``y_train`` — this looks like a train/test mix-up
    (and raises IndexError if the train arrays are shorter). Confirm whether
    ``x_test``/``y_test`` were intended; the original indexing is preserved
    here pending that confirmation.
    """
    model = Predictor(model_name)

    correct = 0
    total = len(y_test)

    for i in range(total):
        # Model expects a single 28x28 grayscale image: (N, C, H, W).
        sample = x_train[i].reshape(1, 1, 28, 28)
        expected = chr(int(model.mapping[str(y_train[i])]))
        if model.predict(sample) == expected:
            correct += 1

    # Guard against an empty evaluation set (avoids ZeroDivisionError).
    accuracy = 100 * correct / total if total else 0.0
    print(model_name + " - accuracy: %.2lf%%" % accuracy)
def predict_on_test(args):
    """Run the trained predictor on the test split and report CER/WER.

    Parameters
    ----------
    args : str
        Path to the YAML config file.

    Returns
    -------
    tuple
        ``(CER, WER, labels_test, pred_test, images_test, dataset['test'])``.
    """
    config_path = args

    # safe_load avoids arbitrary object construction on untrusted YAML and
    # keeps working on PyYAML >= 6, where yaml.load() without an explicit
    # Loader raises a TypeError.
    with open(config_path) as f:
        config = yaml.safe_load(f)

    with open(config['labels_file']) as f:
        dataset = json.load(f)

    test_generator = DataGenerator(config, dataset['test'], shuffle=False)

    images_test, labels_test = test_generator.get_full_dataset()

    predict_cfg = config['predict']
    predictor = Predictor(config,
                          predict_cfg['graph_file'],
                          predict_cfg['weights_file'],
                          test_generator.num_decoder_tokens,
                          test_generator.max_seq_length,
                          test_generator.token_indices,
                          test_generator.reverse_token_indices,
                          batch_size=predict_cfg['batch_size'])

    pred_test = predictor.predict(images_test)

    CER, WER = score_prediction(labels_test, pred_test)

    # Side-by-side dump of ground truth vs prediction for manual inspection.
    for label, pred in zip(labels_test, pred_test):
        print(label, pred)

    print('CER: ', round(CER * 100, 2), '%')
    print('WER: ', round(WER * 100, 2), '%')

    return CER, WER, labels_test, pred_test, images_test, dataset['test']
Ejemplo n.º 5
0
def experiment_pred(experiment_dir, test_data):
    """Predict on *test_data* with every fold of an experiment, then blend.

    Loads the best checkpoint of each fold from ``experiment_dir/fold_<n>``
    and geometric-mean blends the per-fold prediction data frames.
    """
    print(f"Start predict: {experiment_dir}")
    transforms = get_transforms(False, CROP_SIZE)

    fold_predictions = []
    for fold in config.folds:
        print("Predict fold", fold)
        fold_dir = experiment_dir / f'fold_{fold}'
        model_path = get_best_model_path(fold_dir)
        print("Model path", model_path)
        fold_predictor = Predictor(model_path,
                                   transforms,
                                   BATCH_SIZE, (config.audio.n_mels, CROP_SIZE),
                                   (config.audio.n_mels, CROP_SIZE // 4),
                                   device=DEVICE)
        fold_predictions.append(pred_test(fold_predictor, test_data))

    # Geometric-mean blend across the folds.
    return gmean_preds_blend(fold_predictions)
Ejemplo n.º 6
0
def experiment_pred(experiment_dir, audio_id2spec):
    """Average per-fold predictions for every audio id of an experiment.

    Parameters
    ----------
    experiment_dir : path-like
        Directory with one ``fold_<n>`` subdirectory per fold.
    audio_id2spec : dict
        Mapping of audio id to its spectrogram.

    Returns
    -------
    dict
        Mapping of audio id to the fold-averaged prediction.
    """
    print(f"Start predict: {experiment_dir}")
    transforms = get_transforms(False, CROP_SIZE)

    pred_lst = []
    for fold in FOLDS:
        print("Predict fold", fold)
        fold_dir = experiment_dir / f'fold_{fold}'
        model_path = get_best_model_path(fold_dir)
        print("Model path", model_path)
        predictor = Predictor(model_path, transforms, BATCH_SIZE,
                              CROP_SIZE, CROP_SIZE, DEVICE)

        # NOTE: a second `transforms = get_transforms(False, CROP_SIZE)` call
        # used to sit here; it was loop-invariant (identical arguments) and
        # has been removed as redundant.

        pred = fold_pred(predictor, audio_id2spec)
        pred_lst.append(pred)

    # Mean-blend the fold predictions per audio id.
    audio_id2pred = dict()
    for audio_id in audio_id2spec:
        pred = [p[audio_id] for p in pred_lst]
        audio_id2pred[audio_id] = np.mean(pred, axis=0)

    return audio_id2pred
def main(config):
    """Live webcam demo: capture frames, predict an SMPL body mesh/joints,
    and render the result fullscreen until ESC is pressed.

    Keys: 's' = skeleton, 'm' = mesh, 'r' = rotated mesh, 'b' = mesh plus
    rotated view side by side, ESC = quit.
    """
    # Enable TF GPU memory growth so the process does not grab all VRAM.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
      try:
        # Currently, memory growth needs to be the same across GPUs
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
      except RuntimeError as e:
        # Memory growth must be set before GPUs have been initialized
        print(e)

    renderer = vis_util.SMPLRenderer(
        img_size=config.img_size,
        face_path=config.smpl_face_path)

    # NOTE(review): hard-coded checkpoint dir overrides whatever the config
    # carried — confirm this is intentional.
    config.checkpoint_dir = "training_checkpoints_125_epochs_lspe"
    predictor = Predictor(config)
    cv2.namedWindow("preview", cv2.WINDOW_NORMAL)
    cv2.setWindowProperty("preview", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    vc = cv2.VideoCapture(0)

    if vc.isOpened(): # try to get the first frame
        rval, frame = vc.read()
    else:
        rval = False

    # Rendering mode flags; the key handlers below switch between modes.
    draw_skel = False
    draw_mesh = True
    rotate_img = False
    show_both = False

    while rval:
        # Frame is rotated 90° before processing — presumably the camera is
        # mounted sideways; confirm against the hardware setup.
        frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
        input_img, proc_param, img = preprocess_image(frame, config)
        verts, cam, joints = predictor.do_prediction(input_img)
        cam_for_render, vert_shifted, joints_orig = vis_util.get_original(
            proc_param, np.squeeze(verts), np.squeeze(cam), np.squeeze(joints)[:,:2], img_size=frame.shape[:2])

        # NaN joints are treated as "no person detected": show the raw frame.
        if tf.math.is_nan(joints_orig).numpy().any():
            print("nothing found")
            rend_img = frame
        else:
            if draw_skel:
                rend_img = vis_util.draw_skeleton(frame, joints_orig)
            if draw_mesh:
                if rotate_img:
                    rend_img = renderer.rotated(vert_shifted, 60, cam=cam_for_render, img_size=frame.shape[:2])
                else:
                    rend_img = renderer(vert_shifted, cam_for_render, frame, True)
                    if show_both:
                        # Concatenate the frontal and rotated renders side by side.
                        img2 = renderer.rotated(vert_shifted, 60, cam=cam_for_render, img_size=frame.shape[:2])
                        rend_img = np.concatenate((rend_img, img2), axis=1)


        cv2.imshow("preview", rend_img)
        # Read (and discard) several frames so the preview keeps up with the
        # camera instead of lagging behind slow predictions.
        for i in range(5):
            rval, frame = vc.read()
        key = cv2.waitKey(20)
        if key == 27: # exit on ESC
            break
        # 's': skeleton overlay only.
        if key == ord('s'):
            draw_skel = True
            draw_mesh = False
            rotate_img = False
            show_both = False

        # 'm': frontal mesh render only.
        if key == ord('m'):
            draw_skel = False
            draw_mesh = True
            rotate_img = False
            show_both = False

        # 'r': rotated mesh render.
        if key == ord('r'):
            draw_skel = False
            draw_mesh = True
            rotate_img = True
            show_both = False

        # 'b': frontal and rotated renders side by side.
        if key == ord('b'):
            draw_skel = False
            draw_mesh = True
            rotate_img = False
            show_both = True

    cv2.destroyWindow("preview")
Ejemplo n.º 8
0

############################################
# Common params
############################################

weights_path = 'data/weights/best_loss.pt'
meta_path = 'data/weights/generic_classifier_head_meta.json'
db_path = 'data/ws.db'  # NOTE(review): not used in the visible snippet

############################################
# Get predictions from the attribute model
############################################


# Smoke-test the classifier head on a couple of sentences.
pred = Predictor(weights_path, meta_path)
type(pred)  # NOTE(review): no-op at script level; presumably a REPL leftover
pred.predict('This is incredible! I love it, this is the best chicken I have ever had.')
pred.predict('god is love')

# Top-5 most likely labels for the same sentence.
top_labels = pred.predict_top_k('god is love', top_k_n=5)


############################################
# Get generated text similar to as above
############################################

gen = Generator(weights_path, meta_path)
gen.generate(
    cond_text="In the beginning",
Ejemplo n.º 9
0
# Download the pretrained fold-0 checkpoint on first run.
model_path = config.experiments_dir / 'corr_noisy_007/fold_0/model-072-0.886906.pth'
if not model_path.exists():
    model_path.parent.mkdir(parents=True, exist_ok=True)
    gdown.download(
        'https://drive.google.com/uc?id=1vf6LtRHlpxCC-CjyCguM4JCrso2v3Tip',
        str(model_path),
        quiet=False)

# Inference settings for the streaming demo.
DEVICE = 'cpu'
CROP_SIZE = 256
BATCH_SIZE = 16
TILE_STEP = 2

PREDICTOR = Predictor(model_path,
                      get_transforms(False, CROP_SIZE),
                      BATCH_SIZE, (config.audio.n_mels, CROP_SIZE),
                      (config.audio.n_mels, CROP_SIZE // TILE_STEP),
                      device=DEVICE)

# One sampling-rate's worth of samples per signal block (one second).
signal_block_size = config.audio.sampling_rate
SPEC_BLOCK_SIZE = 64

# Rolling buffers shared with the audio callback defined below.
# NOTE(review): `[np.zeros(...)] * spec_num` makes all entries alias the SAME
# array; fine if entries are replaced wholesale, a bug if mutated in place —
# confirm against the callback.
spec_num = 4
SPEC_LST = [
    np.zeros((config.audio.n_mels, SPEC_BLOCK_SIZE), dtype=np.float32)
] * spec_num
PREV_SIGNAL = np.zeros(signal_block_size, dtype=np.float32)


def audio_callback(indata, frames, time, status):
    global PREV_SIGNAL
Ejemplo n.º 10
0
 def __init__(self):
     """Create the app's collaborators: data collection, viewing, prediction."""
     super().__init__()
     self.data_collector = DataCollector()
     self.data_viewer = DataViewer()
     self.predictor = Predictor()
Ejemplo n.º 11
0
class App(QtWidgets.QMainWindow):
    """Main window of the iClicker application.

    Wires the four actions (data collection, processing, training and mouse
    simulation) to buttons and routes the root logger into an in-window
    text widget.
    """

    def __init__(self):
        super().__init__()
        self.data_collector = DataCollector()
        self.data_viewer = DataViewer()
        self.predictor = Predictor()

    def keyPressEvent(self, e):
        # Close the whole window on Escape.
        if e.key() == QtCore.Qt.Key_Escape:
            self.close()

    def create_log_widget(self):
        """Attach a text-widget handler to the root logger; return the widget."""
        logTextBox = QTextEditLogger(self)
        logTextBox.setFormatter(logging.Formatter('%(asctime)s: %(message)s'))
        logging.getLogger().addHandler(logTextBox)
        logging.getLogger().setLevel(logging.DEBUG)

        return logTextBox.widget

    def display_main_menu(self):
        """Build and show the main layout: log view on top, buttons below."""
        self.setWindowTitle('iClicker')
        self.resize(800, 600)
        # creating layout
        main_widget = QtWidgets.QWidget()
        main_widget.setLayout(QtWidgets.QVBoxLayout())
        self.top_menu_part = QtWidgets.QWidget()
        self.top_menu_part.setLayout(QtWidgets.QVBoxLayout())
        self.log_widget = self.create_log_widget()
        self.top_menu_part.layout().addWidget(self.log_widget)
        self.top_menu_part.resize(100, 200)

        self.bottom_menu_part = QtWidgets.QWidget()
        main_widget.layout().addWidget(self.top_menu_part)
        main_widget.layout().addWidget(self.bottom_menu_part)
        self.add_control_buttons()
        self.setCentralWidget(main_widget)
        self.show()

    def add_control_buttons(self):
        """Create the four action buttons and lay them out in a 2x2 grid."""
        self.bottom_menu_part.setLayout(QtWidgets.QGridLayout())
        collect_data_button = QtWidgets.QPushButton('Colectare date')
        collect_data_button.setToolTip('Colectează date')
        collect_data_button.clicked.connect(self.collect_data)

        process_data_button = QtWidgets.QPushButton('Procesare date')
        process_data_button.setToolTip('Procesează datele colectate')
        process_data_button.clicked.connect(self.process_collected_data)

        train_button = QtWidgets.QPushButton('Antrenare model')
        train_button.setToolTip('Antrenează modelul bazat pe datele procesate')
        train_button.clicked.connect(self.train_model)

        predict_button = QtWidgets.QPushButton('Simulare')
        predict_button.setToolTip('Simulează funcționalitățile mouse-ului')
        predict_button.clicked.connect(self.predict)

        buttons = [
            collect_data_button, process_data_button, train_button,
            predict_button
        ]
        # Row-major 2x2 placement: index -> (row, column).
        for index, button in enumerate(buttons):
            row, col = divmod(index, 2)
            self.bottom_menu_part.layout().addWidget(button, row, col)

    def predict(self):
        """Start mouse simulation, or show an error if no trained model exists."""
        if not self.predictor.can_predict():
            self.error_dialog = QtWidgets.QErrorMessage()
            self.error_dialog.showMessage(
                'Nu există niciun model antrenat.\nApăsați pe butonul "Antrenare" pentru a antrena un model.'
            )
            return
        self.predictor.start()

    def process_collected_data(self):
        """Process collected data on a worker thread, if any data exists."""
        if not can_process_data():
            self.error_dialog = QtWidgets.QErrorMessage()
            self.error_dialog.showMessage(
                'Nu există date pentru a putea fi procesate.\n Apăsați pe butonul "Colectare date" mai întâi pentru a obține date.'
            )
            return
        run_function_on_thread(data_processing_main)

    def view_data(self):
        """Show a random sample of the collected data in the data viewer."""
        print('Getting collected data...')
        data = self.data_collector.get_collected_data()
        print(f'Displaying random photos from {len(data)} samples')
        self.data_viewer.view_data(data)

    def train_model(self):
        """Train a model on a worker thread, if processed data exists."""
        if not Trainer.can_train_model():
            self.error_dialog = QtWidgets.QErrorMessage()
            self.error_dialog.showMessage(
                'Nu există date procesate pentru a putea antrena un model.\n Apăsați pe butonul "Procesare date" pentru a procesa datele.'
            )
            return
        run_function_on_thread(Trainer.main)

    def collect_data(self):
        """Delegate to the data collector."""
        self.data_collector.collect_data()
Ejemplo n.º 12
0
 def __init__(self, config_path: str = SETTINGS_PATH, no_parse: bool = False):
     """Wire up settings and exchanger; collector/analyzer start unset.

     :param config_path: path to the settings/config file.
     :param no_parse: forwarded to ``Settings`` (presumably skips parsing
         when True — confirm against the Settings implementation).
     """
     self.settings = Settings(config_path, no_parse=no_parse)
     self.exchanger = Exchanger(config_path)
     # Created later by other methods; None until then.
     self.collector: Optional[DataCollector] = None
     self.analyzer: Optional[Analyzer] = None
     self.predictor = Predictor()
Ejemplo n.º 13
0
    with open(PREDICTION_DIR / 'val_lwlrap.json', 'w') as file:
        json.dump(result, file, indent=2)


if __name__ == "__main__":
    transforms = get_transforms(False, CROP_SIZE)
    test_data = get_test_data()

    for fold in config.folds:
        print("Predict fold", fold)
        fold_dir = EXPERIMENT_DIR / f'fold_{fold}'
        model_path = get_best_model_path(fold_dir)
        print("Model path", model_path)
        predictor = Predictor(model_path, transforms,
                              BATCH_SIZE,
                              (config.audio.n_mels, CROP_SIZE),
                              (config.audio.n_mels, CROP_SIZE//4),
                              device=DEVICE)

        if not config.kernel:
            print("Val predict")
            pred_val_fold(predictor, fold)

        print("Test predict")
        pred_test_fold(predictor, fold, test_data)

    print("Blend folds predictions")
    blend_test_predictions()

    if not config.kernel:
        print("Calculate lwlrap metric on cv")
Ejemplo n.º 14
0
    pred = np.concatenate(pred_lst)
    target = np.concatenate(target_lst)
    names = np.concatenate(image_names)

    np.savez(
        val_prediction_dir / f'preds-{NAME}.npz',
        altered_pred=pred,
        altered_target=target,
        name=names,
    )


if __name__ == "__main__":
    if check_dir_not_exist(PREDICTION_DIR):
        fold_dir = EXPERIMENT_DIR / f'fold_0'
        model_path = get_best_model_path(fold_dir)

        print("Model path", model_path)
        predictor = Predictor(model_path,
                              batch_size=BATCH_SIZE,
                              transform=get_transforms(train=False),
                              device=DEVICE,
                              logits=LOGITS,
                              tta=TTA)

        print("Test predict")
        predict_test(predictor)

        print("Val predict")
        predict_validation(predictor)
from src.scraper import Scraper
from src.data import Transformer
import os
from src.data import DatabaseWriter
from src.predictor import Predictor

wd = os.getcwd() + "/"
escreiper = Scraper(wd)
scraped_data_file_path = escreiper.scrape()
transformer = Transformer(scraped_data_file_path)
transformer.write()

dw = DatabaseWriter(wd + "../urgency_predictor_data/data.sqlite", "real",
                    wd + "real_data.csv")
dw.write()

wd = os.getcwd() + "/"
p = Predictor(wd + "../urgency_predictor_data/data.sqlite")
p.fit()
p.predict(60)
p.write()
Ejemplo n.º 16
0
            model=model,
            optimizer=optimizer,
            criterion=criterion,
            scheduler=scheduler,
            device=device,
            seed=seed,
            neptune=neptune_kwargs.get('neptune'),
            converter=converter,
            config=config,
        )

    time_inference = []
    for best_metric in ['best_cer', 'best_wer', 'best_acc', 'last']:
        experiment.load(f'{experiment.experiment_dir}/{best_metric}.pt')
        experiment.model.eval()
        predictor = Predictor(experiment.model, device)
        time_a = time.time()
        predictions = predictor.run_inference(test_loader)
        time_b = time.time()
        time_inference.append(time_b - time_a)
        df_pred = pd.DataFrame([{
            'id':
            prediction['id'],
            'pred_text':
            converter.decode(prediction['raw_output'].argmax(1)),
            'gt_text':
            prediction['gt_text']
        } for prediction in predictions]).set_index('id')

        cer_metric = round(cer(df_pred['pred_text'], df_pred['gt_text']), 5)
        wer_metric = round(wer(df_pred['pred_text'], df_pred['gt_text']), 5)