    def calculate_metrics(self, path):
        """
        Run the predictor over every line of the file at ``path`` and
        print the resulting scores.

        :param path: path to a UTF-8 JSONL file whose lines carry
            ``fact`` (input text) and ``meta`` (ground truth) fields
        :return: None; the scores are printed
        """
        predictor = Predictor()
        cnt = 0
        result = [[], [], {}]
        for _ in range(self.judger.task1_cnt):
            result[0].append({"TP": 0, "FP": 0, "TN": 0, "FN": 0})
        for _ in range(self.judger.task2_cnt):
            result[1].append({"TP": 0, "FP": 0, "TN": 0, "FN": 0})
        result[2] = {"cnt": 0, "score": 0}

        with open(path, encoding="UTF-8") as f:
            for line in f:
                line = json.loads(line)
                ground_truth = line["meta"]
                fact = line["fact"]
                ans = predictor.predict(fact)
                cnt += 1
                result = self.judger.gen_new_result(result, ground_truth,
                                                    ans[0])
        # compute the score once, after all lines have been accumulated
        scores = self.judger.get_score(result)
        # print(result)
        print(scores)
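
The result structure holds one {"TP", "FP", "TN", "FN"} counter per label. As a minimal sketch, assuming the standard micro-averaged F1 definitions (the actual Judger.get_score implementation is not shown here), the score could be derived from those counters like this:

def micro_f1(counters):
    # counters: list of {"TP": int, "FP": int, "TN": int, "FN": int} dicts
    tp = sum(c["TP"] for c in counters)
    fp = sum(c["FP"] for c in counters)
    fn = sum(c["FN"] for c in counters)
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)
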
    def pipeline(self):
        """Main method: take screenshots, preprocess them, and run the predictor."""
        # start a timer that keeps track of the total time needed
        start_time = time.time()
        # load all the necessary parameters from the configuration file
        config = Configuration()
        # start the operation
        logger = configure_logger('default')
        logger.info("Operation started")
        # load the URLs from the CSV file and take full-size screenshots
        run_screenshot = ScreenshotTaker(config.url_file_path,
                                         config.url_file_name,
                                         config.hashed_url_file_path,
                                         config.hashed_url_file_name, logger)
        list_urls_hashed = run_screenshot.link_processor()

        # run the screenshot coroutine to completion inside the event loop
        asyncio.get_event_loop().run_until_complete(
            run_screenshot.screenshot_module(
                list_urls_hashed[:config.batch_size], config.screenshots_path))

        # resize and filter the screenshots
        preprocessor = PreProcessor(config.screenshots_path,
                                    config.path_to_processed, config.width,
                                    config.height, logger)
        preprocessor.resize_pictures(config.screenshots_path,
                                     config.path_to_processed, config.width,
                                     config.height)
        preprocessor.delete_white_pictures(config.path_to_processed)

        # delete the full size screenshots
        # preprocessor.clear_screenshots()

        # predict the resized images and label them accordingly
        predictor = Predictor(
            config.path_to_processed,
            config.path_of_submission,
            config.path_of_the_model,
            config.model_name,
            config.width,
            config.height,
            config.positive_threshold,
            config.hashed_url_file_path,
            config.hashed_url_file_name,
        )
        predictor.predict(
            config.path_to_processed,
            config.path_of_submission,
            config.path_of_the_model,
            config.model_name,
            config.positive_threshold,
        )

        # delete the processed screenshots
        # predictor.clear_processed_screenshots()

        end_time = time.time()
        logger.info("Successfully Completed")
        logger.info("Total time needed: " + str(end_time - start_time) +
                    " seconds")
    def handle(self, *args, **kwargs):
        currency = kwargs.get('currency') or 'USD'
        predictor = Predictor()
        max_date, last_rate, prediction = predictor.predict(currency)
        logger.info(
            "Last date stored: %s, rate was %s - predicted value: %s",
            max_date.isoformat(),
            last_rate,
            prediction[0])
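
handle() reads an optional currency kwarg; in a Django management command that option is normally declared on the command's argument parser. A sketch of that declaration on the same command class (the rest of the class is not shown in the source):

    def add_arguments(self, parser):
        # registers the optional --currency flag consumed by handle() above
        parser.add_argument('--currency', type=str,
                            help="ISO currency code, e.g. 'EUR'; handle() falls back to 'USD'")
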
Example #4
    def __init__(self):
        self.database = Database()
        logging.info("\nReloading phase DB")
        self.database.load_database()

        self.data_collector = DataCollector(WORKERS)
        self.characterizer = Characterizer(self.database)
        self.predictor = Predictor(self.database, ALGO)
        self.metrics_publisher = MetricsPublisher()
        self.curr_phase = ""

        # for graceful exit
        signal(SIGINT, self.sawcap_exit)
        signal(SIGTERM, self.sawcap_exit)
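
sawcap_exit is registered for SIGINT and SIGTERM but its body does not appear in the source. A minimal sketch of such a graceful-exit handler (assumes "import sys" at module level; the log message is illustrative):

    def sawcap_exit(self, signum, frame):
        # hypothetical body; only the registration appears in the source
        logging.info("Received signal %s, shutting down gracefully", signum)
        sys.exit(0)
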
Example #5
def main():
    audio_file = './../data/audio/test_samples/30.wav'
    model_file = './../data/model/svm_model.pkl'

    predictor = Predictor(model_file)
    print(predictor.predict(audio_file))
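
A minimal sketch of a Predictor that loads the pickled SVM, assuming the .pkl file holds a scikit-learn classifier and that the caller passes an already-extracted feature vector (the real predict() evidently accepts the .wav path and does its own feature extraction, which is not shown):

import pickle

class Predictor:
    def __init__(self, model_file):
        # load the serialized model once, at construction time
        with open(model_file, "rb") as f:
            self.model = pickle.load(f)

    def predict(self, features):
        # features: a flat sequence of numeric values for one sample
        return self.model.predict([list(features)])[0]
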
Example #6
from collector.kospi_db_manager import KospiDBManager
from collector.collector import DailyCollector
# from collector.collector import HourlyCollector
from collector.timeutill_helper import TimeUtillHelper
from predictor.predictor import Predictor

start_time = TimeUtillHelper(2009, 5, 1)
end_time = TimeUtillHelper(2019, 6, 20)
daily_collector = DailyCollector("035420", start_time, end_time)
daily_collector.read_stock_data()
daily_collector.update_stock_database()
daily_collector.update_labelled_database()

# start_time = TimeUtillHelper(2019, 7, 29, 9, 10, 00)
# end_time = TimeUtillHelper(2019, 8, 2, 15, 30, 00)
# hourly_collector = HourlyCollector("035420", start_time, end_time)
# hourly_collector.read_stock_data()
# hourly_collector.update_stock_database()
# hourly_collector.update_labelled_database()

predictor = Predictor()
predictor.check_predictor()
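
TimeUtillHelper is constructed above with both date-only and date-plus-time arguments. A minimal sketch consistent with those calls, wrapping the standard datetime (the real helper's interface is not shown, and the accessor name below is hypothetical):

from datetime import datetime

class TimeUtillHelper:
    def __init__(self, year, month, day, hour=0, minute=0, second=0):
        # accepts both the 3-argument and 6-argument forms used above
        self.dt = datetime(year, month, day, hour, minute, second)

    def to_datetime(self):  # hypothetical accessor
        return self.dt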