Code example #1
    def __init__(self,
                 export_dir='experiment',
                 loss=NLLLoss(),
                 batch_size=64,
                 random_seed=None,
                 checkpoint_every=100,
                 print_every=100):
        self._trainer = "Simple Trainer"
        self.random_seed = random_seed

        if random_seed is not None:
            random.seed(random_seed)
            torch.manual_seed(random_seed)

        self.loss = loss
        self.evaluator = Evaluator(loss=self.loss, batch_size=batch_size)
        self.optimizer = None
        self.checkpoint_every = checkpoint_every
        self.print_every = print_every

        if not os.path.isabs(export_dir):
            export_dir = os.path.join(os.getcwd(), export_dir)
        self.export_dir = export_dir
        if not os.path.exists(self.export_dir):
            os.makedirs(self.export_dir)
        self.batch_size = batch_size
        self.logger = logging.getLogger(__name__)
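
A minimal instantiation sketch for the constructor above. The enclosing class name is not shown in the snippet, so SimpleTrainer is assumed here purely for illustration:

# Hypothetical usage; the class name SimpleTrainer is an assumption, since the
# snippet only shows the __init__ body.
trainer = SimpleTrainer(export_dir="runs/exp1",
                        loss=NLLLoss(),
                        batch_size=32,
                        random_seed=42,
                        checkpoint_every=500,
                        print_every=50)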
Code example #2
def runConfiguration(weights, subjects):
    print("Testing configuration: " + str(weights))

    aggregator = Aggregator()
    evaluator = Evaluator()
    aggregator.loadFeatures()
    aggregator.setWeights(weights)  # (TO-DO: actually search for optimal values in n-weights space.)

    evaluations = list()

    for subject in subjects:
        aggregator.setSubject(subjects[subject])
        aggregator.run()
        resultingSummary = aggregator.getResultingSummary()
        idealSummary = aggregator.getIdealSummary()

        evaluator.setTest(resultingSummary, idealSummary)
        evaluations.append(evaluator.run())

    print("Resulting evaluations: " + str(evaluations))

    meanEvaluation = sum(evaluations) / float(len(evaluations))

    print("So that mean evaluation is: " + str(meanEvaluation) + "\n")

    localResult = dict()
    localResult["weights"] = weights
    localResult["evaluation"] = meanEvaluation

    return localResult
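
The TO-DO above points at a search over the weight space; a minimal, hypothetical driver for that search could look like the following (the grid values and the subjects mapping are assumptions, not part of the original project):

# Hypothetical grid search over candidate weight vectors using runConfiguration.
# The grid and n_weights values are placeholders for illustration only.
from itertools import product

def searchWeights(subjects, grid=(0.0, 0.5, 1.0), n_weights=3):
    results = [runConfiguration(list(weights), subjects)
               for weights in product(grid, repeat=n_weights)]
    # keep the configuration with the best mean evaluation
    return max(results, key=lambda r: r["evaluation"])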
Code example #3
File: main.py  Project: mineeme/MPRA-DragoNN
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    config = fetch_args()

    # create the experiments dirs
    create_dirs([config.tensorboard_log_dir, config.checkpoint_dir])

    print('Create the data generator.')
    train_data_loader = DataLoader(config, 'train')
    valid_data_loader = DataLoader(config, 'valid')

    print('Create the model.')
    model = Model(config)
    if config.pretrained_model_checkpoint is not None:
        model.load(config.pretrained_model_checkpoint)
    
    if config.evaluate:
        print('Predicting on test set.')
        test_data_loader = DataLoader(config, 'test')
        evaluator = Evaluator(model.model, test_data_loader, config)
        evaluator.evaluate()
        exit(0)

    print('Create the trainer')
    trainer = Trainer(model.model, train_data_loader, valid_data_loader, config)

    print('Start training the model.')
    trainer.train()
Code example #4
class EvaluatorTest(unittest.TestCase):
    def setUp(self):
        self.preprocessor = mock.create_autospec(spec=PreprocessorService)
        self.extractor = mock.create_autospec(spec=ExtractorService)
        self.sentiment = mock.create_autospec(spec=SentimentService)
        self.db = mock.create_autospec(spec=DataSourceService)
        self.db.list_all_documents = MagicMock(return_value=[])

        self.extractors = {
            "default": {
                "label": "Default",
                "extractor": self.extractor
            }
        }

        self.evaluator = Evaluator(preprocessor=self.preprocessor,
                                   extractors=self.extractors,
                                   sentiment_service=self.sentiment,
                                   db=self.db,
                                   default="default")

    def test_evaluator_invokes_db_on_list(self):
        docs = self.evaluator.get_all_documents()
        self.db.list_all_documents.assert_called_with()
        self.assertEqual(docs, [])

    def test_evaluator_doesnt_run_when_empty(self):
        res = self.evaluator.run_evaluator()
        self.db.list_all_documents.assert_called_with()
        self.assertIsNone(res)
Code example #5
def main():

    config = get_args()

    if config.load_model is not None:
        model, features, target_feature = load_model(config)
        data_loader = DataLoader(config, split=False, pretrained=True)
        data_loader.setup(features, target_feature)
        evaluator = Evaluator(config)

        evaluator.evaluate_pretrianed(model, data_loader, target_feature)
        
        exit(0)

    if config.load_checkpoint:
        auc, acc, pred, classes, completed = load_checkpoint(config)

    data_loader = DataLoader(config, split=not config.active_features)
    evaluator = Evaluator(config)
    trainer = Trainer(config, data_loader, evaluator)

    if config.load_checkpoint:
        evaluator.set_checkpoint(auc, acc, pred, classes)
        trainer.set_completed(completed)

    trainer.train()

    if not config.active_features:
        print(f"AUC ({config.evaluation_mode}): {evaluator.get_auc()}")
        print(f"Accuracy ({config.evaluation_mode}): {evaluator.get_accuracy()}")

        evaluator.save(data_loader.getFeatures())

    display_runtime(config)
Code example #6
def evaluate(models,
             data_set_path,
             log_save_path,
             measurement,
             test_times=1):

    with open(f"{data_set_path}\\data_set_info.json", 'r') as data_set_info_file:
        data_set_info = json.load(data_set_info_file)

    if not os.path.exists(f"{log_save_path}\\{data_set_info['name']}"):
        os.mkdir(f"{log_save_path}\\{data_set_info['name']}")

    shutil.copy(f"{data_set_path}\\data_set_info.json", f"{log_save_path}\\{data_set_info['name']}")

    stream_list = [path for path in os.listdir(data_set_path) if os.path.isdir(f"{data_set_path}\\{path}")]
    stream_list.sort(key=lambda s: int(s.split('_')[1]))  # stream_number

    records_list = []
    for stream in stream_list:
        log_stream_path = f"{log_save_path}\\{data_set_info['name']}\\{stream}"
        if not os.path.exists(log_stream_path):
            os.mkdir(log_stream_path)
        if not os.path.exists(f"{log_stream_path}\\record"):
            os.mkdir(f"{log_stream_path}\\record")
        if not os.path.exists(f"{log_stream_path}\\figure"):
            os.mkdir(f"{log_stream_path}\\figure")
        data = pd.read_csv(f"{data_set_path}\\{stream}\\data.csv", header=None)
        with open(f"{data_set_path}\\{stream}\\data_info.json", 'r') as data_info_file:
            data_info = json.load(data_info_file)
        X = np.array(data.iloc[:, 1:])
        y = np.array(data.iloc[:, 0])

        perf_records = {}
        for model in models:
            for _ in range(test_times):
                test_model = copy.deepcopy(models[model])
                test_model.budget = data_info['budget']
                evaluator = Evaluator(measurement=measurement,
                                      pretrain_size=1,
                                      batch_size=1,
                                      budget=data_info['budget'])
                if model not in perf_records.keys():
                    perf_records[model] = evaluator.evaluate(X, y, model=test_model)
                else:
                    perf_records[model] += evaluator.evaluate(X, y, model=test_model)
            perf_records[model] = perf_records[model] / test_times
        perf_records = pd.DataFrame(perf_records)
        records_list.append(perf_records)
        perf_records.to_csv(f"{log_stream_path}\\record\\{measurement}.csv", index=None)
        plot_lines(perf_records, f"{log_stream_path}\\figure\\{measurement}", "pdf", 15, 'time', measurement)

    report_file = open(f"{log_save_path}\\report.md", 'a')
    report_file.write(f"# {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())}\n")
    report_file.write(f"{data_set_info}\n\n")
    report_file.write(f"{measurement}\n\n")
    table = get_report(data_set_info, records_list, file_type="md")
    report_file.write(f"{table}\n")
    report_file.close()
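
A portability note on the example above: the hard-coded "\\" separators tie it to Windows. A small sketch of the equivalent path handling with pathlib (the paths used here are placeholders, not taken from the original project):

# Sketch only: building the same kind of paths without hard-coding the separator.
from pathlib import Path

data_set_path = Path("data_sets/my_stream_set")            # placeholder location
data_set_info_path = data_set_path / "data_set_info.json"
log_stream_dir = Path("logs") / "my_data_set" / "stream_1"
log_stream_dir.joinpath("record").mkdir(parents=True, exist_ok=True)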
Code example #7
    def __init__(self,
                 config,
                 time_frame,
                 global_price_updater,
                 symbol_evaluator,
                 exchange,
                 trading_mode,
                 real_time_ta_eval_list,
                 main_loop,
                 relevant_evaluators=None):

        if relevant_evaluators is None:
            relevant_evaluators = CONFIG_EVALUATORS_WILDCARD
        self.config = config
        self.exchange = exchange
        self.trading_mode = trading_mode
        self.symbol = symbol_evaluator.get_symbol()
        self.time_frame = time_frame
        self.global_price_updater = global_price_updater
        self.symbol_evaluator = symbol_evaluator
        self.main_loop = main_loop
        self.should_refresh_matrix_evaluation_types = True

        self.should_save_evaluations = CONFIG_SAVE_EVALUATION in self.config and self.config[
            CONFIG_SAVE_EVALUATION]

        # notify symbol evaluator
        self.symbol_evaluator.add_evaluator_task_manager(
            self.exchange, self.time_frame, self.trading_mode, self)

        self.matrix = self.symbol_evaluator.get_matrix(self.exchange)
        self.matrix_exporter = MatrixExporter(self.matrix, self.symbol)

        self.task_name = f"Evaluator TASK MANAGER - {self.symbol} - {self.exchange.get_name()} - {self.time_frame}"
        self.logger = get_logger(self.task_name)

        # Create Evaluator
        self.evaluator = Evaluator()
        self.evaluator.set_config(self.config)
        self.evaluator.set_symbol(self.symbol)
        self.evaluator.set_time_frame(self.time_frame)
        self.evaluator.set_exchange(self.exchange)
        self.evaluator.set_symbol_evaluator(self.symbol_evaluator)

        # Add tasked evaluators that can notify the current task
        self.evaluator.set_social_eval(
            self.symbol_evaluator.get_crypto_currency_evaluator().
            get_social_eval_list(), self)
        self.evaluator.set_real_time_eval(real_time_ta_eval_list, self)

        # Create static evaluators
        self.evaluator.set_ta_eval_list(
            self.evaluator.get_creator().create_ta_eval_list(
                self.evaluator, relevant_evaluators), self)

        # Register in refreshing task
        self.global_price_updater.register_evaluator_task_manager(
            self.time_frame, self)
Code example #8
    def __init__(self,
                 config,
                 time_frame,
                 symbol_time_frame_updater_thread,
                 symbol_evaluator,
                 exchange,
                 trading_mode,
                 real_time_ta_eval_list,
                 relevant_evaluators=CONFIG_EVALUATORS_WILDCARD):

        self.config = config
        self.exchange = exchange
        self.trading_mode = trading_mode
        self.symbol = symbol_evaluator.get_symbol()
        self.time_frame = time_frame
        self.symbol_time_frame_updater_thread = symbol_time_frame_updater_thread
        self.symbol_evaluator = symbol_evaluator

        self.should_save_evaluations = CONFIG_SAVE_EVALUATION in self.config and self.config[
            CONFIG_SAVE_EVALUATION]

        # notify symbol evaluator
        self.symbol_evaluator.add_evaluator_thread_manager(
            self.exchange, self.time_frame, self.trading_mode, self)

        self.matrix = self.symbol_evaluator.get_matrix(self.exchange)
        self.matrix_exporter = MatrixExporter(self.matrix, self.symbol)

        self.thread_name = f"TA THREAD MANAGER - {self.symbol} - {self.exchange.get_name()} - {self.time_frame}"
        self.logger = get_logger(self.thread_name)

        # Create Evaluator
        self.evaluator = Evaluator()
        self.evaluator.set_config(self.config)
        self.evaluator.set_symbol(self.symbol)
        self.evaluator.set_time_frame(self.time_frame)
        self.evaluator.set_exchange(self.exchange)
        self.evaluator.set_symbol_evaluator(self.symbol_evaluator)

        # Add threaded evaluators that can notify the current thread
        self.evaluator.set_social_eval(
            self.symbol_evaluator.get_crypto_currency_evaluator().
            get_social_eval_list(), self)
        self.evaluator.set_real_time_eval(real_time_ta_eval_list, self)

        # Create static evaluators
        self.evaluator.set_ta_eval_list(
            self.evaluator.get_creator().create_ta_eval_list(
                self.evaluator, relevant_evaluators), self)

        # Register in refreshing threads
        self.symbol_time_frame_updater_thread.register_evaluator_thread_manager(
            self.time_frame, self)
Code example #9
def _get_tools():
    config = load_test_config()
    AdvancedManager.create_class_list(config)
    evaluator = Evaluator()
    evaluator.set_config(config)
    evaluator.set_symbol("BTC/USDT")
    evaluator.set_time_frame(TimeFrames.ONE_HOUR)
    return evaluator, config
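
A hypothetical test built on the helper above, showing only how the returned (evaluator, config) pair might be consumed; the test body is an assumption, not taken from the original test suite:

# Hypothetical consumer of _get_tools(); asserts only what the helper itself sets up.
def test_get_tools_builds_evaluator():
    evaluator, config = _get_tools()
    assert isinstance(evaluator, Evaluator)
    assert config is not None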
Code example #10
def evaluate(net, data_loader, copy_net=False):
    """
    :param net: neural net
    :param copy_net: boolean
    :param data_loader: DataLoader
    :return: Data structure of class Evaluator containing the amount correct for each class
    """
    if copy_net:
        Net = NET().cuda()
        Net.load_state_dict(net.state_dict())
        Net.eval()
    else:
        Net = net

    eval = Evaluator()
    class_correct = [0 for _ in range(NUM_CLASSES)]
    class_total = [0 for _ in range(NUM_CLASSES)]

    i = 0
    size = BATCH_SIZE * len(data_loader)

    for (inputs, labels) in data_loader:
        inputs, labels = [Variable(input).cuda()
                          for input in inputs], labels.cuda()
        outputs = Net(inputs)
        _, predicted = torch.max(outputs.data, 1)
        guesses = (predicted == labels).squeeze()

        # label: int, 0 to NUM_CLASSES - 1
        # guess: int, 0 or 1 (i.e. True or False)
        for guess, label in zip(guesses, labels):
            class_correct[label] += guess
            class_total[label] += 1

        i += BATCH_SIZE
        sys.stdout.write('\r' + str(i) + '/' + str(size))
        sys.stdout.flush()

    # Update the information in the Evaluator
    for i, (correct, total) in enumerate(zip(class_correct, class_total)):
        eval.update_accuracy(class_name=i,
                             amount_correct=correct,
                             amount_total=total)

    return eval
Code example #11
    def __init__(self, config,
                 symbol,
                 time_frame,
                 symbol_time_frame_updater_thread,
                 symbol_evaluator,
                 exchange,
                 real_time_ta_eval_list):
        self.config = config
        self.exchange = exchange
        self.symbol = symbol
        self.time_frame = time_frame
        self.symbol_time_frame_updater_thread = symbol_time_frame_updater_thread
        self.symbol_evaluator = symbol_evaluator

        # notify symbol evaluator
        self.symbol_evaluator.add_evaluator_thread_manager(self.exchange, self.symbol, self.time_frame, self)

        self.matrix = self.symbol_evaluator.get_matrix(self.exchange)

        # Exchange
        # TODO : self.exchange.update_balance(self.symbol)

        self.thread_name = "TA THREAD MANAGER - {0} - {1} - {2}".format(self.symbol,
                                                                        self.exchange.get_name(),
                                                                        self.time_frame)
        self.logger = logging.getLogger(self.thread_name)

        # Create Evaluator
        self.evaluator = Evaluator()
        self.evaluator.set_config(self.config)
        self.evaluator.set_symbol(self.symbol)
        self.evaluator.set_time_frame(self.time_frame)
        self.evaluator.set_exchange(self.exchange)
        self.evaluator.set_symbol_evaluator(self.symbol_evaluator)

        # Add threaded evaluators that can notify the current thread
        self.evaluator.set_social_eval(self.symbol_evaluator.get_crypto_currency_evaluator().get_social_eval_list(), self)
        self.evaluator.set_real_time_eval(real_time_ta_eval_list, self)

        # Create static evaluators
        self.evaluator.set_ta_eval_list(self.evaluator.get_creator().create_ta_eval_list(self.evaluator))

        # Register in refreshing threads
        self.symbol_time_frame_updater_thread.register_evaluator_thread_manager(self.time_frame, self)
Code example #12
    def setUp(self):
        self.preprocessor = mock.create_autospec(spec=PreprocessorService)
        self.extractor = mock.create_autospec(spec=ExtractorService)
        self.sentiment = mock.create_autospec(spec=SentimentService)
        self.db = mock.create_autospec(spec=DataSourceService)
        self.db.list_all_documents = MagicMock(return_value=[])

        self.extractors = {
            "default": {
                "label": "Default",
                "extractor": self.extractor
            }
        }

        self.evaluator = Evaluator(preprocessor=self.preprocessor,
                                   extractors=self.extractors,
                                   sentiment_service=self.sentiment,
                                   db=self.db,
                                   default="default")
Code example #13
File: ia2015.py  Project: jonmagal/switchrs
    def __init__(self, dataset_id, dataset_switch_id, force):
        self._init_dir()

        self.dataset        = DataSet(dataset_id = dataset_id, sframe = True)
        self.dataset_switch = DataSet(dataset_id = dataset_switch_id, sframe = False)

        self.model_manager  = ModelManager()
        self.switch_manager = SwitchManager()
        self.evaluator      = Evaluator()

        self.force          = force
Code example #14
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        json_file = '../configs/example.json'
        # config = process_config(args.config)
        config = process_config(json_file)

    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    sess = tf.compat.v1.Session()
    # create your data generator
    data = DataGenerator(config)
    data.generate_data()

    # create an instance of the model you want
    model = ExampleModel(config)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all the previous components to it
    trainer = ExampleTrainer(sess, model, data, config, logger)
    #load model if exists
    model.load(sess)
    # here you train your model
    trainer.train()
    # here you evaluate your model
    evaluator = Evaluator(trainer.sess, trainer.model, data, config, logger)
    evaluator.evaluate()
    evaluator.analysis_results()
Code example #15
    def __init__(self, config,
                 symbol,
                 time_frame,
                 symbol_evaluator,
                 exchange,
                 real_time_ta_eval_list):
        self.config = config
        self.symbol = symbol
        self.time_frame = time_frame
        self.symbol_evaluator = symbol_evaluator

        # notify symbol evaluator
        self.symbol_evaluator.add_evaluator_thread(self)

        self.matrix = self.symbol_evaluator.get_matrix()

        # Exchange
        self.exchange = exchange
        # TODO : self.exchange.update_balance(self.symbol)

        self.thread_name = "TA THREAD - {0} - {1} - {2}".format(self.symbol,
                                                                self.exchange.__class__.__name__,
                                                                self.time_frame)
        self.logger = logging.getLogger(self.thread_name)

        # Create Evaluator
        self.evaluator = Evaluator()
        self.evaluator.set_config(self.config)
        self.evaluator.set_symbol(self.symbol)
        self.evaluator.set_time_frame(self.time_frame)
        self.evaluator.set_exchange(self.exchange)
        self.evaluator.set_symbol_evaluator(self.symbol_evaluator)

        # Add threaded evaluators that can notify the current thread
        self.evaluator.set_social_eval(self.symbol_evaluator.get_social_eval_list(), self)
        self.evaluator.set_real_time_eval(real_time_ta_eval_list, self)
        self.evaluator.set_ta_eval_list(self.evaluator.get_creator().create_ta_eval_list(self.evaluator))

        # Create refreshing threads
        self.data_refresher = TimeFrameUpdateDataThread(self)
Code example #16
def main(config: DictConfig) -> None:
    print(OmegaConf.to_yaml(config))

    torch.manual_seed(config.eval.seed)
    torch.cuda.manual_seed_all(config.eval.seed)
    np.random.seed(config.eval.seed)
    random.seed(config.eval.seed)

    use_cuda = config.eval.cuda and torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')

    char2id, id2char = load_label(config.eval.label_path, config.eval.blank_id)
    audio_paths, transcripts, _, _ = load_dataset(config.eval.dataset_path,
                                                  config.eval.mode)

    test_dataset = SpectrogramDataset(
        config.eval.audio_path,
        audio_paths,
        transcripts,
        config.audio.sampling_rate,
        config.audio.n_mel,
        config.audio.frame_length,
        config.audio.frame_stride,
        config.audio.extension,
        config.train.sos_id,
        config.train.eos_id,
    )
    test_loader = AudioDataLoader(
        test_dataset,
        batch_size=config.eval.batch_size,
        num_workers=config.eval.num_workers,
    )

    model = load_test_model(config, device)

    print('Start Test !!!')

    evaluator = Evaluator(config, device, test_loader, id2char)
    evaluator.evaluate(model)
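
The DictConfig signature and OmegaConf.to_yaml call above suggest a Hydra-style entry point. A minimal, hypothetical wrapper that would supply the config argument might look like this (the config_path and config_name values are assumptions):

# Hypothetical Hydra wiring; "configs" and "eval" are placeholder names.
import hydra
from omegaconf import DictConfig

@hydra.main(config_path="configs", config_name="eval")
def hydra_entry(config: DictConfig) -> None:
    main(config)

if __name__ == '__main__':
    hydra_entry()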
Code example #17
File: tester.py  Project: Morzeux/LangBenchmarks
def main():
    """ Main tester class. """
    print(get_license())
    print('')

    languages = load_languages()

    versions = Evaluator.print_versions(languages)
    print(versions)
    system_info = Evaluator.print_system_info()
    print(system_info)
    Evaluator.compile_languages(languages)
    results = Evaluator.test_languages(languages, C.TESTS,
                                       C.EVALUATIONS, C.TIMEOUT)
    print('*' * 60)
    DocuGenerator.generate_readme(versions, system_info, results)
    Evaluator.cleanup(languages)
Code example #18
File: tester.py  Project: Morzeux/LangBenchmarks
def main():
    """ Main tester class. """
    print(get_license())
    print('')

    languages = load_languages()

    versions = Evaluator.print_versions(languages)
    print(versions)
    system_info = Evaluator.print_system_info()
    print(system_info)
    Evaluator.compile_languages(languages)
    results = Evaluator.test_languages(languages, C.TESTS, C.EVALUATIONS,
                                       C.TIMEOUT)
    print('*' * 60)
    DocuGenerator.generate_readme(versions, system_info, results)
    Evaluator.cleanup(languages)
Code example #19
def evaluate(net: nn.Module,
             data_loader: DataLoader,
             copy_net=True):
    """
    :param net: neural net
    :param copy_net: boolean
    :param data_loader: DataLoader
    :return: Data structure of class Evaluator containing the amount correct for each class
    """
    if copy_net:
        Net = copy.deepcopy(net)
        Net.eval()
    else:
        Net = net
        Net.eval()

    i = 0
    all_output_labels = torch.FloatTensor()
    all_true_labels = torch.LongTensor()

    for (inputs, labels) in data_loader:
        inputs, labels = [Variable(input).cuda() for input in inputs], labels
        output_labels = Net(inputs).cpu().detach().data

        all_output_labels = torch.cat((all_output_labels, output_labels))
        all_true_labels = torch.cat((all_true_labels, labels))

        i += data_loader.batch_size
        sys.stdout.write('\r' + str(i) + '/' + str(data_loader.batch_size * len(data_loader)))
        sys.stdout.flush()


    # Update the information in the Evaluator
    eval = Evaluator(all_true_labels, all_output_labels, 2)
    Net.train()
    return eval
Code example #20
File: ia2015.py  Project: jonmagal/switchrs
class Experiment():

    dataset         = None
    dataset_switch  = None
    
    model_manager   = None
    switch_manager  = None
    
    evaluator       = None
    force           = None
    
    def __init__(self, dataset_id, dataset_switch_id, force):
        self._init_dir()
        
        self.dataset        = DataSet(dataset_id = dataset_id, sframe = True)
        self.dataset_switch = DataSet(dataset_id = dataset_switch_id, sframe = False)
        
        self.model_manager  = ModelManager()
        self.switch_manager = SwitchManager()
        self.evaluator      = Evaluator()
        
        self.force          = force
        
    def _init_dir(self):
        import os
        from settings import DIR

        for d in DIR:
            if not os.path.exists(d):
                os.makedirs(d)
                
    def _train_rec_models(self):
        self.model_manager.train_models(dataset = self.dataset)
        
    def _test_rec_models(self):
        self.model_manager.test_models(dataset = self.dataset)
    
    def _evaluate_rec_models(self):
        self.model_manager.evaluate_models(dataset = self.dataset)
        
    def _create_datasets_switch(self):   
        self.dataset_switch.prepare_switch_dataset(dataset = self.dataset, model_manager = self.model_manager, 
                                                   force = self.force)
        
    def _train_switch(self):
        self.switch_manager.train_models(dataset_switch = self.dataset_switch, force = self.force)
        
    def _test_switch(self):
        self.switch_manager.rating_prediction_switches(dataset = self.dataset, dataset_switch = self.dataset_switch, 
                         model_manager = self.model_manager, force = self.force)
    
    def _evaluate(self):
        self.evaluator.evaluate(dataset = self.dataset, dataset_switch = self.dataset_switch, 
                                model_manager = self.model_manager, switch_manager = self.switch_manager, 
                                force = True)

    def run(self):
        self._train_rec_models()
        self._test_rec_models()
        self._evaluate_rec_models()
        self._create_datasets_switch()
        self._train_switch()
        self._test_switch()
        self._evaluate()
Code example #21
File: validate.py  Project: hiok2000/faceanalysis
def _main(args: Namespace) -> None:
    evaluator = Evaluator.create_evaluator(args)
    evaluation_results = evaluator.evaluate()
    print('Evaluation results: ', evaluation_results)
    parser_metrics = evaluator.compute_metrics()
    print('Parser metrics: ', parser_metrics)
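
_main expects an already-parsed Namespace; a hypothetical command-line wrapper is sketched below (the flag name is an assumption, not the real validate.py interface):

# Hypothetical argparse wiring for _main; the --dataset flag is a placeholder.
from argparse import ArgumentParser, Namespace

def _parse_args() -> Namespace:
    parser = ArgumentParser(description="Run face-analysis evaluation")
    parser.add_argument("--dataset", required=True)
    return parser.parse_args()

if __name__ == '__main__':
    _main(_parse_args())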
Code example #22
t = Corpus(10, 'test')
t.shuffle()

start = time.time()

m.test_model(t)
runtime = time.time() - start
avg_runtime = round((runtime / t.get_size()), 3)
runtime = round(runtime, 3)

for x in t:
    print('=' * 50)
    print('Text:\t[', x.get_utterance(), ']')
    print('Gold:\t[', x.get_gold_slots(), ']')
    print('Pred:\t[', x.get_pred_slots(), ']')
    print()
    slots_per_intent = x.get_pred_slots_per_intent()
    intent_probs = {}
    for intent in slots_per_intent:
        intent_probs[intent] = slots_per_intent[intent]['prob']
    highest_intent = sorted(intent_probs.items(), key=itemgetter(1))[-1][0]
    print('Gold Intent: ', x.get_gold_intent())
    print('Pred Intent: ', highest_intent)
    x.set_pred_intent(highest_intent)

e = Evaluator(t, intent_set, slot_set)
print(e)

print(f'Total Runtime: {runtime} s. Avg for one instance: {avg_runtime} s.')
Code example #23
    'v1.2': {
        'label': 'v1.2 (Basic with Strict Sentiment)',
        'extractor': SpacyExtractor(vader, ignore_zero=True)
    },
    'v2': {
        'label': 'v2.0 (Generics Analysis)',
        'extractor': RuleBasedExtractor(vader)
    },
}

test_db = DatabaseSource(is_production=False)
default = 'v2'

evaluator = Evaluator(preprocessor=preprocessor,
                      sentiment_service=vader,
                      extractors=extractors,
                      db=test_db,
                      default=default)


@app.route("/test/documents", methods=['GET'])
@cross_origin(supports_credentials=True)
def get_test_documents():
    ids_to_metas = evaluator.get_all_documents()

    results = []
    for id in ids_to_metas:
        results.append({'id': id, 'metadata': ids_to_metas[id]})

    return jsonify(results)
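
A hypothetical client-side check of the route above, assuming the Flask app is served locally on port 5000 with CORS enabled as shown:

# Sketch only: querying /test/documents from a separate process.
import requests

resp = requests.get("http://localhost:5000/test/documents")
for doc in resp.json():
    print(doc["id"], doc["metadata"])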
Code example #24
# stdlib / torch imports used later in this snippet (os, time, cpu_count, optim)
import os
import time
from multiprocessing import cpu_count

import torch.optim as optim
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence
from torch.utils.data import DataLoader

from dataset.corpus import Corpus
from dataset.evaluator_coco_dataset import EvaluatorCocoDataset
from evaluator.evaluator import Evaluator
from evaluator.evaluator_loss import EvaluatorLoss
from file_path_manager import FilePathManager
from generator.conditional_generator import ConditionalGenerator

if __name__ == '__main__':
    if not os.path.exists(FilePathManager.resolve("models")):
        os.makedirs(FilePathManager.resolve("models"))
    corpus = Corpus.load(FilePathManager.resolve("data/corpus.pkl"))
    evaluator = Evaluator(corpus).cuda()
    generator = ConditionalGenerator.load(corpus).cuda()
    generator.freeze()
    dataset = EvaluatorCocoDataset(corpus)
    batch_size = 128
    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=cpu_count())
    criterion = EvaluatorLoss().cuda()
    optimizer = optim.Adam(evaluator.parameters(), lr=4e-4, weight_decay=1e-5)
    epochs = 5
    print(f"number of batches = {len(dataset) // batch_size}")
    print("Begin Training")
    for epoch in range(epochs):
        start = time.time()
Code example #25
# imports used later in this snippet (optim and cpu_count)
import torch.optim as optim
from multiprocessing import cpu_count

from torch.utils.data import DataLoader

from dataset.corpus import Corpus
from dataset.evaluator_coco_dataset import EvaluatorCocoDataset
from evaluator.evaluator import Evaluator
from evaluator.evaluator_loss import EvaluatorLoss
from file_path_manager import FilePathManager
from generator.conditional_generator import ConditionalGenerator
from policy_gradient.rl_loss import RLLoss

if __name__ == '__main__':
    epochs = 200
    batch_size = 128
    monte_carlo_count = 16
    corpus = Corpus.load(FilePathManager.resolve("data/corpus.pkl"))
    evaluator = Evaluator.load(corpus).cuda()
    generator = ConditionalGenerator.load(corpus).cuda()
    dataset = EvaluatorCocoDataset(corpus)
    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=cpu_count())
    evaluator_criterion = EvaluatorLoss().cuda()
    generator_criterion = RLLoss().cuda()
    evaluator_optimizer = optim.Adam(evaluator.parameters(),
                                     lr=1e-4,
                                     weight_decay=1e-5)
    generator_optimizer = optim.Adam(generator.parameters(),
                                     lr=1e-4,
                                     weight_decay=1e-5)
    print(f"number of batches = {len(dataset) // batch_size}")
Code example #26
class EvaluatorTaskManager:
    def __init__(self, config,
                 time_frame,
                 global_price_updater,
                 symbol_evaluator,
                 exchange,
                 trading_mode,
                 real_time_ta_eval_list,
                 main_loop,
                 relevant_evaluators=None):

        if relevant_evaluators is None:
            relevant_evaluators = CONFIG_EVALUATORS_WILDCARD
        self.config = config
        self.exchange = exchange
        self.trading_mode = trading_mode
        self.symbol = symbol_evaluator.get_symbol()
        self.time_frame = time_frame
        self.global_price_updater = global_price_updater
        self.symbol_evaluator = symbol_evaluator
        self.main_loop = main_loop

        self.should_save_evaluations = CONFIG_SAVE_EVALUATION in self.config and self.config[CONFIG_SAVE_EVALUATION]

        # notify symbol evaluator
        self.symbol_evaluator.add_evaluator_task_manager(self.exchange, self.time_frame, self.trading_mode, self)

        self.matrix = self.symbol_evaluator.get_matrix(self.exchange)
        self.matrix_exporter = MatrixExporter(self.matrix, self.symbol)

        self.task_name = f"TA TASK MANAGER - {self.symbol} - {self.exchange.get_name()} - {self.time_frame}"
        self.logger = get_logger(self.task_name)

        # Create Evaluator
        self.evaluator = Evaluator()
        self.evaluator.set_config(self.config)
        self.evaluator.set_symbol(self.symbol)
        self.evaluator.set_time_frame(self.time_frame)
        self.evaluator.set_exchange(self.exchange)
        self.evaluator.set_symbol_evaluator(self.symbol_evaluator)

        # Add tasked evaluators that can notify the current task
        self.evaluator.set_social_eval(self.symbol_evaluator.get_crypto_currency_evaluator().get_social_eval_list(),
                                       self)
        self.evaluator.set_real_time_eval(real_time_ta_eval_list, self)

        # Create static evaluators
        self.evaluator.set_ta_eval_list(self.evaluator.get_creator().create_ta_eval_list(self.evaluator,
                                                                                         relevant_evaluators), self)

        # Register in refreshing task
        self.global_price_updater.register_evaluator_task_manager(self.time_frame, self)

    # handle notifications from evaluators, when notified refresh symbol evaluation matrix
    async def notify(self, notifier_name, force_TA_refresh=False, finalize=False, interruption=False):
        if self._should_consider_notification(notifier_name, interruption=interruption):
            self.logger.debug(f"** Notified by {notifier_name} **")
            if force_TA_refresh:
                await self.global_price_updater.force_refresh_data(self.time_frame, self.symbol)
            await self._refresh_eval(notifier_name, finalize=finalize)

    def _should_consider_notification(self, notifier_name, interruption=False):
        if self.get_refreshed_times() > 0:
            if interruption:
                # if notification comes from an interruption (real_time or social evaluator),
                # first ensure that everything is initialized properly
                return_val = self.symbol_evaluator.are_all_timeframes_initialized(self.exchange)
            else:
                return True
        else:
            return_val = False
        if not return_val:
            self.logger.debug(f"Notification by {notifier_name} ignored")
        return return_val

    async def _refresh_eval(self, ignored_evaluator=None, finalize=False):
        # update eval
        await self.evaluator.update_ta_eval(ignored_evaluator)

        # update matrix
        self.refresh_matrix()

        # update strategies matrix
        await self.symbol_evaluator.update_strategies_eval(self.matrix, self.exchange, ignored_evaluator)

        if finalize:
            # calculate the final result
            await self.symbol_evaluator.finalize(self.exchange)

        # save evaluations if option is activated
        self._save_evaluations_if_necessary()

        self.logger.debug(f"MATRIX : {self.matrix.get_matrix()}")

    def refresh_matrix(self):
        self.matrix = self.symbol_evaluator.get_matrix(self.exchange)

        for ta_eval in self.evaluator.get_ta_eval_list():
            if ta_eval.get_is_active():
                ta_eval.ensure_eval_note_is_not_expired()
                self.matrix.set_eval(EvaluatorMatrixTypes.TA, ta_eval.get_name(),
                                     ta_eval.get_eval_note(), self.time_frame)
            else:
                self.matrix.set_eval(EvaluatorMatrixTypes.TA, ta_eval.get_name(),
                                     START_PENDING_EVAL_NOTE, self.time_frame)

        for social_eval in self.evaluator.get_social_eval_list():
            if social_eval.get_is_active():
                social_eval.ensure_eval_note_is_not_expired()
                self.matrix.set_eval(EvaluatorMatrixTypes.SOCIAL, social_eval.get_name(),
                                     social_eval.get_eval_note(), None)
            else:
                self.matrix.set_eval(EvaluatorMatrixTypes.SOCIAL, social_eval.get_name(),
                                     START_PENDING_EVAL_NOTE)

        for real_time_eval in self.evaluator.get_real_time_eval_list():
            if real_time_eval.get_is_active():
                real_time_eval.ensure_eval_note_is_not_expired()
                self.matrix.set_eval(EvaluatorMatrixTypes.REAL_TIME, real_time_eval.get_name(),
                                     real_time_eval.get_eval_note())
            else:
                self.matrix.set_eval(EvaluatorMatrixTypes.REAL_TIME, real_time_eval.get_name(),
                                     START_PENDING_EVAL_NOTE)

    def _save_evaluations_if_necessary(self):
        if self.should_save_evaluations and self.symbol_evaluator.are_all_timeframes_initialized(self.exchange):
            self.matrix_exporter.save()

    def get_refreshed_times(self):
        return self.global_price_updater.get_refreshed_times(self.time_frame, self.symbol)

    def get_evaluator(self):
        return self.evaluator

    def get_global_price_updater(self):
        return self.global_price_updater

    def get_exchange(self):
        return self.exchange

    def get_symbol_evaluator(self):
        return self.symbol_evaluator

    def get_symbol(self):
        return self.symbol
Code example #27
    def __init__(self, optimizer, criterion, batch_size, device):
        self.optimizer = optimizer
        self.criterion = criterion
        self.batch_size = batch_size
        self.device = device
        self.evaluator = Evaluator(criterion=self.criterion)
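
A minimal, hypothetical construction of this trainer (the model, learning rate and device are placeholders; code example #29 below shows the same constructor inside the full Trainer class):

# Hypothetical wiring for the constructor above; nn.Linear is a stand-in model.
import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(16, 4)
trainer = Trainer(optimizer=optim.Adam(model.parameters(), lr=1e-3),
                  criterion=nn.CrossEntropyLoss(),
                  batch_size=32,
                  device=torch.device('cpu'))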
Code example #28
def main():
    setMemoryLimit(10000000000)

    # create the top-level parser
    parser = argparse.ArgumentParser(prog='FA*IR', description='a fair Top-k ranking algorithm',
                                     epilog="=== === === end === === ===")
    parser.add_argument("-c", "--create", nargs='*', help="creates a ranking from the raw data and dumps it to disk")
    parser.add_argument("-e", "--evaluate", nargs='*', help="evaluates rankings and writes results to disk")
    subparsers = parser.add_subparsers(help='sub-command help')

    # create the parser for the "create" command
    parser_create = subparsers.add_parser('dataset_create', help='choose a dataset to generate')
    parser_create.add_argument(dest='dataset_to_create', choices=["sat", "compas", "germancredit", "xing", "chilesat", "lsat"])

    # create the parser for the "evaluate" command
    parser_evaluate = subparsers.add_parser('dataset_evaluate', help='choose a dataset to evaluate')
    parser_evaluate.add_argument(dest='dataset_to_evaluate', choices=["sat", "xing",
                                                                  "compas_gender", "compas_race",
                                                                  "germancredit_25", "germancredit_35", "germancredit_gender"])

    args = parser.parse_args()

    if args.create == []:
        print("creating rankings for all datasets...")
        createDataAndRankings()
    elif args.create == ['sat']:
        createAndRankSATData()
    elif args.create == ['compas']:
        createAndRankCOMPASData()
    elif args.create == ['germancredit']:
        createAndRankGermanCreditData()
    elif args.create == ['xing']:
        createAndRankXingData()
    elif args.create == ['chilesat']:
        createAndRankChileData()
        # gender
        convertFAIRPicklesToCSV("../results/rankingDumps/ChileSAT/gender/fold_1/",
                               "../../Meike-FairnessInL2R-Code/octave-src/sample/ChileUni/NoSemi/gender/",
                               fold="fold_1/")
        convertFAIRPicklesToCSV("../results/rankingDumps/ChileSAT/gender/fold_2/",
                               "../../Meike-FairnessInL2R-Code/octave-src/sample/ChileUni/NoSemi/gender/",
                               fold="fold_2/")
        convertFAIRPicklesToCSV("../results/rankingDumps/ChileSAT/gender/fold_3/",
                               "../../Meike-FairnessInL2R-Code/octave-src/sample/ChileUni/NoSemi/gender/",
                               fold="fold_3/")
        convertFAIRPicklesToCSV("../results/rankingDumps/ChileSAT/gender/fold_4/",
                               "../../Meike-FairnessInL2R-Code/octave-src/sample/ChileUni/NoSemi/gender/",
                               fold="fold_4/")
        convertFAIRPicklesToCSV("../results/rankingDumps/ChileSAT/gender/fold_5/",
                               "../../Meike-FairnessInL2R-Code/octave-src/sample/ChileUni/NoSemi/gender/",
                               fold="fold_5/")
        # highschool
        convertFAIRPicklesToCSV("../results/rankingDumps/ChileSAT/highschool/fold_1/",
                               "../../Meike-FairnessInL2R-Code/octave-src/sample/ChileUni/NoSemi/highschool/",
                               fold="fold_1/")
        convertFAIRPicklesToCSV("../results/rankingDumps/ChileSAT/highschool/fold_2/",
                               "../../Meike-FairnessInL2R-Code/octave-src/sample/ChileUni/NoSemi/highschool/",
                               fold="fold_2/")
        convertFAIRPicklesToCSV("../results/rankingDumps/ChileSAT/highschool/fold_3/",
                               "../../Meike-FairnessInL2R-Code/octave-src/sample/ChileUni/NoSemi/highschool/",
                               fold="fold_3/")
        convertFAIRPicklesToCSV("../results/rankingDumps/ChileSAT/highschool/fold_4/",
                               "../../Meike-FairnessInL2R-Code/octave-src/sample/ChileUni/NoSemi/highschool/",
                               fold="fold_4/")
        convertFAIRPicklesToCSV("../results/rankingDumps/ChileSAT/highschool/fold_5/",
                               "../../Meike-FairnessInL2R-Code/octave-src/sample/ChileUni/NoSemi/highschool/",
                               fold="fold_5/")
    elif args.create == ['lsat']:
#         createAndRankLSATData()
        convertFAIRPicklesToCSV("../results/rankingDumps/LSAT/gender/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/LawStudents/gender/")
        convertFAIRPicklesToCSV("../results/rankingDumps/LSAT/race_asian/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/LawStudents/race_asian/")
        convertFAIRPicklesToCSV("../results/rankingDumps/LSAT/race_black/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/LawStudents/race_black/")
        convertFAIRPicklesToCSV("../results/rankingDumps/LSAT/race_hispanic/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/LawStudents/race_hispanic/")
        convertFAIRPicklesToCSV("../results/rankingDumps/LSAT/race_mexican/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/LawStudents/race_mexican/")
        convertFAIRPicklesToCSV("../results/rankingDumps/LSAT/race_puertorican/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/LawStudents/race_puertorican/")
    elif args.create == ['trec']:
        createAndRankTRECData()
        convertFAIRPicklesToCSV("../results/rankingDumps/TREC/fold_1/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/TREC/",
                                fold="fold_1/")
        convertFAIRPicklesToCSV("../results/rankingDumps/TREC/fold_2/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/TREC/",
                                fold="fold_2/")
        convertFAIRPicklesToCSV("../results/rankingDumps/TREC/fold_3/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/TREC/",
                                fold="fold_3/")
        convertFAIRPicklesToCSV("../results/rankingDumps/TREC/fold_4/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/TREC/",
                                fold="fold_4/")
        convertFAIRPicklesToCSV("../results/rankingDumps/TREC/fold_5/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/TREC/",
                                fold="fold_5/")
        convertFAIRPicklesToCSV("../results/rankingDumps/TREC/fold_6/",
                                "../../Meike-FairnessInL2R-Code/octave-src/sample/TREC/",
                                fold="fold_6/")
    elif args.create == ['syntheticsat']:
        createSyntheticSAT()
    #=======================================================
    elif args.evaluate == []:
        evaluator = Evaluator()
        evaluator.printResults()
    elif args.evaluate == ['compas_gender']:
        evaluator = Evaluator('compas_gender')
        evaluator.printResults()
        evaluator.plotOrderingUtilityVsPercentageOfProtected()
    elif args.evaluate == ['compas_race']:
        evaluator = Evaluator('compas_race')
        evaluator.printResults()
    elif args.evaluate == ['germancredit_25']:
        evaluator = Evaluator('germancredit_25')
        evaluator.printResults()
        evaluator.plotOrderingUtilityVsPercentageOfProtected()
    elif args.evaluate == ['germancredit_35']:
        evaluator = Evaluator('germancredit_35')
        evaluator.printResults()
    elif args.evaluate == ['germancredit_gender']:
        evaluator = Evaluator('germancredit_gender')
        evaluator.printResults()
    elif args.evaluate == ['xing']:
        evaluator = Evaluator('xing')
        evaluator.printResults()
    elif args.evaluate == ['sat']:
        evaluator = Evaluator('sat')
        evaluator.printResults()

    else:
        print("FA*IR \n running the full program \n Press ctrl+c to abort \n \n")
        createDataAndRankings()
        evaluator = Evaluator()
        evaluator.printResults()

        if EVALUATE_FAILURE_PROBABILITY:
            determineFailProbOfGroupFairnessTesterForStoyanovichRanking()
Code example #29
class Trainer(object):
    """Trainer Class"""
    def __init__(self, optimizer, criterion, batch_size, device):
        self.optimizer = optimizer
        self.criterion = criterion
        self.batch_size = batch_size
        self.device = device
        self.evaluator = Evaluator(criterion=self.criterion)

    def _train_batch(self, model, iterator, iteratorQuery, teacher_ratio,
                     clip):
        model.train()
        epoch_loss = 0
        for _, batch in enumerate(zip(iterator, iteratorQuery)):
            batch_ques, batch_query = batch
            src_ques, src_len_ques = batch_ques.src
            src_query, src_len_query = batch_query.src
            trg = batch_query.trg
            self.optimizer.zero_grad()
            input_trg = trg if model.name == RNN_NAME else trg[:, :-1]
            output = model(src_ques, src_len_ques, src_query, src_len_query,
                           input_trg, teacher_ratio)
            trg = trg.t() if model.name == RNN_NAME else trg[:, 1:]
            output = output.contiguous().view(-1, output.shape[-1])
            trg = trg.contiguous().view(-1)
            # output: (batch_size * trg_len) x output_dim
            # trg: (batch_size * trg_len)
            loss = self.criterion(output, trg)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
            self.optimizer.step()
            epoch_loss += loss.item()
        return epoch_loss / len(iterator)

    def _get_iterators(self, train_data, valid_data, model_name):
        return BucketIterator.splits((train_data, valid_data),
                                     repeat=False,
                                     batch_size=self.batch_size,
                                     sort_within_batch=False,
                                     sort_key=lambda x: len(x.src),
                                     device=self.device)

    def _epoch_time(self, start_time, end_time):
        elapsed_time = end_time - start_time
        elapsed_mins = int(elapsed_time / 60)
        elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
        return elapsed_mins, elapsed_secs

    def _log_epoch(self, train_loss, valid_loss, epoch, start_time, end_time):
        minutes, seconds = self._epoch_time(start_time, end_time)
        print(f'Epoch: {epoch+1:02} | Time: {minutes}m {seconds}s')
        print(
            f'\tTrain Loss: {train_loss:.3f} | Train PPL: {np.exp(train_loss):7.3f}'
        )
        print(
            f'\t Val. Loss: {valid_loss:.3f} |  Val. PPL: {np.exp(valid_loss):7.3f}'
        )

    def _train_epoches(self, model, train_data, train_data_query, valid_data,
                       valid_data_query, num_of_epochs, teacher_ratio, clip):
        best_valid_loss = float('inf')
        # pylint: disable=unbalanced-tuple-unpacking
        train_iterator, valid_iterator = self._get_iterators(
            train_data, valid_data, model.name)
        train_iterator_query, valid_iterator_query = self._get_iterators(
            train_data_query, valid_data_query, model.name)
        for epoch in range(num_of_epochs):
            start_time = time.time()
            train_loss = self._train_batch(model, train_iterator,
                                           train_iterator_query, teacher_ratio,
                                           clip)
            valid_loss = self.evaluator.evaluate(model, valid_iterator,
                                                 valid_iterator_query,
                                                 teacher_ratio)
            end_time = time.time()
            self._log_epoch(train_loss, valid_loss, epoch, start_time,
                            end_time)
            if valid_loss < best_valid_loss:
                best_valid_loss = valid_loss
                Chechpoint.save(model)

    def train(self,
              model,
              train_data,
              train_data_query,
              valid_data,
              valid_data_query,
              num_of_epochs=20,
              teacher_ratio=1.0,
              clip=1):
        """Train model"""
        self._train_epoches(model, train_data, train_data_query, valid_data,
                            valid_data_query, num_of_epochs, teacher_ratio,
                            clip)
Code example #30
class EvaluatorThreadsManager:
    def __init__(self, config,
                 time_frame,
                 symbol_time_frame_updater_thread,
                 symbol_evaluator,
                 exchange,
                 real_time_ta_eval_list,
                 relevant_evaluators=CONFIG_EVALUATORS_WILDCARD):
        self.config = config
        self.exchange = exchange
        self.symbol = symbol_evaluator.get_symbol()
        self.time_frame = time_frame
        self.symbol_time_frame_updater_thread = symbol_time_frame_updater_thread
        self.symbol_evaluator = symbol_evaluator

        # notify symbol evaluator
        self.symbol_evaluator.add_evaluator_thread_manager(self.exchange, self.time_frame, self)

        self.matrix = self.symbol_evaluator.get_matrix(self.exchange)

        self.thread_name = "TA THREAD MANAGER - {0} - {1} - {2}".format(self.symbol,
                                                                        self.exchange.get_name(),
                                                                        self.time_frame)
        self.logger = logging.getLogger(self.thread_name)

        # Create Evaluator
        self.evaluator = Evaluator()
        self.evaluator.set_config(self.config)
        self.evaluator.set_symbol(self.symbol)
        self.evaluator.set_time_frame(self.time_frame)
        self.evaluator.set_exchange(self.exchange)
        self.evaluator.set_symbol_evaluator(self.symbol_evaluator)

        # Add threaded evaluators that can notify the current thread
        self.evaluator.set_social_eval(self.symbol_evaluator.get_crypto_currency_evaluator().get_social_eval_list(),
                                       self)
        self.evaluator.set_real_time_eval(real_time_ta_eval_list, self)

        # Create static evaluators
        self.evaluator.set_ta_eval_list(self.evaluator.get_creator().create_ta_eval_list(self.evaluator,
                                                                                         relevant_evaluators), self)

        # Register in refreshing threads
        self.symbol_time_frame_updater_thread.register_evaluator_thread_manager(self.time_frame, self)

    def get_refreshed_times(self):
        return self.symbol_time_frame_updater_thread.get_refreshed_times(self.time_frame)

    def get_evaluator(self):
        return self.evaluator

    def notify(self, notifier_name):
        if self.get_refreshed_times() > 0:
            self.logger.debug("** Notified by {0} **".format(notifier_name))
            self._refresh_eval(notifier_name)
        else:
            self.logger.debug("Notification by {0} ignored".format(notifier_name))

    def _refresh_eval(self, ignored_evaluator=None):
        # update eval
        self.evaluator.update_ta_eval(ignored_evaluator)

        # update matrix
        self.refresh_matrix()

        # update strategies matrix
        self.symbol_evaluator.update_strategies_eval(self.matrix, self.exchange, ignored_evaluator)

        # calculate the final result
        self.symbol_evaluator.finalize(self.exchange)
        self.logger.debug("MATRIX : {0}".format(self.matrix.get_matrix()))

    def refresh_matrix(self):
        self.matrix = self.symbol_evaluator.get_matrix(self.exchange)

        for ta_eval in self.evaluator.get_ta_eval_list():
            if ta_eval.get_is_active():
                ta_eval.ensure_eval_note_is_not_expired()
                self.matrix.set_eval(EvaluatorMatrixTypes.TA, ta_eval.get_name(),
                                     ta_eval.get_eval_note(), self.time_frame)
            else:
                self.matrix.set_eval(EvaluatorMatrixTypes.TA, ta_eval.get_name(),
                                     START_PENDING_EVAL_NOTE, self.time_frame)

        for social_eval in self.evaluator.get_social_eval_list():
            if social_eval.get_is_active():
                social_eval.ensure_eval_note_is_not_expired()
                self.matrix.set_eval(EvaluatorMatrixTypes.SOCIAL, social_eval.get_name(),
                                     social_eval.get_eval_note(), None)
            else:
                self.matrix.set_eval(EvaluatorMatrixTypes.SOCIAL, social_eval.get_name(),
                                     START_PENDING_EVAL_NOTE)

        for real_time_eval in self.evaluator.get_real_time_eval_list():
            if real_time_eval.get_is_active():
                real_time_eval.ensure_eval_note_is_not_expired()
                self.matrix.set_eval(EvaluatorMatrixTypes.REAL_TIME, real_time_eval.get_name(),
                                     real_time_eval.get_eval_note())
            else:
                self.matrix.set_eval(EvaluatorMatrixTypes.REAL_TIME, real_time_eval.get_name(),
                                     START_PENDING_EVAL_NOTE)

    def start_threads(self):
        pass

    def stop_threads(self):
        for thread in self.evaluator.get_real_time_eval_list():
            thread.stop()

    def join_threads(self):
        for thread in self.evaluator.get_real_time_eval_list():
            thread.join()

    def get_symbol_time_frame_updater_thread(self):
        return self.symbol_time_frame_updater_thread

    def get_exchange(self):
        return self.exchange

    def get_symbol_evaluator(self):
        return self.symbol_evaluator
Code example #31
def main():
    setMemoryLimit(10000000000)

    # create the top-level parser
    parser = argparse.ArgumentParser(
        prog='FA*IR',
        description='a fair Top-k ranking algorithm',
        epilog="=== === === end === === ===")
    parser.add_argument(
        "-c",
        "--create",
        nargs='*',
        help="creates a ranking from the raw data and dumps it to disk")
    parser.add_argument("-e",
                        "--evaluate",
                        nargs='*',
                        help="evaluates rankings and writes results to disk")
    subparsers = parser.add_subparsers(help='sub-command help')

    # create the parser for the "create" command
    parser_create = subparsers.add_parser('dataset_create',
                                          help='choose a dataset to generate')
    parser_create.add_argument(
        dest='dataset_to_create',
        choices=["sat", "compas", "germancredit", "xing", "csat"])

    # create the parser for the "evaluate" command
    parser_evaluate = subparsers.add_parser(
        'dataset_evaluate', help='choose a dataset to evaluate')
    parser_evaluate.add_argument(dest='dataset_to_evaluate',
                                 choices=[
                                     "sat", "xing",
                                     "compas_gender", "compas_race",
                                     "germancredit_25", "germancredit_35",
                                     "germancredit_gender"
                                 ])

    args = parser.parse_args()

    if args.create == []:
        print("creating rankings for all datasets...")
        createDataAndRankings()
    elif args.create == ['sat']:
        createAndRankSATData(1500)
    elif args.create == ['compas']:
        createAndRankCOMPASData(1000)
    elif args.create == ['germancredit']:
        createAndRankGermanCreditData(100)
    elif args.create == ['xing']:
        createAndRankXingData(40)
    elif args.create == ['csat']:
        createAndRankChileData(1500)
    elif args.create == ['syntheticsat']:
        createSyntheticSAT(1000)
    #=======================================================
    elif args.evaluate == []:
        evaluator = Evaluator()
        evaluator.printResults()
    elif args.evaluate == ['compas_gender']:
        evaluator = Evaluator('compas_gender')
        evaluator.printResults()
        evaluator.plotOrderingUtilityVsPercentageOfProtected()
    elif args.evaluate == ['compas_race']:
        evaluator = Evaluator('compas_race')
        evaluator.printResults()
    elif args.evaluate == ['germancredit_25']:
        evaluator = Evaluator('germancredit_25')
        evaluator.printResults()
        evaluator.plotOrderingUtilityVsPercentageOfProtected()
    elif args.evaluate == ['germancredit_35']:
        evaluator = Evaluator('germancredit_35')
        evaluator.printResults()
    elif args.evaluate == ['germancredit_gender']:
        evaluator = Evaluator('germancredit_gender')
        evaluator.printResults()
    elif args.evaluate == ['xing']:
        evaluator = Evaluator('xing')
        evaluator.printResults()
    elif args.evaluate == ['sat']:
        evaluator = Evaluator('sat')
        evaluator.printResults()

    else:
        print(
            "FA*IR \n running the full program \n Press ctrl+c to abort \n \n")
        createDataAndRankings()
        evaluator = Evaluator()
        evaluator.printResults()

        if EVALUATE_FAILURE_PROBABILITY:
            determineFailProbOfGroupFairnessTesterForStoyanovichRanking()
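A quick, hedged illustration of the nargs='*' behaviour that main() depends on: passing -c with no value yields an empty list (create rankings for every dataset), -c sat yields ['sat'], and omitting the flag leaves the attribute as None, which is what sends execution into the final else branch. The parser below re-declares only the two flags locally so the example runs on its own.

import argparse

# Re-declared locally for illustration; mirrors the flags defined in main().
parser = argparse.ArgumentParser(prog='FA*IR')
parser.add_argument("-c", "--create", nargs='*')
parser.add_argument("-e", "--evaluate", nargs='*')

print(parser.parse_args(["-c"]).create)            # [] -> create all rankings
print(parser.parse_args(["-c", "sat"]).create)     # ['sat'] -> SAT ranking only
print(parser.parse_args(["-e", "xing"]).evaluate)  # ['xing'] -> evaluate the XING data
print(parser.parse_args([]).create)                # None -> falls through to the else branch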
Code example #32
class EvaluatorThreadsManager:
    def __init__(self,
                 config,
                 time_frame,
                 symbol_time_frame_updater_thread,
                 symbol_evaluator,
                 exchange,
                 trading_mode,
                 real_time_ta_eval_list,
                 relevant_evaluators=None):

        if relevant_evaluators is None:
            relevant_evaluators = CONFIG_EVALUATORS_WILDCARD
        self.config = config
        self.exchange = exchange
        self.trading_mode = trading_mode
        self.symbol = symbol_evaluator.get_symbol()
        self.time_frame = time_frame
        self.symbol_time_frame_updater_thread = symbol_time_frame_updater_thread
        self.symbol_evaluator = symbol_evaluator

        self.should_save_evaluations = (CONFIG_SAVE_EVALUATION in self.config
                                        and self.config[CONFIG_SAVE_EVALUATION])

        # notify symbol evaluator
        self.symbol_evaluator.add_evaluator_thread_manager(
            self.exchange, self.time_frame, self.trading_mode, self)

        self.matrix = self.symbol_evaluator.get_matrix(self.exchange)
        self.matrix_exporter = MatrixExporter(self.matrix, self.symbol)

        self.thread_name = f"TA THREAD MANAGER - {self.symbol} - {self.exchange.get_name()} - {self.time_frame}"
        self.logger = get_logger(self.thread_name)

        # Create Evaluator
        self.evaluator = Evaluator()
        self.evaluator.set_config(self.config)
        self.evaluator.set_symbol(self.symbol)
        self.evaluator.set_time_frame(self.time_frame)
        self.evaluator.set_exchange(self.exchange)
        self.evaluator.set_symbol_evaluator(self.symbol_evaluator)

        # Add threaded evaluators that can notify the current thread
        self.evaluator.set_social_eval(
            self.symbol_evaluator.get_crypto_currency_evaluator().get_social_eval_list(),
            self)
        self.evaluator.set_real_time_eval(real_time_ta_eval_list, self)

        # Create static evaluators
        self.evaluator.set_ta_eval_list(
            self.evaluator.get_creator().create_ta_eval_list(
                self.evaluator, relevant_evaluators), self)

        # Register in refreshing threads
        self.symbol_time_frame_updater_thread.register_evaluator_thread_manager(
            self.time_frame, self)

    # handle notifications from evaluators: when notified, refresh the symbol evaluation matrix
    def notify(self, notifier_name, force_TA_refresh=False):
        if self.get_refreshed_times() > 0:
            self.logger.debug(f"** Notified by {notifier_name} **")
            if force_TA_refresh:
                self.symbol_time_frame_updater_thread.force_refresh_data()
            self._refresh_eval(notifier_name)
        else:
            self.logger.debug(f"Notification by {notifier_name} ignored")

    def _refresh_eval(self, ignored_evaluator=None):
        # update eval
        self.evaluator.update_ta_eval(ignored_evaluator)

        # update matrix
        self.refresh_matrix()

        # update strategies matrix
        self.symbol_evaluator.update_strategies_eval(self.matrix,
                                                     self.exchange,
                                                     ignored_evaluator)

        # calculate the final result
        self.symbol_evaluator.finalize(self.exchange)

        # save evaluations if option is activated
        self._save_evaluations_if_necessary()

        self.logger.debug(f"MATRIX : {self.matrix.get_matrix()}")

    def refresh_matrix(self):
        self.matrix = self.symbol_evaluator.get_matrix(self.exchange)

        for ta_eval in self.evaluator.get_ta_eval_list():
            if ta_eval.get_is_active():
                ta_eval.ensure_eval_note_is_not_expired()
                self.matrix.set_eval(EvaluatorMatrixTypes.TA,
                                     ta_eval.get_name(),
                                     ta_eval.get_eval_note(), self.time_frame)
            else:
                self.matrix.set_eval(EvaluatorMatrixTypes.TA,
                                     ta_eval.get_name(),
                                     START_PENDING_EVAL_NOTE, self.time_frame)

        for social_eval in self.evaluator.get_social_eval_list():
            if social_eval.get_is_active():
                social_eval.ensure_eval_note_is_not_expired()
                self.matrix.set_eval(EvaluatorMatrixTypes.SOCIAL,
                                     social_eval.get_name(),
                                     social_eval.get_eval_note(), None)
            else:
                self.matrix.set_eval(EvaluatorMatrixTypes.SOCIAL,
                                     social_eval.get_name(),
                                     START_PENDING_EVAL_NOTE)

        for real_time_eval in self.evaluator.get_real_time_eval_list():
            if real_time_eval.get_is_active():
                real_time_eval.ensure_eval_note_is_not_expired()
                self.matrix.set_eval(EvaluatorMatrixTypes.REAL_TIME,
                                     real_time_eval.get_name(),
                                     real_time_eval.get_eval_note())
            else:
                self.matrix.set_eval(EvaluatorMatrixTypes.REAL_TIME,
                                     real_time_eval.get_name(),
                                     START_PENDING_EVAL_NOTE)

    def _save_evaluations_if_necessary(self):
        if self.should_save_evaluations and self.symbol_evaluator.are_all_timeframes_initialized(
                self.exchange):
            self.matrix_exporter.save()

    def start_threads(self):
        pass

    def stop_threads(self):
        for thread in self.evaluator.get_real_time_eval_list():
            thread.stop()

    def join_threads(self):
        for thread in self.evaluator.get_real_time_eval_list():
            thread.join()

    def get_refreshed_times(self):
        return self.symbol_time_frame_updater_thread.get_refreshed_times(
            self.time_frame)

    def get_evaluator(self):
        return self.evaluator

    def get_symbol_time_frame_updater_thread(self):
        return self.symbol_time_frame_updater_thread

    def get_exchange(self):
        return self.exchange

    def get_symbol_evaluator(self):
        return self.symbol_evaluator

    def get_symbol(self):
        return self.symbol
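The notify()/_refresh_eval() pair above is the heart of the manager: notifications are ignored until the time-frame updater has refreshed data at least once, and a notifier may force a data refresh before re-evaluating. The sketch below is a stripped-down, self-contained imitation of that gate; the class and evaluator names are illustrative, not the real project API.

class ToyThreadManager:
    """Illustrative gate mirroring notify(): ignore notifications until data exists."""

    def __init__(self):
        self.refreshed_times = 0   # stands in for the time-frame updater's counter
        self.evaluations_run = 0

    def notify(self, notifier_name, force_ta_refresh=False):
        if self.refreshed_times > 0:
            if force_ta_refresh:
                print(f"forcing a data refresh before evaluating ({notifier_name})")
            self.evaluations_run += 1
        else:
            print(f"notification by {notifier_name} ignored: no candle data yet")


manager = ToyThreadManager()
manager.notify("TelegramNewsEvaluator")                       # ignored, nothing refreshed yet
manager.refreshed_times = 1                                    # simulate the first candle refresh
manager.notify("InstantFluctuationsEvaluator", force_ta_refresh=True)
print(manager.evaluations_run)                                 # 1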
Code example #33
    def predict(self):

        intent_set, slot_set = self.train_data.get_labels()

        # train slot filler and intent parser on same dataset

        hmm = HMM3(slot_set, intent_set)
        hmm.train_model(self.train_data)
        hmm.test_model(self.test_data)

        # - - - With SVM - - - #

        # svm = SVM( self.train_data, self.test_data )
        # svm.train()
        # # choose intent with highest prob as the one predicted
        # for x in self.test_data:
        #     intent_probs = x.get_intent_probabilities()
        #     predicted_intent = sorted(intent_probs.items(), key=itemgetter(1))[-1][0]
        #     x.set_pred_intent(predicted_intent)

        # - - - With BiLSTM - - - #

        model = BiLSTM(self.train_data, self.test_data)
        model.train()
        model.rnn_model()
        for x in self.test_data:
            intent_probs = x.get_intent_probabilities()
            predicted_intent = sorted(intent_probs.items(),
                                      key=itemgetter(1))[-1][0]
            x.set_pred_intent(predicted_intent)

        # report results
        print()
        print('=' * 50)
        print('SLOT FILLER AND INTENT PARSER TESTED SEPARATELY')
        e = Evaluator(self.test_data, intent_set, slot_set)
        print(e)

        # choose slots and intents with highest joint probability
        for x in self.test_data:
            joint_probs = {}
            for intent in intent_set:
                intent_prob = self.get_log_prob(
                    x.get_intent_probabilities()[intent])
                slot_prob = x.get_pred_slots_per_intent()[intent]['prob']
                joint_probs[intent] = intent_prob + slot_prob
            highest_intent = sorted(joint_probs.items(),
                                    key=itemgetter(1))[-1][0]
            x.set_pred_intent(highest_intent)
            x.set_pred_slots({
                'slots': x.get_pred_slots_per_intent()[highest_intent]['slots'],
                'prob': 0
            })

        print()
        print('=' * 50)
        print('SLOT FILLER AND INTENT PARSER TESTED JOINT')
        e2 = Evaluator(self.test_data, intent_set, slot_set)
        print(e2)

        print('=' * 50)
        print()
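The joint decoding step above adds the log intent probability to the slot-sequence score and keeps the intent with the highest sum. A minimal, self-contained sketch of that decision rule follows; the dictionaries are illustrative stand-ins for the snippet's test-data objects, and the slot scores are assumed to already be log-scaled, matching get_log_prob above.

import math
from operator import itemgetter

intent_probs = {"book_flight": 0.70, "play_music": 0.25, "set_alarm": 0.05}
slot_scores = {"book_flight": -2.1, "play_music": -0.4, "set_alarm": -5.0}

# joint score = log P(intent) + log P(slots | intent)
joint = {intent: math.log(p) + slot_scores[intent] for intent, p in intent_probs.items()}
best_intent = sorted(joint.items(), key=itemgetter(1))[-1][0]

# Note: the joint winner (play_music) differs from the intent-only argmax (book_flight),
# which is exactly the situation the joint decoding step is meant to handle.
print(best_intent, joint[best_intent])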