def call_yoci(config=None, ci=None, req_type=None, name=None, verbose=False):
    """Dispatch a CI request to the matching handler class.

    :param config: path to the config file; falls back to
        ``DEFAULT_CONFIG_FILE`` when falsy.
    :param ci: CI system name; only 'Travis' (case-insensitive) is
        supported and it is also the default.
    :param req_type: request type; ``req_type.title()`` is looked up as a
        class on the ``travis`` module (defaults to 'repos' -> ``Repos``).
    :param name: identifier forwarded to the handler as ``id``.
    :param verbose: enable verbose logging.
    :raises RuntimeError: if the CI system or request type is unknown.
    """
    _set_global_verbosity_level(verbose)
    config = config or DEFAULT_CONFIG_FILE
    ci = ci or 'Travis'
    req_type = req_type or 'repos'
    ci_config = import_config(config)
    # Guard clauses instead of nested if/else; compare against the
    # pre-folded constant 'travis' rather than calling 'Travis'.lower()
    # at every invocation.
    if ci.lower() != 'travis':
        raise RuntimeError('ci {} not supported'.format(ci))
    handler_name = req_type.title()  # computed once, used twice
    if not hasattr(travis, handler_name):
        raise RuntimeError('type not found')
    # getattr(ci_instance, req_type)
    getattr(travis, handler_name)(lgr, ci_config[ci.title()], id=name)
def call_yoci(config=None, ci=None, req_type=None, name=None, verbose=False):
    """Route a CI request to the handler class named by ``req_type``.

    Defaults: config -> DEFAULT_CONFIG_FILE, ci -> 'Travis',
    req_type -> 'repos'.  Raises RuntimeError for an unknown CI system
    or request type.
    """
    _set_global_verbosity_level(verbose)
    if not config:
        config = DEFAULT_CONFIG_FILE
    if not ci:
        ci = 'Travis'
    if not req_type:
        req_type = 'repos'
    ci_config = import_config(config)
    if ci.lower() == 'Travis'.lower():
        handler_name = req_type.title()
        if hasattr(travis, handler_name):
            # getattr(ci_instance, req_type)
            getattr(travis, handler_name)(lgr, ci_config[ci.title()], id=name)
        else:
            raise RuntimeError('type not found')
    else:
        raise RuntimeError('ci {} not supported'.format(ci))
def __init__(self, input_file, input_ent, output_weka, output_nn, limit):
    """Set up the feature-extraction state.

    Stores the input/output paths, loads the 'features' configuration,
    gazetteers and annotated entities, and initializes the POS/chunk
    tagger together with empty tag sets and the token accumulator.
    """
    # plain path / limit settings
    self.input_file = input_file
    self.input_ent = input_ent
    self.out_weka = output_weka
    self.out_nn = output_nn
    self.limit = limit
    # resources loaded from configuration and input files
    self.cfg_feat = import_config('features')
    self.gazzetters = self.import_gazetters()
    self.DictEntities = self.import_entities_annotated()
    # tagging state and output accumulators
    self.tagger = PosChunkTagger()
    self.POSTags = set()
    self.ChunkTags = set()
    self.out_tokens = []
def __init__(self, input_file, schedule_file, limit, work_tsl,
             contr_tsl, time_tsl):
    """Prepare the schedule matcher.

    Stores the run parameters, loads the 'matcher' configuration,
    tweets, schedule and stopwords, zeroes all evaluation counters and
    builds the results file path from the three threshold values.
    """
    # run parameters
    self.input_file = input_file
    self.schedule_file = schedule_file
    self.limit = limit
    self.work_tsl = work_tsl
    self.contr_tsl = contr_tsl
    self.time_tsl = time_tsl
    # loaded resources
    self.cfg_match = import_config('matcher')
    self.DictTweets = self.import_ugc_tweets()
    self.DictSched = self.import_schedule()
    self.stopwords = self.import_stopwords()
    # Initialize counters
    self.tp_count = self.fp_count = self.tn_count = self.fn_count = 0
    self.tp_c_count = self.fp_c_count = 0
    self.tn_c_count = self.fn_c_count = 0
    self.outfile = "../results/schedule_matcher_%s_%s_%s.txt" % (
        work_tsl, contr_tsl, time_tsl)
import datetime
import copy
import os
import tensorflow as tf
from dataclass import data, reader, mnist_data
from argparse import Namespace
import utils
import argparse

# Project-wide configuration loaded once at import time.
CONFIG = utils.import_config()


def evaluate(classifier, params, eval_dir=None, result_dir=None,
             wrong_fn=None, save_all=False):
    """
    Get the predictions based on the features given by the input_fn.
    The goal here was to make something that would make a folder to show all
    the instances where the model went wrong. Unfortunately this does not
    work with the current version of tensorflow.
    """
    # At least one output destination must be given.
    # NOTE(review): assert is stripped under `python -O`; an explicit
    # ValueError would be safer — confirm before changing.
    assert result_dir is not None or wrong_fn is not None
    # Fall back to the MNIST test CSV from the global configuration.
    if eval_dir is None:
        eval_dir = CONFIG["MNIST"]["test"]["csv"]
    # Accumulator for per-run results; the remainder of this function is
    # outside the visible chunk.
    json_dict = {}
def __init__(self, inpath):
    """Remember the input path and load the 'twitter_api' configuration."""
    self.inpath = inpath
    self.cfg_tw_api = import_config('twitter_api')