def assign(profile_fn, outfile=None):
    """Run the two-level scoring pipeline on a PSI-BLAST profile file.

    Parameters
    ----------
    profile_fn : str
        Path to the PSI-BLAST output file parsed by ``bi.parse_psiblast``.
    outfile : str or None, optional
        Destination passed through to ``out.output`` (None lets ``out``
        pick its default).

    Returns
    -------
    query.Query
        The query object after both scoring levels, assignment and
        confidence computation.
    """
    blast_data = bi.parse_psiblast(profile_fn)
    qry = query.Query("test")  # query id "test" is hard-coded upstream of here
    qry.set_input(blast_data)

    # Level 1: score on profile + sequence features; one Scorer per class
    # boundary (num_classes - 1 separators), each with its own coefficients.
    sc1_input = fg.ScorerInput(["profile", "sequence"])
    sc1_input.set_data(qry)
    scorers1 = [scorer.Scorer(para.lvl1_coef[i], sc1_input)
                for i in range(para.num_classes - 1)]
    qry.set_structure_vector(scorers1)

    # Level 2: re-score using the level-1 structure vector plus the profile.
    sc2_input = fg.ScorerInput(["structure", "profile"])
    sc2_input.set_data(qry)
    scorers2 = [scorer.Scorer(para.lvl2_coef[i], sc2_input)
                for i in range(para.num_classes - 1)]
    qry.set_scores(scorers2)

    qry.set_assignment(expected.get_expected(para.num_classes))
    qry.set_confidence()

    out.output(qry, outfile)
    return qry
def main_wrapper():
    """Play rounds until the player stops, then print the session stats."""
    word_list = read_subword_file('data/linux_command.txt')
    session = scorer.Scorer()
    while is_continued(session):
        main(word_list, session)
    # Report correct count, incorrect count, and elapsed time in order.
    for stat in (session.num_correct, session.num_incorrect, session.end_time()):
        print(stat)
# --- Experiment session setup (Python 2 script: raw_input / print statement /
# iteritems). Indentation reconstructed from a collapsed source line; loop
# bodies were assumed single-statement — confirm against the original file. ---
confpath = os.path.join('..', 'conf')
random.seed()
subjectid = ""
sessionid = ""
# Prompt until a non-empty subject ID is entered.
while subjectid == "" or subjectid == None:
    subjectid = raw_input("Subject ID: ")
print subjectid
sessionid = 0
# Re-seed with subject ID + wall clock so each subject/session differs.
random.seed(subjectid + str(time.time()))
# NOTE(review): these rebinds shadow the imported `drawer`/`scorer` modules
# with instances, making the modules unreachable afterwards — confirm intended.
drawer = drawer.Drawer()
scorer = scorer.Scorer()
acceptedkeys = ['s', 'd', 'f', 'j', 'k', 'l']
sessionid = 0  # NOTE(review): sessionid is assigned 0 twice; this is redundant
exptname = "skill3"
gensettings = readConfig(os.path.join(confpath, 'configuration.txt'))
colort = readConfig(os.path.join(confpath, 'colors.txt'))
colors = []
# Convert each color config value to ints (Py2 map returns a list here).
for k, v in colort.iteritems():
    colors.append(map(int, v))
print colors
letters = [
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
    'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'
help='Display result limit')
# Weights for the three match criteria; the defaults sum to 1.0.
parser.add_argument('--loc-weight', default=0.40, type=float,
                    help='Location distance weight')
parser.add_argument('--job-weight', default=0.30, type=float,
                    help='Job Title Weight')
parser.add_argument('--industry-weight', default=0.30, type=float,
                    help='Industry Type Weight')
args = parser.parse_args()
# Forward every parsed option to the Scorer as keyword arguments.
# NOTE(review): this rebind shadows the imported `scorer` module with the
# instance — harmless here but confusing; confirm intended.
scorer = scorer.Scorer(**args.__dict__)
scorer.run()
# Superseded inline implementation, kept commented out for reference:
# items = load_data(args)
# project = load_project(args)
# processed = list()
# for item in items:
#     if not is_within_100(project, item):
#         print(f"Excluding participant {item['firstname']} > 100 km")
#         continue
#     score = compute_score(args, project, item)
#     item['score'] = score
#     processed.append(item)
def __init__(self):
    """Initialize base-class state, a reentrant lock, and a Scorer."""
    super().__init__()
    # RLock (not Lock) so a method holding the lock can call another
    # method that also acquires it without deadlocking.
    self.lock = threading.RLock()
    self.scorer = scorer.Scorer()
dest_db = get_dest_db()
if dest_db is None:
    dest_db = create_dest_db()
else:
    # NOTE(review): indentation reconstructed from a collapsed source line —
    # the owner of this else and the extent of its body are ambiguous; confirm
    # against the original. As written, source_db is only bound on this branch,
    # so the for-loop below would raise NameError when the if-branch ran.
    source_db = get_source_db()
dest_db = None
# Get the destination DB in a loop to handle the concurrency problem where
# slave nodes try to acquire the dest DB before the master node creates it.
while dest_db == None:
    dest_db = get_dest_db()
alcohols_dict = "./topic analysis/dictionary/alcohols.txt"
fastfood_dict = "./topic analysis/dictionary/fastfood.txt"
smoking_dict = "./topic analysis/dictionary/smoking.txt"
# create Scorer objects for each topic
# so that the resource only needs to be loaded once.
alcohols_scorer = scorer.Scorer(senti_analyzer, alcohols_dict)
fastfood_scorer = scorer.Scorer(senti_analyzer, fastfood_dict)
smoking_scorer = scorer.Scorer(senti_analyzer, smoking_dict)
# index records the number of tweets
index = 0
# records the number of results in the buffer
buffer = 0
# A temporary list to store results.
# if the buffer is full, data in result will be written into dest DB,
# and buffer and result will be reset.
result = []
for ele in source_db:
    index += 1
    print(index)
    # each node only handles the tweets they are responsible for
    # (round-robin partition by MPI-style rank).
    if index % comm_size == comm_rank: