Example #1
    def __init__(self, loop=None, post=False):
        self._loop = loop or asyncio.get_event_loop()
        self._method = 'POST' if post else 'GET'
        self.finished = 0
        self.sem = asyncio.Semaphore(100, loop=self._loop)
        self.https_judge = Judge('https://httpbin.skactor.tk/anything')
        self.http_judge = Judge('http://httpbin.skactor.tk:8080/anything')
Example #2
def test_RPSXGame():
    p1 = Player('p1', bot=True)
    bot = Player('p2', bot=True)
    game = RPSXGame(p1, bot, Judge())
    for i in range(10):
        game.play()
    plp('test_RPSXGame()')
Example #3
    def get_content(self):
        # 1. Extract the basic text blocks
        self.parser = Parser(self.url)
        ns_list = self.parser.ns()
        self.title = self.parser.get_title()
        # 2. Partition the text into blocks
        self.partitioner = Partitioner()
        blocks = self.partitioner.partition(ns_list)

        # 3. Extract the main content block; analysis info is a by-product
        self.judge = Judge(self.title.string, ns_list)
        res = self.judge.select(blocks, ns_list)

        flag = res['flag']
        cblock = res['block']
        confidence = res['confidence']
        detail = res['detail']
        #if flag:
        content = cblock.to_str()
        (srcs, images) = self.get_images(cblock)
        cblock = self.insert_images(cblock, images)
        content_with_format = cblock.to_str_with_format()
        #else:
        #    content = ""
        #    content_with_format = ""
        #    srcs = None
        return (flag, self.title.string.strip(), content, content_with_format,
                srcs, confidence, detail)
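
A minimal usage sketch for this pipeline, assuming the method above lives on a hypothetical Extractor class constructed from a URL (the enclosing class is not shown in the snippet):

extractor = Extractor('http://example.com/article')  # hypothetical enclosing class
(flag, title, content, content_with_format,
 srcs, confidence, detail) = extractor.get_content()
if flag:
    print(title, confidence)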
Example #4
class GatekeeperHandler(ProxyHandler):
	"""
	Uses classifiers and judges to modify HTTP requests before they are
	passed on (to the browser or to the web).
	"""
	judge = Judge()
	update_filename = path.join(path.dirname(path.dirname(path.realpath(__file__))), 'precommitment', 'has_changed')
	print("\nReady to start processing requests.\n")

	def mitm_request(self, data):
		"""Modifies requests."""

		# NOTE we always allow requests through -- we only categorize and judge responses
		# this could be changed

		url = self.headers['Host'] + self.path
		trimmed_url = trim_to(url, 40)

		log_request(url, self.command)

		s_print('>> to: %s' % trimmed_url)

		# check for updates
		if path.isfile(self.update_filename):
			with open(self.update_filename, "r") as f:
				new_access_level = f.read().strip()
			system("rm -f " + self.update_filename)
			s_print("\n\nUPDATING ACCESS LEVEL: %s\n" % new_access_level)
			self.judge.load_access_rules(new_access_level)
			global CLASSIFIER
			CLASSIFIER.reload() # throw away old classifier, create a new one (because e.g. maybe we added a temporarily_blocked entry)

		return data

	def mitm_response(self, data):
		"""Modifies responses."""

		url = self.headers['Host'] + self.path
		trimmed_url = trim_to(url, 40)

		s_print('<< judging: %s' % trimmed_url)

		global CLASSIFIER
		categories = CLASSIFIER.classify(self, data)
		judgment, info = self.judge.render_judgment(categories)

		log_response(url, categories, judgment, info, self.command)

		s_print("<< %s\nJudged:\t%s => %s (%s)\n" % (trimmed_url, categories, judgment, info))

		if judgment == "BLOCK":
			return refuse(categories, info)
		elif judgment == "DELAY":
			sleep(float(info))
			return data
		elif judgment == "ALLOW":
			return data
		else:
			raise Exception("Unrecognized judgment: %s (%s)" % (judgment, info))
Example #5
def main():
    if len(sys.argv) < 8:  # need project_dir plus six dataset paths
        raise Exception("Check your arguments please!")

    project_dir = sys.argv[1]
    user_dataset = []

    def get_user_dataset():
        sort_input_path = sys.argv[2]
        sort_output_path = sys.argv[3]
        freq_input_path = sys.argv[4]
        freq_output_path = sys.argv[5]
        matmul_input_path = sys.argv[6]
        matmul_output_path = sys.argv[7]
        user_dataset.append(sort_input_path)
        user_dataset.append(sort_output_path)
        user_dataset.append(freq_input_path)
        user_dataset.append(freq_output_path)
        user_dataset.append(matmul_input_path)
        user_dataset.append(matmul_output_path)

    get_user_dataset()

    server_dataset = []

    def get_server_dataset():
        server_sort_input_file = "./datasets/sorting/sort.in"
        server_sort_output_file = "./datasets/sorting/sort.out"
        server_freq_input_file = "./datasets/freq/freq.in"
        server_freq_output_file = "./datasets/freq/freq.out"
        server_matmul_input_file = "./datasets/matmul/matmul.in"
        server_matmul_output_file = "./datasets/matmul/matmul.out"
        server_dataset.append(server_sort_input_file)
        server_dataset.append(server_sort_output_file)
        server_dataset.append(server_freq_input_file)
        server_dataset.append(server_freq_output_file)
        server_dataset.append(server_matmul_input_file)
        server_dataset.append(server_matmul_output_file)

    get_server_dataset()

    if len(server_dataset) != len(user_dataset):
        raise Exception("User and Server datasets aren't equal size!")

    for i in range(len(server_dataset)):
        judge = Judge(server_dataset[i], user_dataset[i])

        if not judge.check():
            raise Exception("Files ", server_dataset[i], "and",
                            user_dataset[i], "aren't matched!")
Example #6
def test_judge_part2():
    judge = Judge()

    stop = 5000000
    genA = generatorAPart2(65, stop)
    genB = generatorBPart2(8921, stop)

    for _ in range(stop):
        valueA = next(genA)
        valueB = next(genB)

        judge.compare(valueA, valueB)

    assert judge.getCount() == 309
Example #7
def main():
    judge = Judge()

    stop = 40000000
    genA = generatorA(512, stop)
    genB = generatorB(191, stop)

    for _ in range(stop):
        valueA = next(genA)
        valueB = next(genB)

        judge.compare(valueA, valueB)

    print(judge.getCount())
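
Examples #6 and #7 only exercise the Judge interface; a minimal implementation consistent with them might compare the low 16 bits of each pair of generated values (an assumption about how this duel is scored):

class Judge:
    """Hypothetical minimal judge: counts pairs whose low 16 bits agree."""

    def __init__(self):
        self._count = 0

    def compare(self, value_a, value_b):
        if value_a & 0xFFFF == value_b & 0xFFFF:
            self._count += 1

    def getCount(self):
        return self._count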
Example #8
File: main.py Project: jvmncs/safe-debates
def main(args):
    """main man"""
    # reproducibility
    if args.seed is not None:
        torch.manual_seed(args.seed)  # unsure if this works with SparseMNIST right now
        np.random.seed(args.seed)

    # cuda
    args.use_cuda = not args.no_cuda and torch.cuda.is_available()
    args.device = torch.device("cuda" if args.use_cuda else "cpu")

    # data
    dataset = MNIST('./data/', train=False, transform=ToTensor())
    kwargs = {'num_workers': 1}
    if args.use_cuda:
        kwargs['pin_memory'] = True
    data_loader = DataLoader(dataset, args.batch_size, shuffle=True, **kwargs)
    if args.rounds is None:
        args.rounds = len(dataset) // args.batch_size

    # load judge
    judge_state = torch.load(args.checkpoint)['state_dict']

    # debate game
    judge = Judge().to(args.device)
    judge.load_state_dict(judge_state)
    judge.eval()
    helper = Agent(honest=True, args=args)
    liar = Agent(honest=False, args=args)
    debate = Debate((helper, liar), data_loader, args)

    total_meter = AverageMeter()
    class_meters = [AverageMeter() for i in range(10)]

    # TODO precommit logic
    for round_ix in range(args.rounds):
        print("starting round {}".format(round_ix))
        helper.precommit_(None, None)
        liar.precommit_(None, None)
        result = debate.play(judge, args.device)
        track_stats_(total_meter, class_meters, result['helper']['preds'],
                     result['helper']['wins'], result['labels'],
                     args.precommit)

    print('Total accuracy: {}'.format(total_meter.avg))
    print('Accuracy per class\n==============================================')
    for i in range(10):
        print('Digit {}: {}'.format(i, class_meters[i].avg))
Example #9
def daemon():
    def callback(ch, method, properties, body):
        print(" [x] Received judge request %r" % (body, ))  # for debug
        s = Sub()
        try:
            s.getSub(body)
            c = Compile()
            c.compile(s)
        except Exception as e:
            print('compile err:', e)  # for debug

        if s.status == jcnf.SUB_STATUS['judging']:
            try:
                j = Judge()
                j.judge(s)
            except Exception as e:
                print('judge err:', e)  # for debug
Example #10
def create_judge_roster(csv_filename):
    with open(csv_filename, encoding="utf-8") as csvfile:
        judge_roster = list()
        csvreader = csv.DictReader(csvfile)

        # Create an entry in the roster for each judge with their contact details, preferred categories, and availability
        for row in csvreader:
            if not any(row.values()):
                continue

            new_presentation_availability = list()
            for column_name, times_selected in row.items():
                if column_name not in JudgeColumnNames.JUDGE_AVAILABILITY_COLUMN_NAMES:
                    continue
                column_date = column_name_to_date(column_name)
                if times_selected:
                    for time_slot in times_selected.split(","):
                        if not time_slot:
                            continue
                        index_at_00_min = date_and_time_to_index(
                            column_date,
                            time_slot_to_time(time_slot),
                        )
                        new_presentation_availability.append(index_at_00_min)
                        new_presentation_availability.append(index_at_00_min + 0.5)

            new_judge = Judge(
                judge_id=csvreader.line_num,  # use the line number as a sequential ID field for each judge
                first=row[JudgeColumnNames.FIRST_NAME],
                last=row[JudgeColumnNames.LAST_NAME],
                email=row[JudgeColumnNames.EMAIL],
                phone=row[JudgeColumnNames.PHONE],
                preferred_categories=[
                    JUDGE_CATEGORIES[category] for category in JUDGE_CATEGORIES
                    if category in row[JudgeColumnNames.PREFERRED_CATEGORIES]
                ],
                is_paper_reviewer=row[JudgeColumnNames.IS_PAPER_REVIEWER] == "Yes",
                presentation_availability=new_presentation_availability,
            )
            judge_roster.append(new_judge)

    return judge_roster
Example #11
File: web.py Project: amoghn4/pythons4
    def post(self):
        try:
            payload = json.loads(self.request.body)
        except ValueError:
            raise
        if "id" not in payload or 'src' not in payload:
            raise Exception("Argument missing")
        pid = int(payload['id'])
        src = payload['src']
        judge = Judge(pid, src, mode="inline")
        ok, msg = judge.run_tests()

        if not ok:
            ret = json.dumps({"pass": False, "msg": msg})
        else:
            ret = json.dumps({"pass": True})
        self.set_header("content-type", "application/json")
        self.write(ret)
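
A hypothetical client call for this handler (the host, port, and /judge mount path are assumptions; only the JSON contract appears in the snippet):

import json
import urllib.request

req = urllib.request.Request(
    'http://localhost:8888/judge',  # hypothetical endpoint
    data=json.dumps({'id': 1, 'src': 'print(42)'}).encode(),
    headers={'content-type': 'application/json'})
with urllib.request.urlopen(req) as resp:
    print(json.load(resp))  # {"pass": true} or {"pass": false, "msg": ...}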
Example #12
def main():
    trainIndex = 2
    rootPath = '../../exam_map/train' + str(trainIndex)
    carPath = os.path.join(rootPath, 'car.txt')
    roadPath = os.path.join(rootPath, 'road.txt')
    crossPath = os.path.join(rootPath, 'cross.txt')
    answerPath = os.path.join(rootPath, 'answer.txt')
    presetAnswerPath = os.path.join(rootPath, 'presetAnswer.txt')
    start = time.time()

    carsDict = read_car(carPath)
    roadsDict = read_road(roadPath)
    crossesDict = read_cross(crossPath, roadsDict)
    read_answer(carsDict, answerPath, presetAnswerPath)
    judge = Judge(carsDict, roadsDict, crossesDict)
    judge.init()
    judge.judge()

    print("total run time: ", time.time() - start)
    print("map {} done!".format(trainIndex))
Example #13
def set_content():
    content_valid = False
    content_type, content = None, None
    while not content_valid:
        content_input = input(
            "\nPlease type in the absolute path of the "
            "file/directory\nIf you want to select "
            "multiple files, please add four colons(::::) "
            "between the file paths.\nDo not choose multiple"
            " directories or a combination of files and "
            "directories.\nYou can type in 'exit()' to exit.")
        check_user_exit(content_input)
        content = content_input.split("::::")
        judge = Judge(content)
        content_type, content = judge.judge_content()
        if content_type <= 0:
            print(
                "Wrong value received!\nDetails: {} Please Try again.".format(
                    content))
        else:
            content_valid = True
    return content_type, content
Example #14
    def handle_match_request(self, cs, p):
        pl('handling match request from {}'.format(str(p)))
        if self.has_players():
            pl('setting up normal match...')
            raise NotImplementedError("normal match not implemented yet")
        else:
            pl('setting up bot match...')
            p2 = Player("Gunhilda", bot=True)

            ## Whose responsibility is this?
            # probably the server's, right?
            # - think so, since it is the one serving the match and rules etc.
            if not p.has_moves_left():
                p.reset_moves()

            match = RPSXGame(p, p2, Judge(), bot=True)
            # One more match
            self.stats.add_match()

            self.send_cmd(cs, Command.OK)

            pl('match set up')
            self.handle_bot_match(cs, match, p2)
Example #15
def test():
    print("Testing game")
    print("0 = draw, -1 = p1 won, +1 = p2 won")
    p1 = Player('p1')
    p2 = Player('p2')
    match = RPSXGame(p1, p2, Judge())
    match.set_p1_move(Move.ROCK)
    match.set_p2_move(Move.PAPER)
    match.play()  # P2 should win
    score = match.get_snapshot().split(':')[2]
    pl(score)
    match.set_p1_move(Move.ROCK)
    match.set_p2_move(Move.ROCK)
    match.play()  # Should be a tie
    score = match.get_snapshot().split(':')[2]
    pl(score)
    match.set_p1_move(Move.ROCK)
    match.set_p2_move(Move.SCISSORS)
    match.play()  # P1 should win
    score = match.get_snapshot().split(':')[2]
    pl(score)
    match.finish()
    winner = match.get_winner()
    pl("Winner is: {}".format(winner))
Example #16
    def get_timeline(self, api, t_name):
        judge = Judge()
        try:
            for tweet in api.user_timeline(id=t_name, count=self.__num):
                #for tweet in api.user_timeline(id = Twitterkey.id, count = self.__num):
                if judge.my_timeline_multi(tweet):
                    try:
                        self.__status_media[self.__count] = tweet
                        self.__count += 1
                    except Exception:
                        pass

                elif judge.my_timeline_single(tweet):
                    try:
                        self.__status_media[self.__count] = tweet
                        self.__count += 1
                    except Exception:
                        pass
        except tp.error.TweepError:
            for _ in tqdm(range(15 * 60)):
                time.sleep(1)

        return self.__status_media
Example #17
import shutil
from config import config
from judge import Judge
import logging
import math
import inspect
import ctypes
import time
from tkinter import messagebox

logging.basicConfig(
    level=logging.ERROR,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    filename='log/error.log')
logger = logging.getLogger(__name__)
judge_handler = Judge()


def _async_raise(tid, exctype):
    """raises the exception, performs cleanup if needed"""
    tid = ctypes.c_long(tid)
    if not inspect.isclass(exctype):
        exctype = type(exctype)
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid,
                                                     ctypes.py_object(exctype))
    if res == 0:
        raise ValueError("invalid thread id")
    elif res != 1:
        # """if it returns a number greater than one, you're in trouble,
        # and you should call it again with exc=NULL to revert the effect"""
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
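
A common companion helper, sketched here as an assumption about how _async_raise gets used (the standard recipe for forcibly stopping a thread):

def stop_thread(thread):
    """Hypothetical helper: force-stop a thread by raising SystemExit inside it."""
    _async_raise(thread.ident, SystemExit)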
Example #18
#     dict_crit= criteria_dict_book[sect.kind.value]
    

#     for key, details in dict_crit.items():
#         method_to_call = getattr(Judge, details['function'])
#         result = method_to_call(df,details['breakpoint'], details['rates'])
#        # result = altitude_lost(df,details['breakpoint'], details['rates'])
#         print(details['name'],result)


# %% using class 


path_file = "judge1.json"

jdg = Judge(path_file)

# jdg.dict_criteria = criteria_dict_book

# jdg.save_judge("judge1.json")



# %%
# jdg2=Judge()

# print(jdg2)
# jdg2.load_judge('judge1.json')

# print(jdg2)
Example #19
    sources = list({"lastfm", "discogs", "tagtraum"} - {target})
    dataset_path = os.path.join(utils.FOLDS_DIR,
                                "{0}_4-fold_by_artist.tsv".format(target))
    dhelper = DataHelper(sources, target, dataset_path=dataset_path)
    print(dhelper.dataset_df)

    source_tags = [
        f"{source}:{el}" for source in sources
        for el in utils.corpus_genres_for_source(dhelper.dataset_df, source)
    ]
    target_tags = [
        f"{target}:{el}"
        for el in utils.corpus_genres_for_source(dhelper.dataset_df, target)
    ]
    tm = TagManager.get(sources, target, source_tags, target_tags)
    judge = Judge()
    print("The judge was initialized")

    translators = {}
    tr = DbpMappingTranslator(
        tm, ''.join([
            utils.ISMIR2019_TRANSLATION_TABLES_DIR, "distance_table_dbpedia_",
            target
        ]))
    translators['baseline'] = tr

    models = {}
    models['avg_init'] = ''.join(
        [utils.ACOUSTICBRAINZ_EMBS_DIR, "/avg_initial_embs.csv"])
    models['avg_retro_unweighted'] = ''.join(
        [utils.ACOUSTICBRAINZ_EMBS_DIR, "/avg_retro_unweighted_embs.csv"])
Example #20
File: main.py Project: fredvol/paralogger
    def load_judge_file(self, judge_path="judge1.json"):
        try:
            self.judge = Judge(judge_path)
        except Exception as ex:
            logger.error(ex)
Example #21
import os
import sys
from judge import Judge
from constants import *
import json

#team_id, sub_id, lang_id, q_no, time_limit

try:
    if len(sys.argv) != 6:
        raise ValueError("Invalid number of arguments!")
    team_id = str(sys.argv[1])
    sub_id = int(sys.argv[2])
    lang_id = int(sys.argv[3])
    q_no = int(sys.argv[4])
    time_limit = int(sys.argv[5])
    judge_obj = Judge(team_id, sub_id, lang_id, q_no, time_limit)
    result = judge_obj.generate_result()
    if result is None:
        raise Exception("Invalid result generated")
    path_to_team = os.path.join(PATH_TO_SUBMISSION, str(team_id))
    path_to_sub_id = os.path.join(path_to_team, str(sub_id))
    path_to_result = os.path.join(path_to_sub_id, RESULT_FILE)
    with open(path_to_result, 'w') as result_file:
        json.dump(result, result_file)

except Exception as e:
    print(e)
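
Going by the argv comment at the top, an invocation might look like this (the script name and values are purely illustrative):

python run_judge.py team42 3 1 2 5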
Example #22
def main(args):
    # reproducibility
    if args.seed is not None:
        torch.manual_seed(args.seed)  # don't think this works with SparseMNIST right now
        np.random.seed(args.seed)
    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)
    if args.checkpoint_filename is None:
        checkpoint_file = args.checkpoint + str(datetime.now())[:-10]
    else:
        checkpoint_file = args.checkpoint + args.checkpoint_filename

    # cuda
    args.use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if args.use_cuda else "cpu")

    # eval?
    args.evaluate = args.val_batches > 0

    # prep sparse mnist
    if not args.evaluate:
        train_loader, _, test_loader = prepare_data(args)
    else:
        train_loader, val_loader, test_loader = prepare_data(args)

    # machinery
    model = Judge().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # setup validation metrics we want to track for tracking best model over training run
    best_val_loss = float('inf')
    best_val_acc = 0

    print('\n================== TRAINING ==================')
    model.train()  # set model to training mode

    # set up training metrics we want to track
    correct = 0
    train_num = args.batches * args.batch_size

    # timer
    time0 = time.time()

    for ix, (sparse, img, label) in enumerate(train_loader):  # iterate over training batches
        sparse, label = sparse.to(device), label.to(device)  # get data, send to gpu if needed
        optimizer.zero_grad()  # clear parameter gradients from previous training update
        logits = model(sparse)  # forward pass
        loss = F.cross_entropy(logits, label)  # calculate network loss
        loss.backward()  # backward pass
        optimizer.step()  # take an optimization step to update model's parameters

        pred = logits.max(1, keepdim=True)[1]  # get the index of the max logit
        correct += pred.eq(label.view_as(pred)).sum().item()  # add to running total of hits

        if ix % args.log_interval == 0:  # maybe log current metrics to terminal
            print('Train: [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t'
                  'Accuracy: {:.2f}%\tTime: {:.0f} min, {:.2f} s'.format(
                      (ix + 1) * len(sparse), train_num,
                      100. * ix / len(train_loader), loss.item(),
                      100. * correct / ((ix + 1) * len(sparse)),
                      (time.time() - time0) // 60, (time.time() - time0) % 60))

    print(
        'Train Accuracy: {}/{} ({:.2f}%)\tTrain Time: {:.0f} minutes, {:.2f} seconds\n'
        .format(correct, train_num, 100. * correct / train_num,
                (time.time() - time0) // 60, (time.time() - time0) % 60))

    if args.evaluate:
        print('\n================== VALIDATION ==================')
        model.eval()

        # set up validation metrics we want to track
        val_loss = 0.
        val_correct = 0
        val_num = args.eval_batch_size * args.val_batches

        # disable autograd here (replaces volatile flag from v0.3.1 and earlier)
        with torch.no_grad():
            for sparse, img, label in val_loader:
                sparse, label = sparse.to(device), label.to(device)
                logits = model(sparse)

                val_loss += F.cross_entropy(logits, label,
                                            reduction='sum').item()

                pred = logits.max(1, keepdim=True)[1]
                val_correct += pred.eq(label.view_as(pred)).sum().item()

        # update current evaluation metrics
        val_loss /= val_num
        val_acc = 100. * val_correct / val_num
        print(
            '\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'
            .format(val_loss, val_correct, val_num, val_acc))

        is_best = val_acc > best_val_acc
        if is_best:
            best_val_acc = val_acc
            best_val_loss = val_loss  # note this is val_loss of best model w.r.t. accuracy,
            # not the best val_loss throughout training

        # create checkpoint dictionary and save it;
        # if is_best, copy the file over to the file containing best model for this run
        state = {
            'state_dict': model.state_dict(),
            'optimizer_state': optimizer.state_dict(),
            'val_loss': val_loss,
            'val_acc': val_acc,
        }
        save_checkpoint(state, is_best, checkpoint_file)

    print('\n================== TESTING ==================')
    check = torch.load(checkpoint_file + '-best.pth.tar')
    model.load_state_dict(check['state_dict'])
    model.eval()

    test_loss = 0.
    test_correct = 0
    test_num = args.eval_batch_size * args.test_batches

    # disable autograd here (replaces volatile flag from v0.3.1 and earlier)
    with torch.no_grad():
        for sparse, img, label in test_loader:
            sparse, label = sparse.to(device), label.to(device)
            logits = model(sparse)
            test_loss += F.cross_entropy(logits, label,
                                         reduction='sum').item()
            pred = logits.max(1, keepdim=True)[1]  # get the index of the max logit
            test_correct += pred.eq(label.view_as(pred)).sum().item()

    test_loss /= test_num
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, test_correct, test_num, 100. * test_correct / test_num))

    print('Final model stored at "{}".'.format(checkpoint_file +
                                               '-best.pth.tar'))
Example #23
def create_tree(verbose=False):
    """
    Parameters
    ----------
    verbose: boolean

    Returns
    -------
    tree: DecisionTree
    """
    # Load the data.
    trips = load_data()
    arrival_times_df = load_arrival_times(trips)

    # Assume nan means that the train is late.
    arrival_times_df.fillna(value=30, inplace=True)

    # Split the data into training and testing sets.
    training_dates = []
    tuning_dates = []
    testing_dates = []

    last_training_day = datetime.datetime.strptime('2016-04-30', '%Y-%m-%d')
    last_tuning_day = datetime.datetime.strptime('2017-04-30', '%Y-%m-%d')

    for datestr in arrival_times_df.columns:
        this_date = datetime.datetime.strptime(datestr, '%Y-%m-%d')
        if this_date <= last_training_day:
            training_dates.append(datestr)
        elif this_date <= last_tuning_day:
            tuning_dates.append(datestr)
        else:
            testing_dates.append(datestr)

    training_df = arrival_times_df.loc[:, training_dates]
    tuning_df = arrival_times_df.loc[:, tuning_dates]
    testing_df = arrival_times_df.loc[:, testing_dates]

    training_features_df = create_features(list(training_df.columns))
    judge = Judge(training_df)

    # Tune our hyperparameter.
    # Iterate over values for n_min.
    best_tuning_score = 1e10
    best_n_min = 0
    best_tree = None
    for n_min in range(10, 100, 10):

        tree = DecisionTree(err_fn=judge.find_total_absolute_deviation,
                            n_min=n_min)
        tree.fit(training_features_df)
        training_score = evaluate(tree, training_df)
        tuning_score = evaluate(tree, tuning_df)

        if tuning_score < best_tuning_score:
            best_tuning_score = tuning_score
            best_n_min = n_min
            best_tree = tree

        if verbose:
            print('n_min', n_min)
            print('training', training_score)
            print('tuning', tuning_score)
            tree.render()

    testing_score = evaluate(best_tree, testing_df)

    if verbose:
        print('best_n_min', best_n_min)
        print('best_tuning', best_tuning_score)
        print('testing score', testing_score)

    return best_tree
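
Typical use, following directly from the signature above:

tree = create_tree(verbose=True)  # prints per-n_min training/tuning scores while searching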
Example #24
        minutes = contest_cfg['duration'] // 60 % 60
        hours = contest_cfg['duration'] // 60 // 60 % 60

        logger.debug("Duration: %02d:%02d:%02d" % (hours, minutes, seconds))
        logger.debug("Problems: " + str(contest_cfg['prob_ids']))
        logger.debug("Penalty: %d points / wrong submission" %
                     contest_cfg['penalty'])

    problems = {
        prob_id: Problem(prob_id, options.contest_dir, logger)
        for prob_id in contest_cfg['prob_ids']
    }
    contest = Contest(options.delay, contest_cfg['duration'],
                      options.minutes_in, contest_cfg['prob_ids'],
                      contest_cfg['penalty'], logger)
    judge = Judge(contest, problems, options.contest_dir, options.num_judges,
                  logger)

    application = web.Application(
        [
            (r'/', IndexHandler),
            (r'/index.html', IndexHandler),
            (r'/auth/login', AuthLoginHandler),
            (r'/auth/logout', AuthLogoutHandler),
            (r'/api/v1/admin/(.*)', AdminHandler),
            (r'/api/v1/log/(.*)', LogHandler),
            (r'/api/v1/metadata', MetadataHandler),
            (r'/api/v1/updates', UpdatesHandler),
            (r'/api/v1/submit/(.*)/solution', SubmitSolutionHandler),
            (r'/api/v1/submit/(.*)/clarification', SubmitClarificationHandler),
            (r'/api/v1/errors/(.*)', ErrorFileHandler),
        ],
Example #25
    # cv2.imwrite cannot save images to paths containing Chinese characters, so use the following instead
    cv2.imencode('.jpg', general_crop)[1].tofile(general_img)

    # Return an accurate crop of the part number
    accurate_crop = extract_part(general_crop)
    name_img = os.path.join(os.path.dirname(__file__), "accurate_crop",
                            'part' + str(index) + '.jpg')

    # Save the part-number crop
    #cv2.imwrite(name_img,accurate_crop)
    # cv2.imwrite cannot save images to paths containing Chinese characters, so use the following instead
    cv2.imencode('.jpg', accurate_crop)[1].tofile(name_img)

    # Recognize the part number in the image
    part_num = tran_text(name_img)
    models = Judge(part_num)
    part_nums.append([part_num, models])

output = os.path.join(path, "output")
# Clear all of the output folders and the files inside them
shutil.rmtree(os.path.join(output, "2GW"))
os.mkdir(os.path.join(output, "2GW"))
shutil.rmtree(os.path.join(output, "2HX"))
os.mkdir(os.path.join(output, "2HX"))
shutil.rmtree(os.path.join(output, "2LD"))
os.mkdir(os.path.join(output, "2LD"))
shutil.rmtree(os.path.join(output, "2QJ"))
os.mkdir(os.path.join(output, "2QJ"))
shutil.rmtree(os.path.join(output, "2SV"))
os.mkdir(os.path.join(output, "2SV"))
shutil.rmtree(os.path.join(output, "2YS"))
Example #26
File: test.py Project: YLAsce/oj
#!/usr/bin/env python
import time
from compile import Compile
from sub import Sub
from judge import Judge
c = Compile()
s = Sub()
s.sid = 1
s.lang = 'g++'
c.compile(s)
print('ce:' + s.ce)
print('status:' + str(s.status))
s.pid = 15
s.case_cnt = 1
s.time_lim = 1000
s.mem_lim = 10240
s.case_lim = [
    {
        'time': 1000,
        'mem': 4000
    },
]
j = Judge()
j.judge(s)
print('status:' + str(s.status))
print(s.case_res)