Ejemplo n.º 1
0
def parse_csv(csv_path: Path):
    """
    Parse a submission csv file into rows for storing.

    Parameters
    ----------
    csv_path : Path
        Path to the csv file; its stem is the epiweek number.

    Returns
    -------
    list
        ``[pandas.DataFrame of (epiweek, region) index, dict target -> matrix]``
    """
    epiweek = int(csv_path.stem)
    sub = submission.Submission(csv=csv_path)

    n_regions = len(submission.MAP_REGION)

    index = {"epiweek": [epiweek] * n_regions, "region": []}

    # One row per region; the column counts are the number of bins each
    # target uses in the submission format -- TODO confirm against Submission
    matrices = {
        1: np.zeros((n_regions, 131)),
        2: np.zeros((n_regions, 131)),
        3: np.zeros((n_regions, 131)),
        4: np.zeros((n_regions, 131)),
        "onset_wk": np.zeros((n_regions, 34)),
        "peak_wk": np.zeros((n_regions, 33)),
        "peak": np.zeros((n_regions, 131))
    }

    for idx, region in enumerate(submission.MAP_REGION):
        index["region"].append(region)
        for target in matrices:
            # NOTE: The data from ensemble loader uses log values instead of normal
            # Here we are using normal values directly
            try:
                X, _bin_starts = sub.get_X(region, target)
            except ValueError:
                # BUG FIX: the original handler referenced X.shape here, which
                # raised NameError when get_X itself was the call that failed.
                print(f"Value error: {target} for region {region} file name : {csv_path}")
                continue
            try:
                matrices[target][idx, :] = X
            except ValueError:
                print(
                    f"Shape error: {X.shape} {target} for region {region} file name : {csv_path}"
                )
    return [pd.DataFrame(index), matrices]
Ejemplo n.º 2
0
def diagnose(code, error):
    """Run each registered detector against the lexed code and return the
    html explanation for the first error that matches.

    Each entry of ``possible_errors`` is a (detector, line_getter) pair:
    the detector returns the name of an error template file (or falsy),
    and the optional line_getter returns the offending line of code to be
    spliced into the template at the ``<LINE>`` placeholder.

    Falls back to the raw error wrapped in ``<pre>`` when nothing matches.
    """
    sub = submission.Submission(lexer.begin_lex(code), error)

    # BUG FIX: `detected` was unbound at the fallback check below when
    # possible_errors is empty, raising NameError instead of returning.
    detected = None
    for poss_error in possible_errors:
        detected = poss_error[0](sub)
        if detected:
            with open('errors/' + detected, "r") as f:
                data = f.read()

            if poss_error[1]:
                line = poss_error[1](sub)
                # Single-argument call form prints identically on Python 2
                # and 3 (the original used Python-2 print statements).
                print("Can render")
                if line:
                    print("GOt line - " + line)
                    data = data.replace("<LINE>", line)
                else:
                    print("no line")
                    # No line available: strip the whole highlight widget.
                    data = data.replace(
                        """<div style = "margin: 20px;">
      <p style="font-family: courier; font-size: 120%; color: #000;">
            <span style = "background-color: #ffff99; padding: 10px; border: 1px solid #cc0;">
            <LINE>
            </span>
      </p>
</div>""", "")

            return data

    if not detected:
        return '<pre>' + error + '</pre>'
Ejemplo n.º 3
0
def loadSubmissionsFromJson(filename):
    """Load Submission objects from a json dump file.

    The on-disk format is not quite valid json (see the slicing below),
    so the text is patched into a proper json array before parsing.

    Returns a list of Submissions.Submission instances.
    """
    # BUG FIX: the file handle was never closed; a context manager
    # guarantees cleanup (and avoids shadowing the builtin `file`).
    with open(filename, 'r') as f:
        text = u''.join(f.readlines())
    # Fix the formatting so the json module understands it:
    # drop the first char and the trailing 3 chars, then wrap in [].
    text = "[{}]".format(text[1:-3])

    dictSubmissions = json.loads(text)
    submissions = []
    for dictSubmission in dictSubmissions:
        sub = Submissions.Submission()
        sub.initFromDict(dictSubmission)
        submissions.append(sub)

    return submissions
Ejemplo n.º 4
0
def testDatabase():
    """Smoke-test the database: insert a submission, look it up by title,
    create a collection, and attach the submission to it."""
    db = LikedSavedDatabase('test.db')

    sample = Submissions.Submission()
    # Every field is seeded with its own name as the value.
    for field in ("source", "title", "author", "subreddit",
                  "subredditTitle", "body", "bodyUrl", "postUrl"):
        setattr(sample, field, field)
    db.addSubmission(sample)

    fetched = db.getSubmissionsByTitle("title")
    collection = db.createCollection("myCollection")
    print(fetched[0])
    print(collection)

    db.addSubmissionToCollection(fetched[0], collection[0])
    print(db.getAllSubmissionsInCollection(collection[0]))
def saveRequestedUrls(pipeConnection, urls):
    """Wrap each user-requested url in a Submission and download them all
    into the 'Requested Downloads' output directory."""
    if pipeConnection:
        logger.setPipe(pipeConnection)

    initialize()

    logger.log(
        'Attempting to save {} requested urls. This may take several minutes...'
        .format(len(urls)))

    # Fields that are identical for every user-requested submission.
    fixedFields = {
        "source": "UserRequested",
        "title": "UserRequested",
        "author": "(Requested by user)",
        "subreddit": "Requested_Downloads",
        "subredditTitle": "Requested Downloads",
        "body": "(Requested by user)",
    }

    submissions = []
    for url in urls:
        entry = submission.Submission()
        for name, value in fixedFields.items():
            setattr(entry, name, value)
        # The url doubles as both the body link and the post link.
        entry.bodyUrl = url
        entry.postUrl = url
        submissions.append(entry)

    if len(submissions) != len(urls):
        logger.log(
            'Could not parse {} URLs!'.format(len(urls) - len(submissions)))

    unsupportedSubmissions = imageSaver.saveAllImages(
        settings.settings['Output_dir'],
        submissions,
        imgur_auth=imgurDownloader.getImgurAuth(),
        only_download_albums=settings.settings['Only_download_albums'],
        skip_n_percent_submissions=settings.
        settings['Skip_n_percent_submissions'],
        soft_retrieve_imgs=settings.settings['Should_soft_retrieve'],
        only_important_messages=settings.settings['Only_important_messages'])

    logger.log(
        'Download finished. Output to \'Requested Downloads\' directory')

    if pipeConnection:
        logger.log(scriptFinishedSentinel)
        pipeConnection.close()
Ejemplo n.º 6
0
def submissionsFromJsonFiles(jsonFilesToRead):
    """Read Submission objects out of several json dump files.

    Parameters
    ----------
    jsonFilesToRead : iterable of str
        Paths to the dump files.

    Returns
    -------
    tuple
        (list of Submissions.Submission, total count)
    """
    submissions = []
    for filename in jsonFilesToRead:
        # BUG FIX: file handles were never closed; a context manager
        # guarantees cleanup (and avoids shadowing the builtin `file`).
        with open(filename, 'r') as f:
            text = u''.join(f.readlines())
        # Fix the formatting so the json module understands it:
        # drop the first char and the trailing 3 chars, then wrap in [].
        text = "[{}]".format(text[1:-3])

        dictSubmissions = json.loads(text)
        for dictSubmission in dictSubmissions:
            sub = Submissions.Submission()
            sub.initFromDict(dictSubmission)
            submissions.append(sub)
        print("Read {} submissions from file {}".format(
            len(dictSubmissions), filename))

    totalSubmissions = len(submissions)
    return (submissions, totalSubmissions)
def saveRequestedSubmissions(pipeConnection, submissionIds):
    """Look up the requested submission ids in the database and download
    their images into the configured output directory."""
    if pipeConnection:
        logger.setPipe(pipeConnection)

    initialize()

    logger.log(
        'Attempting to save {} requested submissions. This will take several minutes...'
        .format(len(submissionIds)))

    dbSubmissions = LikedSavedDatabase.db.getSubmissionsByIds(submissionIds)

    # Convert each raw database row into a Submission instance.
    submissions = []
    for row in dbSubmissions:
        converted = submission.Submission()
        converted.initFromDict(row)
        submissions.append(converted)

    missing = len(submissionIds) - len(submissions)
    if missing:
        logger.log('Could not find {} submissions in database!'.format(
            missing))

    unsupported = imageSaver.saveAllImages(
        settings.settings['Output_dir'],
        submissions,
        imgur_auth=imgurDownloader.getImgurAuth(),
        only_download_albums=settings.settings['Only_download_albums'],
        skip_n_percent_submissions=settings.
        settings['Skip_n_percent_submissions'],
        soft_retrieve_imgs=settings.settings['Should_soft_retrieve'],
        only_important_messages=settings.settings['Only_important_messages'])

    logger.log(
        'Download finished. Please refresh the page to see updated entries')

    if pipeConnection:
        logger.log(scriptFinishedSentinel)
        pipeConnection.close()
Ejemplo n.º 8
0
def main():
    """Competition ingestion entry point: load the submitted controller and
    run it over every chronic listed in chronic_names.json, writing one
    machine log per chronic plus an index file into the output directory.

    Expects four positional CLI arguments: input dir, output dir, program
    dir, submission dir.
    """
    # read arguments
    input_dir = os.path.abspath(sys.argv[1])
    output_dir = os.path.abspath(sys.argv[2])
    program_dir = os.path.abspath(sys.argv[3])
    submission_dir = os.path.abspath(sys.argv[4])
    log_files = list()
    # create output dir if not existing
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if VERBOSE:
        print("input dir: {}".format(input_dir))
        print("output dir: {}".format(output_dir))
        print("program dir: {}".format(program_dir))
        print("submission dir: {}".format(submission_dir))

        print("input content", os.listdir(input_dir))
        print("output content", os.listdir(output_dir))
        print("program content", os.listdir(program_dir))
        print("submission content", os.listdir(submission_dir))

    # add proper directories to path so `import submission` below resolves
    # against the contestant's uploaded code
    sys.path.append(program_dir)
    sys.path.append(submission_dir)

    try:
        import submission
    except ImportError:
        raise ImportError(
            'The submission folder should contain a file submission.py containing your controler named '
            'as the class Submission.')

    # Instantiate and run the agents on both validation and test sets (simultaneously)
    with open(os.path.abspath(os.path.join(input_dir, "chronic_names.json")),
              'r') as f:
        chronics = json.load(f)

        for i, chronic in enumerate(chronics):

            # Fresh environment per chronic, positioned at that chronic's
            # starting timestep.
            environment = pypownet.environment.RunEnv(
                parameters_folder=input_dir,
                game_level=chronic["GAME_LEVEL"],
                chronic_looping_mode='natural',
                start_id=int(chronic["START_ID"]),
                game_over_mode=GAME_OVER_MODE)
            # NOTE(review): bare except — masks any construction error
            # (not just a missing class) behind this one message.
            try:
                submitted_controler = submission.Submission(environment)
            except:
                raise Exception(
                    'Did not find a class named Submission within submission.py; your submission controler should'
                    ' be a class named Submission in submission.py file directly within the ZIP submission file.'
                )
            log_path = os.path.abspath(os.path.join(output_dir, 'runner.log'))
            print('log file path', log_path)

            # Truncate any stale runner.log left over from a previous run.
            open(log_path, 'w').close()  #what the hek

            # Instanciate a runner, that will save the run statistics within the log_path file, to be parsed and processed
            # by the scoring program

            current_machine_log = "machine_log_" + str(i) + ".json"
            log_files.append(current_machine_log)

            # HACK: override pypownet's Runner with the local one on every
            # iteration — marked for deletion by the original author.
            from runner import Runner
            pypownet.runner.Runner = Runner  #delete after
            phase_runner = pypownet.runner.Runner(
                environment,
                submitted_controler,
                verbose=VERBOSE,
                vverbose=VVERBOSE,
                level=chronic["GAME_LEVEL"],
                log_filepath=log_path,
                machinelog_filepath=os.path.abspath(
                    os.path.join(output_dir, current_machine_log)
                ))  # vverbose should be False otherwise any one
            # can see the testing phase injections and co
            phase_runner.ch.setLevel(logging.ERROR)
            # Run the planned experiment of this phase with the submitted model
            phase_runner.loop(iterations=int(chronic["NUMBER_ITERATIONS"]) - 1)

        # NOTE(review): overall_start is not defined in this function —
        # presumably a module-level timestamp; verify it exists at runtime.
        overall_time_spent = time.time() - overall_start

        # Index of the per-chronic machine logs, consumed by the scorer.
        with open(os.path.abspath(os.path.join(output_dir, 'log_files.json')),
                  'w') as files:
            json.dump(log_files, files)

        if VERBOSE:
            print("Overall time spent %5.2f sec " % overall_time_spent)
Ejemplo n.º 9
0
        print(word)

    pattern_b = map(to_bool, pattern)

    hor(pattern_b[0])
    vert(pattern_b[1], pattern_b[2], pattern_b[3])
    vert(pattern_b[4], pattern_b[5], pattern_b[6])

    number = 0
    for i in range(0, 4):
        if pattern_b[7 + i]:
            number += pow(2, i)
    print(int(number))


# NOTE(review): this rebinds the name `submission` from the imported module
# to a Submission instance — the module is no longer reachable under that
# name after this line.
submission = submission.Submission("michael_rollins")
submission.header("Michael Rollins")

# Digit patterns of 11 values in {-1, 1}; presumably seven segment
# activations followed by a 4-bit code read by seven_segment() — the
# definitions of seven_segment/weight_matrix are elsewhere in the file.
six = [1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1]
three = [1, -1, 1, 1, -1, 1, 1, 1, 1, -1, -1]
one = [-1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1]

seven_segment(three)
seven_segment(six)
seven_segment(one)

##this assumes you have called your weight matrix "weight_matrix"
submission.section("Weight matrix")
submission.matrix_print("W", weight_matrix)

print("test1")