Code example #1
import sys

# "helpers" and "Analyzer" are project-local modules (not shown in this snippet).
def main(args):
    files = helpers.parse_flags(args)
    analyzer = Analyzer(helpers.read(files["input"]))

    if analyzer.try_analyze():
        for warning in analyzer.warnings:
            print(warning)

        print("The file is fine, no error was found")
        print("\nCheck the", files["output"], "file for more information.")
        helpers.write(files["output"], analyzer.symbols)
        sys.exit(0)

    else:
        for warning in analyzer.warnings:
            print(warning)

        for error in analyzer.errors:
            print(error)

        if analyzer.failedAt == "Semantic":
            print("\nCheck the", files["output"], "file for more information.")
            helpers.write(files["output"], analyzer.symbols)

        sys.exit(1)
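Examples #1 and #6 call into a project-local helpers module that this page does not show. A minimal sketch of what such a module might look like, just enough to run those two snippets, is below; the flag layout and the line-oriented read/write behavior are assumptions, not the project's actual implementation.

# Hypothetical stand-in for the "helpers" module used in examples #1 and #6.
# The flag layout and file formats are assumptions; the real module is not shown here.


def parse_flags(args):
    """Assume a call of the form: script.py <input-file> <output-file>."""
    return {"input": args[1], "output": args[2]}


def read(path):
    """Return the file contents as a list of lines."""
    with open(path, encoding="utf-8") as f:
        return f.read().splitlines()


def write(path, items):
    """Write one item per line."""
    with open(path, "w", encoding="utf-8") as f:
        for item in items:
            f.write("{}\n".format(item))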
Code example #2
File: dejavu3.py Project: vtsokorov/dejavu3
    def search_record_by_file(self, filename):
        # Read the audio file (per-channel frames, sample rate, file hash),
        # collect fingerprint matches for every channel, then align them into
        # a single best match.
        frames, fs, file_hash = read(filename, self.limit)

        matches = []
        for d in frames:
            matches.extend(self.find_matches(d, fs=fs))

        match = self.align_matches(matches)

        return match
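A hypothetical call site for this method; the class name, constructor argument, and file path below are illustrative assumptions, not taken from the project.

# Hypothetical usage of search_record_by_file; every name here is illustrative.
from dejavu3 import Dejavu3  # assumed class name, mirroring the project and module name

config = {
    "database": {"host": "127.0.0.1", "user": "root", "passwd": "", "db": "dejavu"},
}  # assumed shape of the constructor argument

djv = Dejavu3(config)
match = djv.search_record_by_file("mp3/sample.mp3")
print(match)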
Code example #3
File: configFiles.py Project: elhamAm/GUI
def getConfigFile(fileName):

    session['fileName'] = fileName
    res = h.read(fileName)
    session["selectedFile"] = fileName
    if res in ("NOTJSON", "NOTSCHEMA", "NOSCHEMA"):
        session["data"] = {}
    else:
        session["data"] = res

    return jsonify(res)
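Example #3 reads like a Flask view: it uses session, jsonify, and an h alias for the helpers module, none of which the snippet defines. A minimal hypothetical wiring, assuming a route of /config/<fileName> and the alias import, could look like this:

# Hypothetical wiring for the view in example #3; the route, secret key, and import
# alias are assumptions, since the real app setup is not shown in the source.
from flask import Flask, jsonify, session
import helpers as h  # assumed alias, matching the "h.read" call above

app = Flask(__name__)
app.secret_key = "change-me"  # required for session support

app.add_url_rule("/config/<fileName>", view_func=getConfigFile)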
Code example #4
def main():
    B, L, D, scores, libraries = helpers.read('./data/d_tough_choices.txt')

    ansLibs = []
    # ansLibs entries have the form [library_id, [book1, book2, ...]];
    # each library from helpers.read is [id, NBooks, TDays, MShipsPerDay, [books]].

    for i in range((L - 1) // 2):  # integer division so range() gets an int in Python 3

        lib = libraries[2 * i]
        ansLibs.append([lib[0], lib[4]])

    helpers.output(ansLibs, "outputs/D_v1.txt")
Code example #5
def shutDownRunningFile():

    d = session.get("data")
    dc = h.createDaqInstance(d)
    runningFile = h.read(r1.hgetall("runningFile")["fileName"])
    allDOWN = True
    for p in runningFile['components']:
        rawStatus, timeout = dc.getStatus(p)
        status = h.translateStatus(rawStatus, timeout)
        if status != "DOWN":
            allDOWN = False
    if allDOWN:
        r1.hset("runningFile", "isRunning", 0)
        return jsonify("true")
    else:
        return jsonify("false")
Code example #6
import sys

# "helpers" and "Lexer" are project-local modules (not shown in this snippet).
def main(args):
    files = helpers.parse_flags(args)
    lines = helpers.read(files["input"])
    lexer = Lexer()

    # No errors on file
    if lexer.try_tokenize(lines):
        print("The file is fine, no error was found")

    # Print errors on screen
    else:
        for err in lexer.get_errors():
            print("*** ERROR on line", err.line, "***", err.reason, err.word)

    # Write to the file
    helpers.write(files["output"], lexer.get_all())
    print("\nCheck the file", files["output"], "for more information")

    sys.exit(0)
Code example #7
def main():
    B, L, D, scores, libraries = h.read(
        "../data/e_so_many_books.txt"
    )  # libraries is [id,NBooks,TDays,MShipsperday,[books]]
    # TODO Call get_points
    book_scores = get_book_point_lib(libraries, scores)

    #list.sort(libraries, key=lambda library:get_points(library,book_scores), reverse=True)
    tot_points = 0
    # sort books by value and add to the total points to calculate the average
    for lib in libraries:
        list.sort(lib[4], key=lambda book: book_scores[book], reverse=True)
        tot_points += get_points(lib, book_scores)
    average_points = tot_points / L
    list.sort(
        libraries,
        key=lambda library: get_points2(library, book_scores, average_points),
        reverse=True)
    ansLibs = []

    day = 0
    new_libraries = []
    for lib in libraries:
        day_local = day + lib[2]  # Add time to set up
        books_to_scan = []
        while day_local < D:
            # Re-sort so already-scanned books (whose scores are zeroed below)
            # sink to the end, then take the next batch one book at a time.
            list.sort(lib[4], key=lambda book: book_scores[book],
                      reverse=True)
            for i in range(lib[2]):
                if i < len(lib[4]):
                    books_to_scan.append(lib[4][i])
                    book_scores[lib[4][i]] = 0
            day_local += lib[2]  #iterate over days
        new_libraries.append([lib[0], books_to_scan])

    #print("Days total are: " + str(D))
    for i in range((L - 1) // 2):
        lib = new_libraries[2 * i]
        ansLibs.append([lib[0], lib[1]])  # new_libraries entries are [id, books_to_scan]
    h.output(ansLibs, "../outputs/E_v1.txt")
Code example #8
    # The source snippet starts mid-call; the parser setup below is reconstructed from
    # the arguments used further down (args.dataset, args.epochs, args.batch_size) and
    # from the parallel parsers in examples #9 and #10, so treat it as an assumption.
    parser = argparse.ArgumentParser(description='Train a QSAR model')
    parser.add_argument('--dataset',
                        help='Enter one of available datasets: {}'.format(
                            ", ".join(DATASETS)),
                        required=True)
    parser.add_argument('--epochs',
                        help='Number of training epochs',
                        required=True,
                        type=int)
    parser.add_argument('--batch_size',
                        help='Number of samples processed '
                             'in one forward/backward pass',
                        required=True,
                        type=int)

    args = parser.parse_args()
    dataset = args.dataset
    epochs = args.epochs
    batch_size = args.batch_size

    is_valid_dataset(dataset)

    logger.info("Reading in preprocessed training {0} dataset".format(dataset))
    file_path = "{dir}{dataset}{suffix}".format(dir=DATADIR,
                                                dataset=dataset,
                                                suffix=TRAIN_FILE_SUFFIX)
    train_df = read(file_path)
    logger.info("Finished reading in preprocessed {0} dataset".format(dataset))

    logger.info("preparing training data")
    split = descriptor_activation_split(train_df)

    logger.info("Generating model")
    model = generate_model(split.shape)

    logger.info("fitting model")
    model.fit(split.descriptors,
              split.act,
              epochs=epochs,
              batch_size=batch_size)

    logger.info("saving model")
Code example #9
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Evaluate QSAR data')
    parser.add_argument('--dataset',
                        help='Enter one of available datasets: {}'.format(
                            ", ".join(DATASETS)),
                        required=True)

    args = parser.parse_args()
    dataset = args.dataset

    is_valid_dataset(dataset)

    file_path = "{dir}{dataset}{suffix}".format(dir=DATADIR,
                                                dataset=dataset,
                                                suffix=TEST_FILE_SUFFIX)
    logger.info("Reading in preprocessed testing {0} dataset".format(dataset))
    test_df = read(file_path)
    logger.info("Finished reading in preprocessed {0} dataset".format(dataset))

    logger.info("Preparing testing data")
    split = descriptor_activation_split(test_df)

    logger.info("Loading {0} model".format(dataset))
    model = load_model('{0}{1}.h5'.format('/data/', dataset),
                       custom_objects={'r2': r2})

    logger.info("Evaluating {0} model".format(dataset))
    score = model.evaluate(split.descriptors, split.act)

    logger.info("R2 score {0}".format(score[1]))
Code example #10
    parser = argparse.ArgumentParser(description='Preprocess QSAR data')
    parser.add_argument('--dataset', help='Enter one of available datasets: {}'
                        .format(", ".join(DATASETS)), required=True)

    args = parser.parse_args()
    dataset = args.dataset

    is_valid_dataset(dataset)

    logger.info("Reading in {0} dataset".format(dataset))
    train_file_path = "{dir}{dataset}{suffix}".format(dir=DATADIR,
                                                      dataset=dataset,
                                                      suffix=TRAIN_FILE_SUFFIX)
    test_file_path = "{dir}{dataset}{suffix}".format(dir=DATADIR,
                                                     dataset=dataset,
                                                     suffix=TEST_FILE_SUFFIX)
    train_df = read(train_file_path)
    test_df = read(test_file_path)
    logger.info("Finished reading in {0} dataset".format(dataset))

    logger.info("Transforming {0} dataset".format(dataset))
    start = time.time()
    train_df, test_df = Preprocessor(train_df, test_df).transform()
    logger.info("Transformation took {0} seconds".format(
        time.time() - start))

    logger.info("Writing preprocessed {0} dataset to disk".format(dataset))
    write(DATADIR, dataset, train_df)
    write(DATADIR, dataset, test_df)
    logger.info("Finished writing preprocessed {0} dataset ".format(dataset))
Code example #11
File: geoparse.py Project: twkillian/am205-project
# Parses geographic population shapefiles into a list using pyshp
# pyshp can be found at https://github.com/GeospatialPython/pyshp
from helpers import dist, center, read

if __name__ == "__main__":
	geogen = read()
	with open('popgridtest.dat', 'w') as f:
		for i in geogen:
			f.write('{}, {},{}\n'.format(i[0][0], i[0][1], i[1]))
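The format string above writes each record as "x, y,value", which suggests read() yields pairs of the form ((x, y), value). A hypothetical reader for the resulting popgridtest.dat, assuming exactly that layout:

# Hypothetical reader for popgridtest.dat; assumes the "x, y,value" layout produced
# by the format string in example #11.
def load_popgrid(path='popgridtest.dat'):
    points = []
    with open(path) as f:
        for line in f:
            x, y, value = (float(part) for part in line.split(','))
            points.append(((x, y), value))
    return points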