Example 1
def test_add_repository():
    # create new hashdb
    H.make_hashdb("temp_1.hdb", json_out1)
    H.rm_tempdir("temp_2.hdb")

    # add to new temp_2.hdb
    H.hashdb(["add_repository", "temp_1.hdb", "temp_2.hdb", "repository1"])

    # temp_2.hdb should only have hashes and sources with repository1
    H.hashdb(["export", "temp_2.hdb", "temp_2.json"])
    json2 = H.read_file("temp_2.json")
    H.lines_equals(json2, [
        '# command: ', '# hashdb-Version: ',
        '{"block_hash":"2222222222222222","k_entropy":0,"block_label":"","source_sub_counts":["1111111111111111",1]}',
        '{"block_hash":"8899aabbccddeeff","k_entropy":0,"block_label":"","source_sub_counts":["0000000000000000",1,"0011223344556677",2]}',
        '{"block_hash":"ffffffffffffffff","k_entropy":0,"block_label":"","source_sub_counts":["0011223344556677",1]}',
        '{"file_hash":"0000000000000000","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["repository1","temp_1.tab"]}',
        '{"file_hash":"0011223344556677","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["repository1","temp_1.tab"]}',
        '{"file_hash":"1111111111111111","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["repository1","temp_1.tab"]}'
    ])

    # add to new temp_2.hdb
    H.rm_tempdir("temp_2.hdb")
    H.hashdb(["add_repository", "temp_1.hdb", "temp_2.hdb", "repository2"])

    # temp_2.hdb should only have hashes and sources with repository2
    H.hashdb(["export", "temp_2.hdb", "temp_2.json"])
    json2 = H.read_file("temp_2.json")
    H.lines_equals(json2, [
        '# command: ', '# hashdb-Version: ',
        '{"block_hash":"2222222222222222","k_entropy":0,"block_label":"","source_sub_counts":["1111111111111111",1]}',
        '{"file_hash":"1111111111111111","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["repository2","second_temp_1.tab"]}'
    ])
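A note on the harness: every hashdb example in this listing calls into a small test-helper module imported as H (lowercase h in a couple of examples). The real module ships with the hashdb test suite; the sketch below is only a plausible reconstruction inferred from the calls in these examples — the function names come from the snippets, the bodies are assumptions, and it presumes the hashdb command-line tool is on the PATH.

# Hypothetical reconstruction of the H test-helper module (not the real one).
import os
import shutil
import subprocess

def rm_tempdir(path):
    # Remove a temporary database directory, ignoring a missing one.
    shutil.rmtree(path, ignore_errors=True)

def rm_tempfile(path):
    if os.path.exists(path):
        os.remove(path)

def make_tempfile(path, lines):
    with open(path, "w") as f:
        f.write("\n".join(lines) + "\n")

def hashdb(args):
    # Run the hashdb CLI; assumes the executable is on the PATH.
    subprocess.check_call(["hashdb"] + args)

def make_hashdb(path, json_lines):
    # Build a fresh database from JSON lines via create + import.
    rm_tempdir(path)
    make_tempfile("temp_setup.json", json_lines)
    hashdb(["create", path])
    hashdb(["import", path, "temp_setup.json"])

def read_file(path):
    with open(path) as f:
        return f.read().splitlines()

def lines_equals(actual, expected):
    # '# command: ' and '# hashdb-Version: ' lines carry run-specific
    # suffixes, so header lines are presumably compared by prefix only.
    assert len(actual) == len(expected), (actual, expected)
    for a, e in zip(actual, expected):
        if e.startswith("#"):
            assert a.startswith(e), (a, e)
        else:
            assert a == e, (a, e)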
Example 2
def test_add_repository():
    # create new hashdb
    H.make_hashdb("temp_1.hdb", json_out1)
    H.rm_tempdir("temp_2.hdb")

    # add to new temp_2.hdb
    H.hashdb(["add_repository", "temp_1.hdb", "temp_2.hdb", "repository1"])

    # temp_2.hdb should only have hashes and sources with repository1
    H.hashdb(["export", "temp_2.hdb", "temp_2.json"])
    json2 = H.read_file("temp_2.json")
    H.lines_equals(json2, [
        '# command: ', '# hashdb-Version: ',
        '{"block_hash":"2222222222222222","k_entropy":0,"block_label":"","source_offsets":["1111111111111111",1,[4096]]}',
        '{"block_hash":"8899aabbccddeeff","k_entropy":0,"block_label":"","source_offsets":["0000000000000000",1,[0],"0011223344556677",2,[0,512]]}',
        '{"block_hash":"ffffffffffffffff","k_entropy":0,"block_label":"","source_offsets":["0011223344556677",1,[1024]]}',
        '{"file_hash":"0000000000000000","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["repository1","temp_1.tab"]}',
        '{"file_hash":"0011223344556677","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["repository1","temp_1.tab"]}',
        '{"file_hash":"1111111111111111","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["repository1","temp_1.tab"]}'
    ])

    # add to new temp_2.hdb
    H.rm_tempdir("temp_2.hdb")
    H.hashdb(["add_repository", "temp_1.hdb", "temp_2.hdb", "repository2"])

    # temp_2.hdb should only have hashes and sources with repository2
    H.hashdb(["export", "temp_2.hdb", "temp_2.json"])
    json2 = H.read_file("temp_2.json")
    H.lines_equals(json2, [
        '# command: ', '# hashdb-Version: ',
        '{"block_hash":"2222222222222222","k_entropy":0,"block_label":"","source_offsets":["1111111111111111",1,[4096]]}',
        '{"file_hash":"1111111111111111","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["repository2","second_temp_1.tab"]}'
    ])
Example 3
def main():
    base2014 = helpers.read_file(COTHIST2014_FILENAME)
    base2015 = helpers.read_file(COTHIST2015_FILENAME)
    base2016 = helpers.read_file(COTHIST2016_FILENAME)

    print("""
Simulador de Investimentos

1) Guilherme
2) Leonardo Puglia
3) Leonardo Vendrame
4) Oráculo

0) Sair
    """)

    sys.stdout.write("=> ")
    sys.stdout.flush()
    option = int(raw_input())

    if option == 0:
        print("Saindo.")
    elif option == 1:
        print_result(guilherme.simulate(base2014, base2015, base2016))
    elif option == 2:
        print_result(puglia.simulate(base2014, base2015, base2016))
    elif option == 3:
        vendrame.simulate(base2014, base2015, base2016)
    elif option == 4:
        print_result(oracle.futute(base2016, STOCKS))
    else:
        print("Opção inválida.")
Example 4
def test_subtract_hash():
    # create new hashdb
    H.make_hashdb("temp_1.hdb", json_set_db1)
    H.make_hashdb("temp_2.hdb", json_set_db2)
    H.rm_tempdir("temp_3.hdb")

    # db1 - db2 hash
    H.hashdb(["subtract_hash", "temp_1.hdb", "temp_2.hdb", "temp_3.hdb"])
    H.hashdb(["export", "temp_3.hdb", "temp_3.json"])
    json3 = H.read_file("temp_3.json")
    H.lines_equals(json3, [
        '# command: ', '# hashdb-Version: ',
        '{"block_hash":"1111111111111111","k_entropy":1,"block_label":"bl1","source_sub_counts":["11",1]}',
        '{"file_hash":"11","filesize":1,"file_type":"A","zero_count":11,"nonprobative_count":1,"name_pairs":["r1","f1"]}'
    ])

    # db2 - db1 hash
    H.rm_tempdir("temp_3.hdb")
    H.hashdb(["subtract_hash", "temp_2.hdb", "temp_1.hdb", "temp_3.hdb"])
    H.hashdb(["export", "temp_3.hdb", "temp_3.json"])
    json3 = H.read_file("temp_3.json")
    H.lines_equals(json3, [
        '# command: ', '# hashdb-Version: ',
        '{"block_hash":"3333333333333333","k_entropy":3,"block_label":"bl3","source_sub_counts":["33",1]}',
        '{"file_hash":"33","filesize":3,"file_type":"C","zero_count":13,"nonprobative_count":3,"name_pairs":["r2","f2"]}'
    ])
Example 5
def test_subtract_hash():
    # create new hashdb
    H.make_hashdb("temp_1.hdb", json_set_db1)
    H.make_hashdb("temp_2.hdb", json_set_db2)
    H.rm_tempdir("temp_3.hdb")

    # db1 - db2 hash
    H.hashdb(["subtract_hash", "temp_1.hdb", "temp_2.hdb", "temp_3.hdb"])
    H.hashdb(["export", "temp_3.hdb", "temp_3.json"])
    json3 = H.read_file("temp_3.json")
    H.lines_equals(json3, [
        '# command: ', '# hashdb-Version: ',
        '{"block_hash":"1111111111111111","k_entropy":1,"block_label":"bl1","source_offsets":["11",1,[4096]]}',
        '{"file_hash":"11","filesize":1,"file_type":"A","zero_count":11,"nonprobative_count":1,"name_pairs":["r1","f1"]}'
    ])

    # db2 - db1 hash
    H.rm_tempdir("temp_3.hdb")
    H.hashdb(["subtract_hash", "temp_2.hdb", "temp_1.hdb", "temp_3.hdb"])
    H.hashdb(["export", "temp_3.hdb", "temp_3.json"])
    json3 = H.read_file("temp_3.json")
    H.lines_equals(json3, [
        '# command: ', '# hashdb-Version: ',
        '{"block_hash":"3333333333333333","k_entropy":3,"block_label":"bl3","source_offsets":["33",1,[4096]]}',
        '{"file_hash":"33","filesize":3,"file_type":"C","zero_count":13,"nonprobative_count":3,"name_pairs":["r2","f2"]}'
    ])
Example 6
def get_plain_ruby_problem():
    data = {"test_type": "unittest",
            "language": "ruby",
            "file_type": 'plain',
            "code": read_file('fixtures/plain/solution.rb'),
            "test": read_file('fixtures/plain/tests.rb'),
            }

    return data
Example 7
    def compute_initial_figure(self):
        self.curgen = CURRENT_GENERATION
        self.last_open_up_individuals = time.ctime(os.path.getmtime(self.cfg_data_individuals))
        self.last_open_individuals = self.last_open_up_individuals
        self.individuals_data = helpers.read_file(self.cfg_data_individuals)
        self.produce_scatter()

        # Create fitness surface
        self.fitscape = helpers.read_file(self.fitscape_data)
        self.produce_landscape()
Example 8
def get_plain_python_problem():
    data = {"test_type": "unittest",
            "language": "python",
            "file_type": 'plain',
            "code": read_file('fixtures/plain/solution.py'),
            "test": read_file('fixtures/plain/tests.py'),
            "extra_options": {
                "foo": "bar"    # wtf? lol
            }}

    return data
Example 9
    def get_doxy_file(self):
        if self.V > 0:
            print "> Checking doxy file"

        if self.conf.doxy_file:
            # compute the path unconditionally; the original only set it when
            # verbose, which raised a NameError at the read_file call below
            doxy_path = self.conf.ROOT + self.conf.doxy_file
            if self.V > 0:
                print "  > using %s project doxy file: %s" % (self.conf.proj, doxy_path)
            dox_contents = h.read_file(doxy_path)
        else:
            dox_contents = h.read_file(self.conf.ETC + self.conf.DEFAULT_DOXY)
            if self.V > 0:
                print "  > using default fg-docs file: etc/%s" % self.conf.DEFAULT_DOXY
        return dox_contents
Example 10
def test_add_multiple():
    json_db1 = [
        '{"file_hash":"11","filesize":1,"file_type":"ft1","zero_count":15,"nonprobative_count":111,"name_pairs":["rn1","fn1"]}',
        '{"block_hash":"11111111","k_entropy":101,"block_label":"bl1","source_sub_counts":["11",1]}'
    ]
    json_db2 = [
        '{"file_hash":"22","filesize":2,"file_type":"ft2","zero_count":16,"nonprobative_count":222,"name_pairs":["rn2","fn2"]}',
        '{"block_hash":"22222222","k_entropy":202,"block_label":"bl2","source_sub_counts":["22",1]}'
    ]
    json3_db3 = [
        '# command: ', '# hashdb-Version: ',
        '{"block_hash":"11111111","k_entropy":101,"block_label":"bl1","source_sub_counts":["11",1]}',
        '{"block_hash":"22222222","k_entropy":202,"block_label":"bl2","source_sub_counts":["22",1]}',
        '{"file_hash":"11","filesize":1,"file_type":"ft1","zero_count":15,"nonprobative_count":111,"name_pairs":["rn1","fn1"]}',
        '{"file_hash":"22","filesize":2,"file_type":"ft2","zero_count":16,"nonprobative_count":222,"name_pairs":["rn2","fn2"]}'
    ]

    # create DBs
    H.make_hashdb("temp_1.hdb", json_db1)
    H.make_hashdb("temp_2.hdb", json_db2)
    H.rm_tempdir("temp_3.hdb")

    # add 1 and 2 into 3
    H.hashdb(["add_multiple", "temp_1.hdb", "temp_2.hdb", "temp_3.hdb"])

    # check temp_3.hdb
    H.hashdb(["export", "temp_3.hdb", "temp_3.json"])
    json_in3 = H.read_file("temp_3.json")
    H.lines_equals(json_in3, json3_db3)
Example 11
def test_import_tab1():
    H.rm_tempdir("temp_1.hdb")
    H.rm_tempfile("temp_1.json")
    H.hashdb(["create", "temp_1.hdb"])
    H.make_tempfile("temp_1.tab", [
          "# <file hash> <tab> <block hash> <tab> <index>",
          "0011223344556677	8899aabbccddeeff	1",
          "0000000000000000	8899aabbccddeeff	1",
          "0011223344556677	8899aabbccddeeff	2",
          "0011223344556677	ffffffffffffffff	3",
          "1111111111111111	2222222222222222	9",
          "1111111111111111	2222222222222222	9"])
    H.hashdb(["import_tab", "temp_1.hdb", "temp_1.tab"])
    H.hashdb(["export", "temp_1.hdb", "temp_1.json"])

    returned_answer = H.read_file("temp_1.json")
    expected_answer = [
        "# command: ", "# hashdb-Version: ",
        '{"block_hash":"2222222222222222","k_entropy":0,"block_label":"","source_offsets":["1111111111111111",2,[4096]]}',
        '{"block_hash":"8899aabbccddeeff","k_entropy":0,"block_label":"","source_offsets":["0000000000000000",1,[0],"0011223344556677",2,[0,512]]}',
        '{"block_hash":"ffffffffffffffff","k_entropy":0,"block_label":"","source_offsets":["0011223344556677",1,[1024]]}',
        '{"file_hash":"0000000000000000","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["temp_1.tab","temp_1.tab"]}',
        '{"file_hash":"0011223344556677","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["temp_1.tab","temp_1.tab"]}',
        '{"file_hash":"1111111111111111","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["temp_1.tab","temp_1.tab"]}'
    ]

    H.lines_equals(returned_answer, expected_answer)
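Side note on the expected offsets: nothing in this example states the rule, but the values are consistent with offset = (index - 1) * 512 — index 3 maps to 1024 and index 9 to 4096 above. A quick self-check of that inferred relationship (an assumption read off the expected output, not taken from hashdb documentation):

def tab_index_to_offset(index, sector_size=512):
    # Inferred from the expected export above; assumed, not documented.
    return (index - 1) * sector_size

assert tab_index_to_offset(3) == 1024
assert tab_index_to_offset(9) == 4096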
Example 12
def test_export_json_hash_partition_range():
    H.rm_tempdir("temp_1.hdb")
    H.rm_tempfile("temp_1.json")
    H.rm_tempfile("temp_2.json")

    temp1_input = [
        '{"block_hash":"2222222222222222","k_entropy":1,"block_label":"bl1","source_offsets":["1111111111111111",2,[4096]]}',
        '{"block_hash":"8899aabbccddeeff","k_entropy":2,"block_label":"bl2","source_offsets":["0000000000000000",1,[0],"0011223344556677",2,[0,512]]}',
        '{"block_hash":"ffffffffffffffff","k_entropy":3,"block_label":"bl3","source_offsets":["0011223344556677",1,[1024]]}',
        '{"file_hash":"0000000000000000","filesize":3,"file_type":"ftb","zero_count":4,"nonprobative_count":5,"name_pairs":["r2","f2"]}',
        '{"file_hash":"0011223344556677","filesize":6,"file_type":"fta","zero_count":7,"nonprobative_count":8,"name_pairs":["r1","f1"]}',
        '{"file_hash":"1111111111111111","filesize":9,"file_type":"ftc","zero_count":10,"nonprobative_count":11,"name_pairs":["r3","f3"]}'
    ]

    expected_answer = [
        "# command: ", "# hashdb-Version: ",
        '{"block_hash":"2222222222222222","k_entropy":1,"block_label":"bl1","source_offsets":["1111111111111111",2,[4096]]}',
        '{"file_hash":"1111111111111111","filesize":9,"file_type":"ftc","zero_count":10,"nonprobative_count":11,"name_pairs":["r3","f3"]}'
    ]

    H.make_tempfile("temp_1.json", temp1_input)
    H.hashdb(["create", "temp_1.hdb"])
    H.hashdb(["import", "temp_1.hdb", "temp_1.json"])
    H.hashdb(["export", "-p", "00:80", "temp_1.hdb", "temp_2.json"])

    returned_answer = H.read_file("temp_2.json")
    H.lines_equals(returned_answer, expected_answer)
Example 13
def get_example_code():
    filename = "examples/" + request.args.get('filename', 0, type=str)
    if (os.path.exists(filename)):
        code = helpers.read_file(filename)
        return jsonify(code=code)
    else:
        return jsonify(code="Error: example not found")
Example 14
def test_import_tab4():
    H.rm_tempdir("temp_1.hdb")
    H.hashdb(["create", "temp_1.hdb"])
    H.make_tempfile("temp_1.tab", [
        "# <file hash> <tab> <block hash> <tab> <index>",
        "0000000000000000	8888888888888888	1",
        "0000000000000000	8888888888888888	2"
    ])
    H.hashdb(["import_tab", "-rr", "temp_1.hdb", "temp_1.tab"])
    H.make_tempfile("temp_2.tab", [
        "# <file hash> <tab> <block hash> <tab> <index>",
        "0000000000000000	8888888888888888	1",
        "0000000000000000	8888888888888888	2",
        "0000000000000000	8888888888888888	3",
        "1111111111111111	8888888888888888	1",
        "1111111111111111	8888888888888888	2"
    ])
    H.hashdb(["import_tab", "-rr", "temp_1.hdb", "temp_2.tab"])

    H.hashdb(["export", "temp_1.hdb", "temp_1.json"])

    returned_answer = H.read_file("temp_1.json")
    expected_answer = [
        "# command: ", "# hashdb-Version: ",
        '{"block_hash":"8888888888888888","k_entropy":0,"block_label":"","source_sub_counts":["0000000000000000",2,"1111111111111111",2]}',
        '{"file_hash":"0000000000000000","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["r","temp_1.tab"]}',
        '{"file_hash":"1111111111111111","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["r","temp_2.tab"]}'
    ]
    H.lines_equals(returned_answer, expected_answer)
Example 15
def alias():
    import json
    import re
    import helpers
    actionList = json.loads(
        helpers.read_file('{}/{}'.format(helpers.path('util'),
                                         'action-list.json')))
    bashrcFilepath = helpers.run_command_output('cd {} && cd ../ && pwd'.format(
        helpers.path('util'))).replace('\n', '') + '/.baconrc'
    contents = helpers.read_file(bashrcFilepath)
    pat = re.compile('alias {}='.format(actionList['alias']))
    match = re.search(pat, contents)
    formattedAlias = '\nalias {}="python {}/actions.py"'.format(
        actionList['alias'], helpers.path('util'))
    if not match:
        contents += formattedAlias
    helpers.write_file(bashrcFilepath, contents)
Example 16
    def do_GET(self):
        try:
            if self.path.endswith("/restaurants"):
                self._send_response_and_header(200, self.header)
                output = read_file(Paths.TEMPLATE_RESTAURANT) % "tesla"
                self.wfile.write(output)
                return

            if self.path.endswith("/hello"):
                self._send_response_and_header(200, self.header)

                output = "<html><body>Hello!"
                output += "<form method='POST' enctype='multipart/form-data' action='/hello'>"
                output += "<h2>What would you like me to say?</h2>"
                output += "<input name='message' type='text'>"
                output += "<input type='submit' value='Submit'> </form>"
                output += "</body></html>"
                self.wfile.write(output)
                return

            if self.path.endswith("/hola"):
                self._send_response_and_header(200, self.header)

                output = "<html><body>&#161Hola! <a href='/hello'>Back to Hello</a>"
                output += "<form method='POST' enctype='multipart/form-data' action='/hello'>"
                output += "<h2>What would you like me to say?</h2>"
                output += "<input name='message' type='text'>"
                output += "<input type='submit' value='Submit'> </form>"
                output += "</body></html>"
                self.wfile.write(output)
                return
        except IOError:
            self.send_error(404, "File Not Found %s" % self.path)
Example 17
    def initialize_vars(self, fname):
        content_array = read_file(fname)
        init = 0
        goal = 0

        for line in content_array:
            array = line.split("-")

            if array[0] == 'w':
                self.world = [int(a) for a in array[1:]]

            if array[0] == 'i':
                init = int(array[1])  # start node number

            if array[0] == 'g':
                goal = int(array[1])  # goal node number

            if array[0] == 'e':
                for i in array[1:]:
                    pair = i.split(":")
                    self.dict.update({pair[0]: pair[1]})

        return self.dict, self.world, init, goal
Example 18
    def write_header_html(self):
        template_header = h.read_file(self.conf.ETC + "fg_docx_header.html")
        template_header = template_header.replace("___NAV_ITEMS___", self.get_navigation())
        template_header = template_header.replace("___REPO_LINK___", self.get_repo_link())
        template_header = template_header.replace("___REPO_BRANCH___", self.get_branch())
        h.write_file(self.conf.work_dir + "fg_docx_header.html", template_header)
Example 19
def main():
    """Shows basic usage of the Drive v3 API.
    Prints the names and ids of the first 10 files the user has access to.
    """
    # The token file credentials/GDrive_token.txt stores the user's access and
    # refresh tokens, and is created automatically when the authorization flow
    # completes for the first time.
    creds = h.read_file("credentials/GDrive_token.txt")[0]
    # If there are no (valid) credentials available, let the user log in.
    if not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials/credentials.json', SCOPES)
            creds = flow.run_local_server()
        # Save the credentials for the next run
        h.write_file("credentials/GDrive_token.txt", creds)

    service = build('drive', 'v3', credentials=creds)

    # Call the Drive v3 API
    results = service.files().list(
        pageSize=10, fields="nextPageToken, files(id, name)").execute()
    items = results.get('files', [])

    if not items:
        print('No files found.')
    else:
        print('Files:')
        for item in items:
            print(u'{0} ({1})'.format(item['name'], item['id']))
Example 20
def test_import_tab4():
    H.rm_tempdir("temp_1.hdb")
    H.hashdb(["create", "temp_1.hdb"])
    H.make_tempfile("temp_1.tab", [
          "# <file hash> <tab> <block hash> <tab> <index>",
          "0000000000000000	8888888888888888	1",
          "0000000000000000	8888888888888888	2"])
    H.hashdb(["import_tab", "-rr", "temp_1.hdb", "temp_1.tab"])
    H.make_tempfile("temp_2.tab", [
          "# <file hash> <tab> <block hash> <tab> <index>",
          "0000000000000000	8888888888888888	1",
          "0000000000000000	8888888888888888	2",
          "0000000000000000	8888888888888888	3",
          "1111111111111111	8888888888888888	1",
          "1111111111111111	8888888888888888	2"])
    H.hashdb(["import_tab", "-rr", "temp_1.hdb", "temp_2.tab"])

    H.hashdb(["export", "temp_1.hdb", "temp_1.json"])

    returned_answer = H.read_file("temp_1.json")
    expected_answer = [
        "# command: ", "# hashdb-Version: ",
        '{"block_hash":"8888888888888888","k_entropy":0,"block_label":"","source_offsets":["0000000000000000",2,[0,512],"1111111111111111",2,[0,512]]}',
        '{"file_hash":"0000000000000000","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["r","temp_1.tab"]}',
        '{"file_hash":"1111111111111111","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["r","temp_2.tab"]}'
    ]
    H.lines_equals(returned_answer, expected_answer)
Example 21
class Data:
    salt = helpers.gen_salt()
    ops = helpers.read_file('ops.txt')
    banned = helpers.read_file('banned.txt')
    silentbanned = helpers.read_file('silentbanned.txt')
    crashbanned = helpers.read_file('crashbanned.txt')
    players = []
    taken_ids = []
    update_thread = None
    shutdown = False
    setblock_queue = queue.Queue()
    chat_broadcast_queue = queue.Queue()

    def __init__(self):
        self.colors_regex = re.compile('%(?=[{}])'.format(
            config.SERVER_COLORS))
Example 22
def send_email_report(passed, fail, skip, error, browser, platform, duration):
    status = 'FAILED' if fail > 0 or error > 0 else 'PASSED'
    status_color = 'red' if status == 'FAILED' else 'green'
    now = datetime.now()
    total = passed + fail + skip + error
    msg = MIMEMultipart('alternative')
    to = args.to_email
    me = args.from_email
    pswd = args.password
    msg['Subject'] = f"Test Automation Report | {passed}/{total} | {browser.upper()} |{now.strftime('%d/%m/%Y %H:%M:%S')}"
    msg['From'] = me
    msg['To'] = to
    html = read_file("templates/email_report.html")
    html = Template(html).substitute(PLATFORM=platform,
                                     ST_COLOR=status_color,
                                     STATUS=status,
                                     BROWSER=browser.upper(),
                                     FAILED=fail,
                                     PASSED=passed,
                                     SKIPPED=skip,
                                     ERROR=error,
                                     TOTAL=total,
                                     ENVIRONMENT=base_url,
                                     URL=base_url,
                                     DURATION=duration)
    part2 = MIMEText(html, "html")
    msg.attach(part2)
    s = smtplib.SMTP(args.smtp_host, args.smtp_port)
    s.starttls()
    s.login(me, pswd)
    s.sendmail(me, [to], msg.as_string())
Example 23
def test_export_json_hash_partition_range():
    H.rm_tempdir("temp_1.hdb")
    H.rm_tempfile("temp_1.json")
    H.rm_tempfile("temp_2.json")

    temp1_input = [
        '{"block_hash":"2222222222222222","k_entropy":1,"block_label":"bl1","source_sub_counts":["1111111111111111",2]}',
        '{"block_hash":"8899aabbccddeeff","k_entropy":2,"block_label":"bl2","source_sub_counts":["0000000000000000",1,"0011223344556677",2]}',
        '{"block_hash":"ffffffffffffffff","k_entropy":3,"block_label":"bl3","source_sub_counts":["0011223344556677",1]}',
        '{"file_hash":"0000000000000000","filesize":3,"file_type":"ftb","zero_count":4,"nonprobative_count":5,"name_pairs":["r2","f2"]}',
        '{"file_hash":"0011223344556677","filesize":6,"file_type":"fta","zero_count":7,"nonprobative_count":8,"name_pairs":["r1","f1"]}',
        '{"file_hash":"1111111111111111","filesize":9,"file_type":"ftc","zero_count":10,"nonprobative_count":11,"name_pairs":["r3","f3"]}'
    ]

    expected_answer = [
        "# command: ", "# hashdb-Version: ",
        '{"block_hash":"2222222222222222","k_entropy":1,"block_label":"bl1","source_sub_counts":["1111111111111111",2]}',
        '{"file_hash":"1111111111111111","filesize":9,"file_type":"ftc","zero_count":10,"nonprobative_count":11,"name_pairs":["r3","f3"]}'
    ]

    H.make_tempfile("temp_1.json", temp1_input)
    H.hashdb(["create", "temp_1.hdb"])
    H.hashdb(["import", "temp_1.hdb", "temp_1.json"])
    H.hashdb(["export", "-p", "00:80", "temp_1.hdb", "temp_2.json"])

    returned_answer = H.read_file("temp_2.json")
    H.lines_equals(returned_answer, expected_answer)
Example 24
def populate_node(path, properties, **kwargs):
    "Organizes properties into form fields and posts the multipart form data."

    # properties can be handled as strings by default
    fields = [(p['name'], p['value']) for p in properties if not p.has_key('type')]

    # properties with a type need to be hinted
    hinted = [hp for hp in properties if hp.has_key('type') and hp['type'] != 'nt:file']
    for hp in hinted:
        if isinstance(hp['value'], str):
            fields.append((hp['name'], hp['value']))  # single item
        else:
            for i in hp['value']:  # multiple items
                fields.append((hp['name'], i))
        # add the type hint
        fields.append(('%s@TypeHint' % hp['name'], hp['type']))

    # properties typed as nt:file should be handled as files
    file_props = [p for p in properties
                  if p.has_key('type') and p['type'] == 'nt:file']
    files = [(p['name'], p['value'].split('/')[-1], read_file(p['value'], 'rb'))
             for p in file_props]
    # add a type hint for each file property
    for p in file_props:
        fields.append(('%s@TypeHint' % p['name'], p['type']))

    post_multipart(path, fields, files, HEADERS, **kwargs)
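To make the three branches above concrete, here is a hypothetical properties payload that exercises each one; every name, value, and path is invented for illustration:

# Made-up payload: one plain string, one hinted multi-value, one file.
properties = [
    {'name': 'jcr:title', 'value': 'Home'},                             # plain string field
    {'name': 'tags', 'value': ['news', 'sports'], 'type': 'String[]'},  # hinted, multi-valued
    {'name': 'image', 'value': '/tmp/logo.png', 'type': 'nt:file'},     # posted as a file part
]
populate_node('/content/site/home', properties)  # hypothetical target path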
Example 25
def test_add_multiple():
    json_db1 = [
        '{"file_hash":"11","filesize":1,"file_type":"ft1","zero_count":15,"nonprobative_count":111,"name_pairs":["rn1","fn1"]}',
        '{"block_hash":"11111111","k_entropy":101,"block_label":"bl1","source_offsets":["11",1,[1024]]}'
    ]
    json_db2 = [
        '{"file_hash":"22","filesize":2,"file_type":"ft2","zero_count":16,"nonprobative_count":222,"name_pairs":["rn2","fn2"]}',
        '{"block_hash":"22222222","k_entropy":202,"block_label":"bl2","source_offsets":["22",1,[1024]]}'
    ]
    json3_db3 = [
        '# command: ', '# hashdb-Version: ',
        '{"block_hash":"11111111","k_entropy":101,"block_label":"bl1","source_offsets":["11",1,[1024]]}',
        '{"block_hash":"22222222","k_entropy":202,"block_label":"bl2","source_offsets":["22",1,[1024]]}',
        '{"file_hash":"11","filesize":1,"file_type":"ft1","zero_count":15,"nonprobative_count":111,"name_pairs":["rn1","fn1"]}',
        '{"file_hash":"22","filesize":2,"file_type":"ft2","zero_count":16,"nonprobative_count":222,"name_pairs":["rn2","fn2"]}'
    ]

    # create DBs
    H.make_hashdb("temp_1.hdb", json_db1)
    H.make_hashdb("temp_2.hdb", json_db2)
    H.rm_tempdir("temp_3.hdb")

    # add 1 and 2 into 3
    H.hashdb(["add_multiple", "temp_1.hdb", "temp_2.hdb", "temp_3.hdb"])

    # check temp_3.hdb
    H.hashdb(["export", "temp_3.hdb", "temp_3.json"])
    json_in3 = H.read_file("temp_3.json")
    H.lines_equals(json_in3, json3_db3)
Example 26
def test_import_tab1():
    H.rm_tempdir("temp_1.hdb")
    H.rm_tempfile("temp_1.json")
    H.hashdb(["create", "temp_1.hdb"])
    H.make_tempfile("temp_1.tab", [
        "# <file hash> <tab> <block hash> <tab> <index>",
        "0011223344556677	8899aabbccddeeff	1",
        "0000000000000000	8899aabbccddeeff	1",
        "0011223344556677	8899aabbccddeeff	2",
        "0011223344556677	ffffffffffffffff	3",
        "1111111111111111	2222222222222222	9",
        "1111111111111111	2222222222222222	9"
    ])
    H.hashdb(["import_tab", "temp_1.hdb", "temp_1.tab"])
    H.hashdb(["export", "temp_1.hdb", "temp_1.json"])

    returned_answer = H.read_file("temp_1.json")
    expected_answer = [
        "# command: ", "# hashdb-Version: ",
        '{"block_hash":"2222222222222222","k_entropy":0,"block_label":"","source_sub_counts":["1111111111111111",2]}',
        '{"block_hash":"8899aabbccddeeff","k_entropy":0,"block_label":"","source_sub_counts":["0000000000000000",1,"0011223344556677",2]}',
        '{"block_hash":"ffffffffffffffff","k_entropy":0,"block_label":"","source_sub_counts":["0011223344556677",1]}',
        '{"file_hash":"0000000000000000","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["temp_1.tab","temp_1.tab"]}',
        '{"file_hash":"0011223344556677","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["temp_1.tab","temp_1.tab"]}',
        '{"file_hash":"1111111111111111","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["temp_1.tab","temp_1.tab"]}'
    ]

    H.lines_equals(returned_answer, expected_answer)
Example 27
def format_page(master_list):
    file_input = input("Please enter a file path: ")
    master_list = read_file(file_input)

    gene_acronym, gene_sequence = sequence_gene_filter(master_list)
    gene_acronym = [line.replace('\n', '') for line in gene_acronym]

    real_list = []
    for x in gene_sequence:
        for z in x:
            real_list.append(z)
    another_list = [
        nucleotide for nucleotide in real_list if nucleotide.isalpha()
    ]

    str_list = ''
    for x in another_list:
        str_list += x

    str_list = str_list.upper()
    print()
    print(gene_acronym)
    print(str_list)

    with open("FormattedDatabase.txt", "w") as output:
        output.write(str(str_list))

    mutation_gui()
Example 28
def main():
    client, sc, reddit = helpers.initialize()

    # Uses RDDs to parallelize initial data processing.
    if config.CREATE_AUTHORS:
        db = data.get_or_create_database(client, config.DB_NAME)
        rdd = helpers.read_file(config.PATH + config.COMMENT_FILE, sc)
        rdd2 = rdd.mapPartitions(helpers.mapper)
        rdd3 = rdd2.mapPartitions(helpers.filterer)
        rdd4 = rdd3.reduceByKey(
            lambda x, y: x + y
            if y not in x else x)  # Returns [(key, [names])]
        data.create_authors(config.DB_NAME, config.AUTHOR_COLLECTION, rdd4)
        rdd.unpersist()
        rdd2.unpersist()
        rdd3.unpersist()
        rdd4.unpersist()

    # Creates subreddits and the counts of other subreddits used from there.
    if config.CREATE_SUBREDDITS:
        data.create_subreddits(config.DB_NAME, config.AUTHOR_COLLECTION,
                               config.SUBREDDIT_COLLECTION, config.SUBREDDITS)

    # Converts the subreddit data to a json data for usage.
    if config.CREATE_JSON:
        data.create_json(config.DB_NAME, config.SUBREDDIT_COLLECTION,
                         config.DATA_FILE)

    # Download subreddit icon images from reddit using reddit's API.
    if config.CREATE_IMAGES:
        helpers.get_subreddit_images(reddit, config.SUBREDDITS,
                                     config.IMAGE_PATH)

    client.close()
    return True
Example 29
	def __init__(self, verbose=0):
		
		self._V = verbose
		self.raw_yaml_str = h.read_file( self.ETC + self.CONFIG_FILE )
		
		self.conf = yaml.load(self.raw_yaml_str)
		if self._V > 0:
			print "> Loaded configs: %s" % " ".join( self.conf.keys() )
Example 30
def rebuild_combined():
    """
    Rebuild the combined file
    """
    with open(expanduser("~/.sites-combined"), "w") as file:
        text = check_output("md-to-links ~/.db/prj/websites/my/links.md", shell=True)
        file.write("\n".join(helpers.read_file(f) for f in DEFAULT_FILES))
        file.write(text.decode('utf-8'))
Example 31
def criteria_page(year, month, criteria_name):
	filename = year+"-"+month+"-"+criteria_name
	print filename
	if filename in os.listdir(config.CRITERIA_PATH):
		content = helpers.read_file(config.CRITERIA_PATH+filename)
		return content
	else:
		return 'Sorry, this page does not exist.'
Example 32
    def get_version(self):
        version = "-na-"
        if self.conf.version_no:
            version = self.conf.version_no

        if self.conf.version_file:
            version = h.read_file(self.conf.work_dir + self.conf.version_file).strip()
        return version
Example 34
def helpers():
    import helpers
    # get bacon filepath
    baconHelpersFilepath = helpers.run_command_output(
        'cd {} && cd ../ && pwd'.format(helpers.path('util'))).replace(
            '\n', '') + '/bacon/template/helpers.py'
    utilityHelpersFilepath = '/{}/{}'.format(helpers.path('util'),
                                             'helpers.py')
    # get target helpers content
    content = helpers.read_file(utilityHelpersFilepath)
    customHelpers = content.split(
        "# custom helpers start here\n# =========================")[1]
    # get default helpers template from bacon
    newDefaultHelpers = helpers.read_file(baconHelpersFilepath)
    # pack content and save
    newContent = newDefaultHelpers + customHelpers
    # print(newContent)
    helpers.write_file(utilityHelpersFilepath, newContent)
    msg.done()
Example 35
    def browse_files(self):
        filename = filedialog.askopenfilename(initialdir="/",
                                              title="Select a File",
                                              filetypes=(("Text files", "*.txt*"),
                                                         ("all files", "*.*")))
        # askopenfilename returns an empty string when the dialog is cancelled
        if filename:
            self.content = read_file(filename)
            self.parent.event_generate("<<Fileread>>")
Example 36
def test_add():
    # create new hashdb
    H.make_hashdb("temp_1.hdb", json_out1)
    H.rm_tempdir("temp_2.hdb")

    # add to new temp_2.hdb
    H.hashdb(["add", "temp_1.hdb", "temp_2.hdb"])

    # temp_2.hdb should match
    H.hashdb(["export", "temp_2.hdb", "temp_2.json"])
    json2 = H.read_file("temp_2.json")
    H.lines_equals(json2, json_out1)

    # add to existing temp_2.hdb
    H.hashdb(["add", "temp_1.hdb", "temp_2.hdb"])

    # temp_2.hdb should match
    H.hashdb(["export", "temp_2.hdb", "temp_2.json"])
    json2 = H.read_file("temp_2.json")
    H.lines_equals(json2, json_out1)
Example 37
    def compute_initial_figure(self):
        self.last_open_up = time.ctime(os.path.getmtime(self.cfg_data_fitness))
        self.last_open = self.last_open_up
        self.fitness_data = helpers.read_file(self.cfg_data_fitness)
        self.x = np.arange(0., len(self.fitness_data[0]), 1.)
        maxfit = np.array(map(float, self.fitness_data[0]))
        meanfit = np.array(map(float, self.fitness_data[1]))
        self.l1, = self.axes.plot(self.x, maxfit, 'r')
        self.axes.hold(True)
        self.l2, = self.axes.plot(self.x, meanfit, 'b')
        self.axes.hold(False)
Example 39
def test_basic_settings():
    # remove existing DB
    h.rm_tempdir("temp_1.hdb")

    # create new DB
    h.hashdb(["create", "-b4", "temp_1.hdb"])

    # validate settings parameters
    lines = h.read_file(settings1)
    h.lines_equals(lines, [
        '{"settings_version":4, "block_size":4}'
    ])
Example 40
def render_report(
    report_template_path,
    html_compare_results,
    html_unreachable_results,
    reference_flows,
):
    report_template = read_file(report_template_path)
    template = Template(report_template)
    return template.render(
        unreachable_results=html_unreachable_results,
        compare_results=html_compare_results,
        reference_flows=reference_flows,
    )
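Since the template object is built from a string and rendered with keyword arguments, Template here is presumably jinja2.Template. A hypothetical call, with the path, HTML fragments, and flow data all invented for illustration:

# Assumed usage; the template file is expected to reference the three
# variables passed to render() above.
html = render_report(
    report_template_path="templates/report.html",       # made-up path
    html_compare_results="<table>...</table>",          # pre-rendered fragment
    html_unreachable_results="<ul>...</ul>",            # pre-rendered fragment
    reference_flows={"flow-1": "permit tcp any any"},   # made-up data
)
with open("report.html", "w") as f:
    f.write(html)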
Example 41
def test_intersect():
    # create new hashdb
    H.make_hashdb("temp_1.hdb", json_set_db1)
    H.make_hashdb("temp_2.hdb", json_set_db2)
    H.rm_tempdir("temp_3.hdb")

    # intersect
    H.hashdb(["intersect", "temp_1.hdb", "temp_2.hdb", "temp_3.hdb"])
    H.hashdb(["export", "temp_3.hdb", "temp_3.json"])
    json3 = H.read_file("temp_3.json")
    H.lines_equals(json3, [
        '# command: ', '# hashdb-Version: ',
        '{"block_hash":"2222222222222222","k_entropy":2,"block_label":"bl2","source_sub_counts":["22",2]}',
        '{"file_hash":"22","filesize":2,"file_type":"B","zero_count":12,"nonprobative_count":2,"name_pairs":["r1","f1","r2","f2"]}'
    ])
Example 42
def test_basic_settings():
    # remove existing DB
    h.rm_tempdir("temp_1.hdb")

    # create new DB
    h.hashdb(["create", "-b4", "-a2", "-m500:20", "-t30:10", "temp_1.hdb"])

    # validate settings parameters
    lines = h.read_file(settings1)
    h.lines_equals(
        lines,
        [
            '{"settings_version":3, "byte_alignment":2, "block_size":4, "max_count":500, "max_sub_count":20, "hash_prefix_bits":30, "hash_suffix_bytes":10}'
        ],
    )
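Reading the create flags against the expected settings JSON suggests the following flag-to-setting mapping; this is inferred from the example itself, not taken from hashdb documentation:

# Flag-to-setting mapping implied by the test above (inferred, not documented).
flag_to_settings = {
    "-b4":      {"block_size": 4},
    "-a2":      {"byte_alignment": 2},
    "-m500:20": {"max_count": 500, "max_sub_count": 20},
    "-t30:10":  {"hash_prefix_bits": 30, "hash_suffix_bytes": 10},
}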
Example 43
def test_intersect():
    # create new hashdb
    H.make_hashdb("temp_1.hdb", json_set_db1)
    H.make_hashdb("temp_2.hdb", json_set_db2)
    H.rm_tempdir("temp_3.hdb")

    # intersect
    H.hashdb(["intersect", "temp_1.hdb", "temp_2.hdb", "temp_3.hdb"])
    H.hashdb(["export", "temp_3.hdb", "temp_3.json"])
    json3 = H.read_file("temp_3.json")
    H.lines_equals(json3, [
        '# command: ', '# hashdb-Version: ',
        '{"block_hash":"2222222222222222","k_entropy":2,"block_label":"bl2","source_offsets":["22",2,[0,512]]}',
        '{"file_hash":"22","filesize":2,"file_type":"B","zero_count":12,"nonprobative_count":2,"name_pairs":["r1","f1","r2","f2"]}'
    ])
Example 44
def test_import_tab2():
    H.rm_tempdir("temp_1.hdb")
    H.rm_tempfile("temp_1.json")
    H.hashdb(["create", "temp_1.hdb"])
    H.make_tempfile("temp_1.tab", [
          "# <file hash> <tab> <block hash> <tab> <index>",
          "0011223344556677	8899aabbccddeeff	1"])
    H.hashdb(["import_tab", "-rr", "temp_1.hdb", "temp_1.tab"])
    H.hashdb(["export", "temp_1.hdb", "temp_1.json"])

    returned_answer = H.read_file("temp_1.json")
    expected_answer = ["# command: ","# hashdb-Version: ", \
'{"block_hash":"8899aabbccddeeff","k_entropy":0,"block_label":"","source_sub_counts":["0011223344556677",1]}',
'{"file_hash":"0011223344556677","filesize":0,"file_type":"","zero_count":0,"nonprobative_count":0,"name_pairs":["r","temp_1.tab"]}'
]
    H.lines_equals(returned_answer, expected_answer)
Example 45
def debug():
    # try to remove debug file
    try:
        os.remove('debug.log')
    except OSError:
        pass
    lines = helpers.read_file('README.md')
    print('reading file finished')
    start, end = __get_info_line_num__(lines)
    print()
    print('got the start and end position of hospital list')
    print('start is :', start)
    print('end is:', end)
    print('parsing started, this can take a while, please wait')
    info_dict = __parse_all__(lines[start:end], debug_mode=True)
    print('debug finished, see the log on debug.log')
    return info_dict
Example 46
def main(mode):
    "Iterates through all JSON payloads, dispatching load according to supplied mode."
    
    modes = {
        'itemwise': load_item,
        'nodewise': load_nodes
    }

    for json_path in JSON_PATHS:
        payload = json.loads(read_file(json_path))
        
        result = modes[mode](payload) # call function corresponding to mode

        if result.success:
            print '   Successfully loaded to %s' % result.url
        else:
            dump_errors(result)
Example 48
    def update_figure(self):
        self.timer.setInterval(1000/FRAMERATE)
        changed = False
        self.last_open_up_individuals = time.ctime(os.path.getmtime(self.cfg_data_individuals))
        while self.last_open_individuals != self.last_open_up_individuals:
            changed = True
            self.last_open_individuals = self.last_open_up_individuals
            self.individuals_data = helpers.read_file(self.cfg_data_individuals)
            self.annotation_last_step = []
        if changed or CURRENT_GENERATION != self.curgen:
            self.scatter.remove()
            self.scatter_best.remove()
            self.produce_scatter()

            self.curgen = CURRENT_GENERATION
            self.draw()
Example 49
def main():
    storage_data = apricot.get_storage_status()
    today = str(date.today())
    write_file(storage_data, f"{today}.json")

    discord_data = set_member_level(apricot)

    try:
        previous_storage_data = read_file("previous_data.json")
        storage_data = previous_storage_data
    except Exception:
        previous_storage_data = storage_data
        write_file(storage_data, "previous_data.json")

    free_units, unit_state = calc_storage(storage_data, previous_storage_data)
    assignments = save_storage_data_to_file(storage_data, free_units,
                                            unit_state)
    print(assignments)
    post_data_to_site(assignments, apricot)
Example 50
    def update_figure(self):
        self.timer.setInterval(1000/FRAMERATE)
        changed = False
        self.last_open_up = time.ctime(os.path.getmtime(self.cfg_data_fitness))
        # Make sure both fitness developments are of same length
        while (self.last_open != self.last_open_up) or (len(self.fitness_data[0]) != len(self.fitness_data[1])):
            changed = True
            self.last_open = self.last_open_up
            self.fitness_data = helpers.read_file(self.cfg_data_fitness)
            time.sleep(0.05)
        if changed:
            self.x = np.arange(0., len(self.fitness_data[0]), 1.)
            maxfit = np.array(map(float, self.fitness_data[0]))
            meanfit = np.array(map(float, self.fitness_data[1]))
            self.l1, = self.axes.plot(self.x, maxfit, 'r')
            self.axes.hold(True)
            self.l2, = self.axes.plot(self.x, meanfit, 'b')
            self.axes.hold(False)
            self.draw()
Example 51
def execute():
	baconBitsPath = helpers.run_command_output('cd {} && cd ../ && pwd'.format(helpers.path('util')), False).replace('\n', '')
	baconrcFile = baconBitsPath + '/.baconrc'
	DATA = helpers.read_file(baconrcFile)
	utilList = os.listdir(baconBitsPath)
	addPerks = helpers.kv_set(settings, 'perks')
	count = 0
	# print(utilList)
	# print(addPerks)
	APPENDED_DATA_STR = DATA
	for item in utilList:
		path = baconBitsPath + '/' + item
		try:
			alias = helpers.get_alias(path)
		except Exception:
			alias = False
		if alias:
			aliasStr = 'alias {ALIAS}="python {PATH}/actions.py"'.format(ALIAS= alias, PATH= path)
			# print(aliasStr)
			pat = re.compile(aliasStr)
			match = re.search(pat, DATA)
			if not match:
				count += 1
				print('\nadding alias: {}'.format(alias))
				APPENDED_DATA_STR += '\n' + aliasStr
				if addPerks == "True" or addPerks == "true":
					aliasStrGoto = '''        elif [ $1 = "{ALIAS}" ]; then
            cd {PATH}
        #~~~ bacon:goto placeholder'''.format(ALIAS= alias, PATH= path)
					aliasStrShowme = '''        elif [ $1 = "{ALIAS}" ]; then
            open {PATH}
        #~~~ bacon:showme placeholder'''.format(ALIAS= alias, PATH= path)
					APPENDED_DATA_STR = APPENDED_DATA_STR.replace('        #~~~ bacon:goto placeholder', aliasStrGoto)
					APPENDED_DATA_STR = APPENDED_DATA_STR.replace('        #~~~ bacon:showme placeholder', aliasStrShowme)
	if count > 0:
		helpers.write_file(baconrcFile, APPENDED_DATA_STR)
	else:
		print("\n:: Nothing to add ::")

	msg.done()
Example 52
def main():
    "Iterates through all JSON payloads, dispatching load according to supplied mode."
        
    def itemwise():
        "Loads content item and nodes in a single request, except for binary assets."
        
        # assemble content substructure and add to payload properties
        slingified_json, file_nodes = slingify(payload['nodes'])
        
        new_props = (   (':operation', 'import'),
                        (':contentType', 'json'),
                        (':content', slingified_json)
                        )
        map(lambda p: payload['properties'].append({'name': p[0], 'value': p[1]}), new_props)

        # populate the page
        populate_node(base_path, payload['properties'], label='  Content item')
        
        # populate binaries
        for node in file_nodes:
            node_path = '/'.join([base_path, node['path']])
            populate_node(node_path, node['properties'], label='    Binary')
    
    def nodewise():
        "Loads content item and each node's content as a separate request."
        
        # populate the page
        populate_node(base_path, payload['properties'], label='  Content item')
        
        # populate the nodes
        for node in payload['nodes']:
            node_path = '/'.join([base_path, node['path']])
            populate_node(node_path, node['properties'], label='    Node')
    
    for json_path in JSON_PATHS:
        payload = json.loads(read_file(json_path))
        base_path = CQ_SERVER + payload['path']
        
        locals()[MODE]() # call function corresponding to mode
Example 53
def send_slack_report(passed, failed, skipped, error, browser, platform,
                      duration):
    color = '#e51c23' if failed > 0 or error > 0 else '#259b24'
    emoji = ':thumbsdown:' if failed > 0 or error > 0 else ':thumbsup:'
    total = passed + failed + skipped + error
    now = datetime.now().strftime('%d/%m/%Y %H:%M:%S')
    report = Template(read_file('templates/slack_report.json')).substitute(
        BROWSER=browser,
        DATE=now,
        EMOJI=emoji,
        COLOR=color,
        PLATFORM=platform,
        PASSED=passed,
        FAILED=failed,
        SKIPPED=skipped,
        ERROR=error,
        TOTAL=total,
        DURATION=duration)
    json_params_encoded = json.dumps(json.loads(report))
    requests.post(url=args.slack_hook,
                  data=json_params_encoded,
                  headers={"Content-type": "application/json"})
Example 54
def execute():
    baconBitsPath = helpers.run_command_output(
        'cd {} && cd ../ && pwd'.format(helpers.path('util')),
        False).replace('\n', '')
    baconrcFile = baconBitsPath + '/.baconrc'
    DATA = helpers.read_file(baconrcFile)
    utilList = os.listdir(baconBitsPath)
    count = 0
    # print(utilList)
    MODIFIED_DATA_STR = DATA
    for item in utilList:
        path = baconBitsPath + '/' + item
        try:
            alias = helpers.get_alias(path)
        except Exception:
            alias = False
        if alias:
            aliasStr1 = '''        elif [ $1 = "{ALIAS}" ]; then
            cd {PATH}
        #~~~ bacon:goto placeholder'''.format(ALIAS=alias, PATH=path)
            aliasStr2 = '''        elif [ $1 = "{ALIAS}" ]; then
            open {PATH}
        #~~~ bacon:showme placeholder'''.format(ALIAS=alias, PATH=path)
            pat = re.compile(
                'elif \[ \$1 = "{ALIAS}" \]; then'.format(ALIAS=alias))
            match = re.search(pat, DATA)
            if not match:
                count += 1
                print('\nadding utility to goto and showme: {}'.format(alias))
                MODIFIED_DATA_STR = MODIFIED_DATA_STR.replace(
                    '        #~~~ bacon:goto placeholder', aliasStr1)
                MODIFIED_DATA_STR = MODIFIED_DATA_STR.replace(
                    '        #~~~ bacon:showme placeholder', aliasStr2)
    if count > 0:
        helpers.write_file(baconrcFile, MODIFIED_DATA_STR)
    else:
        print("\n:: Nothing to add ::")

    msg.done()
Example 55
def read():
    lines = helpers.read_file('README.md')
    start, end = __get_info_line_num__(lines)
    info_dict = __parse_all__(lines[start:end])
    return info_dict
Example 56
    totals = {}

    # Check that all Location and Entity fields have mappings
    # {"location": ["est", "mun"], "entity": ["hs4_4digit"]}
    classifications = {}
    for field in ["location", "entity"]:
        if field not in config["classifications"]:
            log(
                ERROR,
                "Please supply a classification for {} called {}.".format(
                    field, field))
            sys.exit(1)
        for classification, classification_config in config["classifications"][
                field].items():
            df_class = read_file(
                os.path.join(base_path, classification_config["file"]))
            df_class = process_classification(df_class, classification_config)
            log(
                INFO, "Classification system for {}:\n {}".format(
                    classification, df_class))
            if field not in classifications:
                classifications[field] = {}
            classifications[field][classification] = df_class

    for variation in variations:

        # Load file
        file_name = file_pattern.format(**variation)
        df = read_file(file_name)

        # Check file has all the fields specified
Example 57
from collections import Counter
import sys
import os
import helpers

if len(sys.argv) == 1:
    print "Pass filename"
    sys.exit(2)

filename = sys.argv[1]
if not os.path.exists(filename):
    print "Can't open {}".format(filename)
    sys.exit(2)

data = helpers.read_file(filename)

counters = [Counter() for c in range(0, len(data[0]))]

for row in data:
    for idx, c in enumerate(list(row)):
        counters[idx][c] += 1

c1 = [c.most_common()[0][0] for c in counters]

print "".join(c1)
Example 58
    def test_env_inheritance(self):
        with helpers.update_env(TEST_VALUE='abc'):
            job = helpers.wait_for_job(self._wp.start_job('echo $TEST_VALUE'))
        self.assertEqual(helpers.read_file(job.stdout_path), 'abc\n')
Example 59
    def test_cwd(self):
        job = helpers.wait_for_job(self._wp.start_job('pwd'))
        self.assertEqual(helpers.read_file(job.stdout_path), job.path + '\n')