Example #1
def main():
    parser = argparse.ArgumentParser(description = 'Exporting data matrix from HIT summary result.')
    parser.add_argument('-f', action = 'append', help = 'The CSV files.')
    parser.add_argument('-c', help = 'The exporting columns separated with comma.')
    parser.add_argument('-o', help = 'The output file.')
    parser.add_argument('-t', help = 'The types used to filter out data row.')
    parser.add_argument('-p', default = '0', help = 'The padding for filtered rows.')
    parser.add_argument('-d', help = 'The data source file.')

    args = parser.parse_args()

    data_sources = []
    data_labels = []
    data_ids = []
    if args.d is not None:
        data_sources = utils.load_file(args.d)

        data_metainfo = regex_datasource(data_sources)

        # data_labels: flickr high interesting 1, flickr low interesting 2, pinterest [3, 4, 5]
        data_labels = data_metainfo[0]
        # data_ids: (flickr, pinterest) image id
        data_ids = data_metainfo[1]

    output = read_data(args, data_sources, data_labels)

    if args.o is not None:
        utils.write_file(output, args.o)
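
Note: each example on this page uses its own project-local write_file helper, and the signature varies from repo to repo (Example #1 passes (content, filename), Examples #14 and #31 pass (filename, content), Example #53 takes keyword arguments). A minimal sketch of the pair Example #1 appears to assume, offered as an illustration rather than the actual utils module:

def load_file(filename):
    """Return the file's lines with trailing newlines stripped (sketch)."""
    with open(filename, encoding='utf-8') as f:
        return [line.rstrip('\n') for line in f]

def write_file(output, filename):
    """Write a list of lines to filename, one line per entry (sketch)."""
    with open(filename, 'w', encoding='utf-8') as f:
        f.write('\n'.join(output))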
Example #2
def main():
    if request.method == 'GET':
        return render_template('enter_name_or_code.html')

    # get args
    company_name = request.form.get('c_name')
    ticker = request.form.get('c_ticker')

    if not company_name and not ticker:
        flash('Введите название или тикер')  # "Enter a company name or ticker"
        return redirect(url_for('main'))

    # if ticker is not provided, find it by name.
    # If not successful, flash error and redirect
    if not ticker:
        t, msg = find_single_ticker(company_name)
        if not t:
            flash(msg)
            return redirect(url_for('main'))
        ticker = t

    # get date and lag args as strings; convert only after checking presence,
    # since int(None) would raise a TypeError
    deal_date = request.form.get('deal_date')
    future_lag = request.form.get('future_lag')
    past_lag = request.form.get('past_lag')

    # validate existence
    if not deal_date or not future_lag or not past_lag:
        flash('Введите дату, лаг вперед и лаг назад.')  # "Enter a date, a forward lag and a backward lag."
        return redirect(url_for('main'))

    future_lag = int(future_lag)
    past_lag = int(past_lag)

    # validate date format
    try:
        deal_date = datetime.strptime(deal_date, '%d.%m.%Y')
    except ValueError:
        flash('Некорректный формат даты.')  # "Invalid date format."
        return redirect(url_for('main'))

    # validate weekdays
    if deal_date.weekday() in [5, 6]:
        msg = '{} - выходной день. В этот день не было торгов. Выберите рабочий день.'  # "{} is a day off; there was no trading that day. Pick a working day."
        flash(msg.format(deal_date.strftime('%d.%m.%Y')))
        return redirect(url_for('main'))

    hist_data = get_historical_data(ticker, deal_date, future_lag, past_lag)

    # get_historical_data() will return '404' if the request is unsuccessful
    # i.e. wrong ticker specified
    if hist_data == '404':
        flash('Такого тикера не существует.')  # "No such ticker exists."
        return redirect(url_for('main'))

    if not company_name:
        company_name = ticker
    filename = ticker + '.xls'
    write_file(hist_data, deal_date, company_name, filename)

    return send_file(filename,
                     as_attachment=True,
                     attachment_filename=filename)  # renamed download_name in Flask >= 2.0
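
Example #2 reads as a Flask view shown without its app setup (it uses request, flash, redirect, url_for and send_file). A minimal sketch of the wiring it appears to assume, with hypothetical setup values:

from flask import Flask, request, render_template, flash, redirect, url_for, send_file

app = Flask(__name__)
app.secret_key = 'change-me'  # flash() needs a secret key; value is a placeholder

@app.route('/', methods=['GET', 'POST'])
def main():
    ...  # body as in Example #2; the endpoint name matches url_for('main')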
Example #3
def process_json(input_file: str, output_folder: str):
    output_swift = path.join(output_folder, "Stylist+textStyles.swift")
    output_enums = path.join(output_folder, "enums_text.yaml")

    try:
        sketch_json = read_json(input_file)
    except (IOError, ValueError):
        print('Could not open file ' + input_file)
        exit(1)  # exit non-zero on failure

    text_styles_data = parse(sketch_json)
    swift_doc = swift_code(text_styles_data)
    enums_doc = enum_report(text_styles_data)

    # Create output directory if it doesn't exist.
    if not path.exists(output_folder):
        makedirs(output_folder)

    try:
        write_file(output_swift, swift_doc)
    except IOError:
        print('Could not write textStyles swift file ' + output_swift)

    try:
        write_file(output_enums, enums_doc)
    except IOError:
        print('Could not write textStyles enum file ' + output_enums)
Example #4
	def btn_export_event(self, event):
		raw = utils.pack_list(self.waypoints)
		print ""		
		print "--- ! --- Caution : exceptions won't be handled"		
		self.Warning("Unimplemented", "Gonna export to 'export.ub'")
		utils.write_file("export.ub", raw)
		print "Done exporting\n"
Example #5
def main():
    if sys.argv[1] == "sender":
        scu = SCU(mtu=1500)
        scu.bind_as_sender(receiver_address=("169.254.229.153", 8888))
        try:
            # serial
            for id in range(0, 1000):
                scu.send(f"./data/data{id}", id)
                print(f"file sent: {id}", end="\r")

            # parallel alternative (sketch; the original comment called
            # scu.send(...) immediately instead of passing a callable, and
            # used list.push, which doesn't exist in Python):
            # threads = []
            # for id in range(0, 1000):
            #     t = threading.Thread(target=scu.send, args=(f"./data/data{id}", id))
            #     t.daemon = True
            #     t.start()
            #     threads.append(t)

            # for th in threads:
            #     th.join()
        except Exception as e:
            print(e)
            scu.drop()  # Okay without it (For safety purposes)

    elif sys.argv[1] == "receiver":
        scu = SCU(mtu=1500)
        scu.bind_as_receiver(receiver_address=("169.254.155.219", 8888))
        for i in range(0, 1000):
            filedata = scu.recv()
            utils.write_file(f"./data/data{i}", filedata)
            print(f"file received: {i}", end="\r")
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--processed_break_csv",
        default='data/qdmr_data/processed_data_hotpotqa_gold.csv',
        type=str,
        help="path to the processed .csv of the break experiment (gold/predicted/rule-based).")
    parser.add_argument(
        "--out_jsonl",
        default='qdmrs_hotpot_gold.jsonl',
        type=str,
        help="the processed break .jsonl to be saved in the same dir as the input break file.")
    args = parser.parse_args()

    break_csv = args.processed_break_csv

    # process the QDMRs
    logging.info(f'processing {break_csv} ...')
    qdmrs = prepare_decompositions(break_csv)

    # operator statistics
    print_op_stats(qdmrs)

    outfile = f'{dirname(break_csv)}/{args.out_jsonl}'
    logging.info(f'saving the processed QDMRs to {outfile}')
    write_file([qdmr.__dict__ for qdmr in qdmrs], outfile)
Example #7
def dockerfile_to_singularity(dockerfile_path, output_dir=None):
    '''dockerfile_to_singularity will return a Singularity build file based on
    a provided Dockerfile. If output directory is not specified, the string
    will be returned. Otherwise, a file called Singularity will be written to 
    output_dir
    :param dockerfile_path: the path to the Dockerfile
    :param output_dir: the output directory to write the Singularity file to
    '''
    if os.path.basename(dockerfile_path) == "Dockerfile":
        spec = read_file(dockerfile_path)
        # Use a common mapping
        mapping = get_mapping()
   
        # Put into dict of keys (section titles) and list of commands (values)
        sections = organize_sections(lines=spec,
                                     mapping=mapping)

        # We have to, by default, add the Docker bootstrap
        sections["bootstrap"] = ["docker"]

        # Put into one string based on "order" variable in mapping
        build_file = print_sections(sections=sections,
                                    mapping=mapping)
        if output_dir is not None:
            write_file("%s/Singularity" % output_dir, build_file)
            print("Singularity spec written to %s" % output_dir)
        return build_file

    # If we make it here, something didn't work
    logger.error("Could not find %s, exiting.", dockerfile_path)
    return sys.exit(1)
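
A hedged usage sketch for dockerfile_to_singularity (paths are placeholders, not from the source):

# The Singularity spec is returned as a string either way:
build_file = dockerfile_to_singularity('/tmp/myapp/Dockerfile')

# With output_dir it is also written to <output_dir>/Singularity:
dockerfile_to_singularity('/tmp/myapp/Dockerfile', output_dir='/tmp/myapp')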
Example #8
def processModule(args, repoDir, outDir, module):
    """ given input paramaters, process a module  """

    moduleDir = os.path.join(repoDir, module)
    moduleOutDir = os.path.join(outDir,module)
    utils.copyMediaDir(repoDir, moduleOutDir, module)

    # Fetch and parse md file
    filein = utils.fetchMarkdownFile(moduleDir)
    with open(filein, encoding='utf-8') as md_file:
        m = model.Module(md_file, module, args.baseUrl)

    m.toHTML(args.feedback) # only generate html for all subsections

    # write HTML, XML, and JSON files
    utils.write_file(m.toGift(), moduleOutDir, '', module+'.questions_bank.gift.txt')
    utils.write_file(m.toVideoList(), moduleOutDir, '', module+'.video_iframe_list.txt')
    mod_config = utils.write_file(m.toJson(), moduleOutDir, '',  module+'.config.json') # FIXME: this file should be optionally written

    # EDX files
    if args.edx:
        m.edx_archive_path = toEDX.generateEDXArchive(m, moduleOutDir)

    # if chosen, generate IMS archive
    if args.ims:
        m.ims_archive_path = toIMS.generateImsArchive(m, module, moduleOutDir)
        logging.warning('*Path to IMS = %s*' % m.ims_archive_path)

    # return module object
    return m
Example #9
def write_address(csvfile):
    PREFECTURE = 6
    CITY = 7
    TOWN = 8

    address_set = set()
    with open(csvfile, newline="", encoding="shift_jis") as cf:
        rows = csv.reader(cf, delimiter=",")
        for row in rows:
            address = "".join([row[PREFECTURE], row[CITY]])
            town = row[TOWN]
            if town == "以下に掲載がない場合":
                continue
            else:
                sections = town.split("、")
                for s in sections:
                    house_number_index = s.find("(")
                    if house_number_index > -1:
                        s = s[:house_number_index]
                    address_set.add((address, s))

    dataset = list(address_set)

    utils.write_file(ADDRESS_TXT, dataset)
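
A quick check of the town-splitting logic above (the field value is invented, mimicking KEN_ALL.CSV-style data):

town = "大手町(次のビルを除く)"  # a town field with a parenthetical note
sections = town.split("、")
cleaned = [s[:s.find("(")] if s.find("(") > -1 else s for s in sections]
print(cleaned)  # ['大手町'] -- the "(...)" part is stripped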
Example #10
    def test_write_read_files(self):
        '''test_write_read_files will test the functions write_file and read_file
        '''
        print("Testing utils.write_file...")
        from utils import write_file
        import json
        tmpfile = tempfile.mkstemp()[1]
        os.remove(tmpfile)
        write_file(tmpfile,"hello!")
        self.assertTrue(os.path.exists(tmpfile))        

        print("Testing utils.read_file...")
        from utils import read_file
        content = read_file(tmpfile)[0]
        self.assertEqual("hello!",content)

        from utils import write_json
        print("Testing utils.write_json...")
        print("Case 1: Providing bad json")
        bad_json = {"Wakkawakkawakka'}":[{True},"2",3]}
        tmpfile = tempfile.mkstemp()[1]
        os.remove(tmpfile)        
        with self.assertRaises(TypeError) as cm:
            write_json(bad_json,tmpfile)

        print("Case 2: Providing good json")        
        good_json = {"Wakkawakkawakka":[True,"2",3]}
        tmpfile = tempfile.mkstemp()[1]
        os.remove(tmpfile)
        write_json(good_json,tmpfile)
        content = json.load(open(tmpfile,'r'))
        self.assertTrue(isinstance(content,dict))
        self.assertTrue("Wakkawakkawakka" in content)
def print_all_hits(all_hits, filename, sep = ',', field_index = 0, with_header = True):

    output = []

    count = 0
    for hit in all_hits[0]:

        (first_hit, first_hit_row, org_first_hit) = reformat_hit(hit, sep, 0)

        for hits in all_hits[1:]:
            for hit in hits:
                (hit, hit_row, org_hit) = reformat_hit(hit, sep, 0)
                
                if first_hit[4] == hit[4]:
                    (hit, hit_row, org_hit) = reformat_hit(org_hit, sep)

                    if field_index != 0:
                        (first_hit, first_hit_row, org_first_hit) = reformat_hit(org_first_hit, sep, field_index)

                    if first_hit[-1] == '':
                        first_hit_row += hit_row
                    else:
                        first_hit_row += sep + hit_row

        if count == 0 and not with_header:
            count += 1
            continue

        output.append(first_hit_row)

    if filename is not None:
        utils.write_file(output, filename)

    return output
Example #12
def print_hit_with_data_labels(hits, data_labels, filename):

    output = []

    print("data label: #" + str(len(data_labels)))

    labels = ', '.join(data_labels)

    print(labels)

    output.append(labels)

    for hit in hits:
        org_hit = hit
        hit = hit.rsplit(',')
        if (len(hit) <= 1):
            hit = org_hit.rsplit("\t")
        hit = hit[8:]
        print("hit row: #" + str(len(hit)))
        hit_row = ', '.join(hit)
        print(hit_row)
        output.append(hit_row)

    if filename is not None:
        utils.write_file(output, filename)
Example #13
 def test_get_existing_links(self):
     """Test get_existing_links()"""
     with utils.TempDir() as tmpdir:
         utils.write_file(os.path.join(tmpdir, 'orig'), 'foo')
         os.symlink('orig', os.path.join(tmpdir, 'lnk'))
         self.assertEqual(python_tools.get_existing_links(tmpdir),
                          [os.path.join(tmpdir, 'lnk')])
Example #14
    def save_items_as_text(self, event):
        """Export current selection as CSV file."""
        if hasattr(self, 'toRename'):
            CSVfile = ""
            q = app.prefs.get('logEnclose')
            sep = app.prefs.get('logSeparator')
            ext = app.prefs.get('logFextension')

            if app.showTimes:
                t = time.time()

            # create file contents
            for original, renamed in self.toRename:
                CSVfile += unicode(q + original[0] + q + sep + q + renamed[0] + q + '\n')

            if app.showTimes:
                print("Export file contents for %s items: %s" % (len(self.toRename), (time.time() - t)))

            # triggered by menu; let the user choose the output file
            if event:
                dlg = wx.FileDialog(self, message=_(u"Save current items as ..."),
                                    defaultDir='', defaultFile=u'.%s' % ext,
                                    wildcard=_(u"CSV file (*.%s)") % ext + u'|*.%s' % ext,
                                    style=wx.SAVE | wx.OVERWRITE_PROMPT
                                    )
                if dlg.ShowModal() == wx.ID_OK:
                    # attempt to write file
                    utils.write_file(dlg.GetPath(), CSVfile)
                dlg.Destroy()
            # auto logging, generate file name
            else:
                file = time.strftime("undo_%Y-%m-%d_%Hh%Mm%Ss",
                                     time.localtime()) + '.' + app.prefs.get('logFextension')
                path = os.path.join(app.prefs.get(u'logLocation'), file)
                utils.write_file(path, CSVfile)
Example #15
def build_pages(config):
    """
    Builds all the pages and writes them into the build directory.
    """
    site_navigation = nav.SiteNavigation(config['pages'], config['use_directory_urls'])
    loader = jinja2.FileSystemLoader(config['theme_dir'])
    env = jinja2.Environment(loader=loader, trim_blocks=True)

    for page in site_navigation.walk_pages():
        # Read the input file
        input_path = os.path.join(config['docs_dir'], page.input_path)
        input_content = open(input_path, 'r').read().decode('utf-8')

        # Process the markdown text
        html_content, table_of_contents, meta = convert_markdown(input_content)
        html_content = post_process_html(html_content, site_navigation)

        context = get_context(
            page, html_content, site_navigation,
            table_of_contents, meta, config
        )

        # Allow 'template:' override in md source files.
        if 'template' in meta:
            template = env.get_template(meta['template'][0])
        else:
            template = env.get_template('base.html')

        # Render the template.
        output_content = template.render(context)

        # Write the output file.
        output_path = os.path.join(config['site_dir'], page.output_path)
        utils.write_file(output_content.encode('utf-8'), output_path)
Example #16
 def post(self, request, app_name, env_name, app_path):
     action = request.data['action']
     if action == 'rename':
         env_path = _get_existent_env_path(app_path, env_name)
         new_env_name = request.data['name']
         check_name(new_env_name)
         new_env_path = _get_absent_env_path(app_path, new_env_name)
         stop_patsaks(get_id(request.dev_name, app_name, env_name))
         write_file(new_env_path, new_env_name)
         schema_prefix = get_id(request.dev_name, app_name) + ':'
         execute_sql(
             'SELECT ak.rename_schema(%s, %s)',
             (schema_prefix + env_name.lower(),
              schema_prefix + new_env_name.lower()))
         os.remove(env_path)
         return HttpResponse()
     if action == 'eval':
         request.lock.release()
         request.lock = None
         if env_name == _RELEASE_ENV_NAME:
             env_name = None
         response = send_to_ecilop(
             'EVAL ' + get_id(request.dev_name, app_name, env_name),
             request.data['expr'])
         assert response
         status = response[0]
         result = response[1:]
         assert status in ('E', 'F', 'S')
         if status == 'E':
             raise Error(result, status=NOT_FOUND)
         return {'ok': status == 'S', 'result': result}
     raise Error('Unknown action: "%s"' % action)
Example #17
def like_recs():
    counter = 0
    try:
        while counter < 20:
            results = get_recs()
            liked = utils.read_file("liked")
            instagrams = utils.read_file("/Instagram/instagrams")
            for i in results:
                time.sleep(1)
                link = 'https://api.gotinder.com/like/{0}'.format(i["_id"])
                liking_header = {'X-Auth-Token': tinder_token,
                                 'Authorization': 'Token token="{0}"'.format(tinder_token).encode('ascii', 'ignore'),
                                 'firstPhotoID': ''+str(i['photos'][0]['id'])
                                 }
                likereq = requests.get(link, headers = liking_header)
                print i['name'] + ' - ' +  i['_id']
                print 'status: ' + str(likereq.status_code) + ' text: ' + str(likereq.text)
                liked += str(i['name']) + ' - ' + str(i['_id']) + ' - ' + str(i['photos'][0]['url']) + '\n'
                try:
                    if 'instagram' in i:
                        instagrams += str(i['instagram']['username'] + " ")
                    else:
                        print "no instagram mate soz"
                except KeyError as ex:
                    print 'nah mate'
                #print "photoid " + str(i['photos'][0]['id'])
            utils.write_file("liked", liked)
            utils.write_file("/Instagram/instagrams", instagrams)
            counter += 1

    except Exception as ex:
        print "hit an exception i guess"
        print ex
Example #18
def make_info(WD, DNA_file, RNA_file, prot_file, name):
    """Function to organize all the previous ones to write a .txt file for an organism.
    ARGS:
        WD (str) -- the working directory.
        DNA_file (str) -- the name of the fasta file of the total DNA.
        RNA_file (str) -- the name of the fasta file of the RNAs.
        prot_file (str) -- the name of the fasta file of the proteins.
        name (str) -- the name for the output .txt file.
    """
    
    letterProt = "ACDEFGHIKLMNPQRSTVWY*"
    letterDNA = "CGTAN"
    str_res = "DNA stats :\n"
    DNA_file_clean = clean(WD + DNA_file)
    resDNA = count(DNA_file_clean, letterDNA)
    str_res += stats(resDNA)
    RNA_file_clean = clean(WD + RNA_file)
    resRNA = count(RNA_file_clean, letterDNA)
    str_res += "RNA stats :\n"
    str_res += stats(resRNA)
    prot_file_clean = clean(WD + prot_file)
    resProt = count(prot_file_clean, letterProt)
    str_res += "Prot stats :\n"
    str_res += stats(resProt)
    tools.write_file(WD, name + "_stats.txt", str_res)
    print(str_res)
Example #19
def create_gobject_files(classname, author, email, no_license=False):
    if no_license:
        license = ""
    else:
        license = templates.LICENSE_GPL

    attr = utils.Attributes()
    words = utils.split_camel_case(classname)

    attr.cc = classname
    attr.cc_prefix = "".join(words[1:])
    attr.lc = "_".join(words).lower()
    attr.uc = "_".join(words).upper()
    attr.uc_prefix = words[0].upper()
    attr.uc_suffix = "_".join(words[1:]).upper()
    attr.filename = "-".join(words).lower()
    attr.author = author
    attr.email = email

    filename_h = attr.filename + ".h"
    filename_c = attr.filename + ".c"

    content_h = templates.HEADER.format(license=license, **attr)
    content_c = templates.SOURCE.format(license=license, **attr)

    bundle = [(filename_h, content_h), (filename_c, content_c)]
    for filename, content in bundle:
        try:
            utils.write_file(filename, content, False)
        except utils.FileExistsError as e:
            print >> sys.stderr, format(e)
            print >> sys.stdout, utils.str_on_file_overwrite,
            answer = raw_input(_("[y|n]"))
            if answer in ["y", "Y", "yes", "YES", "Yes"]:
                utils.write_file(filename, content, True)
Example #20
 def test_sign_metadata(self):
     with patch('sign_metadata.run',
                autospec=True,
                side_effect=self.fake_gpg) as smr:
         with temp_dir() as meta_dir:
             meta_file = join(meta_dir, 'index.json')
             write_file(meta_file, self.content)
             with NamedTemporaryFile() as temp_file:
                 with patch('sign_metadata.NamedTemporaryFile',
                            autospec=True,
                            return_value=temp_file) as ntf:
                     sign_metadata('*****@*****.**', meta_dir)
                     self.verify_signed_content(meta_dir)
     signed_file = meta_file.replace('.json', '.sjson')
     gpg_file = '{}.gpg'.format(meta_file)
     calls = [
         call([
             'gpg', '--no-tty', '--clearsign', '--default-key',
             '*****@*****.**', '-o', signed_file, temp_file.name
         ]),
         call([
             'gpg', '--no-tty', '--detach-sign', '--default-key',
             '*****@*****.**', '-o', gpg_file, meta_file
         ])
     ]
     self.assertEqual(smr.mock_calls, calls)
     ntf.assert_called_once_with()
Example #21
    def test_precondition_failed(self):

        file1contents = "file 1 contents"
        
        headers = {}
        response = self.client.request("PUT", "/file.txt", headers, file1contents)
        (status, reason, body, headers) = response

        self.assertEqual(412, status)
        
        headers = {"If-None-Match": "*", "If-Match": "*"}
        response = self.client.request("PUT", "/file.txt", headers, file1contents)
        (status, reason, body, headers) = response

        self.assertEqual(412, status)
        
        headers = {"If-Match": "*"}
        response = self.client.request("PUT", "/file.txt", headers, file1contents)
        (status, reason, body, headers) = response

        self.assertEqual(412, status)
        
        utils.write_file("file.txt", file1contents)
        
        headers = {"If-None-Match": "*"}
        response = self.client.request("PUT", "/file.txt", headers, file1contents)
        (status, reason, body, headers) = response

        self.assertEqual(412, status)
Example #22
def update_branch_now(branch):
    '''
    Task: update HEAD to record which branch is now checked out
    :param branch: the branch being switched to
    :return: nothing
    '''
    write_file(['ref: refs/heads/%s\n' % (branch)], '.lgit/HEAD')
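
A sketch of the inverse read (hypothetical helper, not part of the source):

def current_branch_now():
    """Parse the 'ref: refs/heads/<branch>' line written by update_branch_now."""
    with open('.lgit/HEAD') as f:
        ref = f.read().strip()
    return ref.rsplit('/', 1)[-1]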
Example #23
 def post(self, request, app_name, env_name, app_path):
     action = request.data['action']
     if action == 'rename':
         env_path = _get_existent_env_path(app_path, env_name)
         new_env_name = request.data['name']
         check_name(new_env_name)
         new_env_path = _get_absent_env_path(app_path, new_env_name)
         stop_patsaks(get_id(request.dev_name, app_name, env_name))
         write_file(new_env_path, new_env_name)
         schema_prefix = get_id(request.dev_name, app_name) + ':'
         execute_sql('SELECT ak.rename_schema(%s, %s)',
                     (schema_prefix + env_name.lower(),
                      schema_prefix + new_env_name.lower()))
         os.remove(env_path)
         return HttpResponse()
     if action == 'eval':
         request.lock.release()
         request.lock = None
         if env_name == _RELEASE_ENV_NAME:
             env_name = None
         response = send_to_ecilop(
             'EVAL ' + get_id(request.dev_name, app_name, env_name),
             request.data['expr'])
         assert response
         status = response[0]
         result = response[1:]
         assert status in ('E', 'F', 'S')
         if status == 'E':
             raise Error(result, status=NOT_FOUND)
         return {'ok': status == 'S', 'result': result}
     raise Error('Unknown action: "%s"' % action)
Example #24
    def test_ifnonematch(self):

        file1contents = "file 1 contents"
        utils.write_file("file.txt", file1contents)

        response = self.client.request("GET", "/file.txt")
        (status, reason, body, headers) = response

        etag = utils.get_header("etag", headers)

        headers = {"If-None-Match": etag}
        response = self.client.request("GET", "/file.txt", headers)
        (status, reason, body, headers) = response
        
        self.assertEqual(status, 304)                

        file1contents = "file 1 contents - even more!"
        utils.write_file("file.txt", file1contents)

        headers = {"If-None-Match": etag}
        response = self.client.request("GET", "/file.txt", headers)
        (status, reason, body, headers) = response

        self.assertEqual(status, 200)
        self.assertEqual(file1contents, body)
Example #25
def available_models(experiments, variables, frequencies, realms, cmor_tables, outfile=None):
    """Get a list of models with the required data.

    Args:
        experiments (list): list of experiments required
        variables (list): list of variables required
        frequencies (list): list of time frequencies
        realms (list): CMIP5 realm
        cmor_tables (list): CMOR table
        outfile (Optional[str]): full file path to optionally write out data

    Returns:
        list: sorted list of model names

    """
    models = []
    iterator = itertools.product(experiments, variables, frequencies, realms, cmor_tables)
    for experiment, variable, frequency, realm, cmor_table in iterator:
        files = availability("*", experiment, frequency, realm, cmor_table, "r1i1p1", variable)
        models.append([f.split("/")[7] for f in files])

    # Get the common models in the lists of lists
    result = set(models[0])
    for item in models[1:]:
        result.intersection_update(item)

    # Optionally write the file
    if outfile is not None:
        utils.write_file(sorted(result), outfile)

    return sorted(result)
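
A hypothetical call of available_models (all argument values are placeholders, not from the source):

models = available_models(experiments=['historical', 'rcp45'],
                          variables=['tas', 'pr'],
                          frequencies=['mon'],
                          realms=['atmos'],
                          cmor_tables=['Amon'],
                          outfile='common_models.txt')  # optional file output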
Example #26
    def save_data(self, final=False):
        """
        Saves the JSON file being generated at the end of the data extraction, or updates the current file
        :param final: bool (optional), if True the final file is saved under a new name and moved to another folder
        :return: null
        """
        city = self.city.replace(' ', '_').lower()
        company = self.company.replace(' ', '_').lower()
        old_filepath = self.last_saved_file
        filename = f'{unidecode(city)}_{unidecode(company)}_page_{self.page}.json'
        new_filepath = create_path(filename=filename, folder='../data_in_progress')
        data = self.all_results
        if not len(old_filepath):
            write_file(data, new_filepath)
            self.logger.info(f"First file created - page{self.page}")

        elif final:
            final_pathname = create_path(filename=filename, folder='../data_raw', final=True)
            rename_file(data, old_filepath, final_pathname)

        else:
            rename_file(data, old_filepath, new_filepath)
            self.logger.info(f"File updated - {self.page}")

        self.last_saved_file = new_filepath
Example #27
def indexWiki():
	utils.write_file("wiki/index.html", 'Index <br/>')
	l=glob.glob("corpus/*")
	l.sort()
	for path in l:
		i=path[7:]
		utils.add_file("wiki/index.html", '<li><a href="'+i+'.html">'+i+'</a></li>')
Example #28
 def check_rss(self):
     rss_cache_dir = config.cache_dir + os.sep + "rss"
     newest_item_written = False
     if config.rss_feeds:
         self.rootlog.debug("rss feeds found:" + str(config.rss_feeds))
         for name, feed in config.rss_feeds.items():
             last_cache_item = utils.read_file(rss_cache_dir + os.sep + name)
             f = feedparser.parse(feed)
             self.rootlog.debug(str(f["channel"]["title"] + " feed fetched"))
             if last_cache_item is not None:
                 self.rootlog.debug("last_cache_item not None: " + last_cache_item)
                 for i in f["items"]:
                     if str(last_cache_item.strip()) == str(i["date"].strip()):
                         self.rootlog.debug("item found, aborting")
                         break
                     else:
                         if not newest_item_written:
                             utils.write_file(rss_cache_dir + os.sep + name, i["date"].strip())
                             newest_item_written = True
                         # write date of this feed into file (not elegant)
                         text2chat = "".join(["[", name, "] ", i["title"], " ", i["link"]])
                         self.rootlog.debug(text2chat)
                         self.send(self.room_name, text2chat)
             else:
                 self.rootlog.debug("last_cache_item is None")
                 utils.write_file(rss_cache_dir + os.sep + name, f["items"][0]["date"])
Example #29
def genereHtml(d, i):
	s = utils.read_file("corpus/" + i)
	for j in d[i]:
		pattern = re.compile(j + ' ' , re.I)
		s = pattern.sub('<a href="keywords/' + j + '.html"/>' + j + "</a> ", s)
		# print j, "\n", s.encode('utf-8'), "\n\n"
	utils.write_file("wiki/" + i + ".html", s)
Example #30
def like_recs():
    counter = 0
    try:
        while counter < 20:
            results = get_recs()
            liked = utils.read_file("liked")
            instagrams = utils.read_file("/Instagram/instagrams")
            for i in results:
                time.sleep(1)
                link = 'https://api.gotinder.com/like/{0}'.format(i["_id"])
                liking_header = {'X-Auth-Token': tinder_token,
                                 'Authorization': 'Token token="{0}"'.format(tinder_token).encode('ascii', 'ignore'),
                                 'firstPhotoID': ''+str(i['photos'][0]['id'])
                                 }
                likereq = requests.get(link, headers = liking_header)
                liked += str(i['name']) + ' - ' + str(i['_id']) + ' - ' + str(i['photos'][0]['url']) + '\n'
                try:
                    if 'instagram' in i:
                        instagrams += str(i['instagram']['username'] + " ")
                    else:
                        print "no instagram mate soz"
                except KeyError as ex:
                    print 'nah mate'
            utils.write_file("liked", liked)
            counter += 1

    except Exception as ex:
        print "hit an exception i guess"
        print ex
Example #31
    def download_images_from_webpage(self, href, out_path=None, img_name=None):
        ret = False
        print href
        sub_html = utils.wget(href)

        if not sub_html:
            print 'WARNING: request to %s failed.' % href
        else:
            ret = True
            # get the jpg image in the page
            #image_urls = re.findall(ur'<img [^>]*src="([^"]*?\.jpg)"[^>]*>', sub_html)
            #print sub_html
            image_urls = re.findall(ur'<img [^>]*src\s*=\s*"([^"]*?)"[^>]*?>', sub_html)
            print image_urls
            for image_url in image_urls:
                if not image_url.startswith('/'):
                    image_url = re.sub(ur'/[^/]*$', '/' + image_url, href)
                else:
                    image_url = re.sub(ur'^(.*?//.*?/).*$', r'\1' + image_url, href)
                print image_url

                # get the image
                image = utils.wget(image_url)

                if not image:
                    print 'WARNING: request to %s failed.' % image_url
                else:
                    # save it
                    image_path = os.path.join(out_path, img_name or re.sub(ur'^.*/', '', image_url))
                    print image_path
                    utils.write_file(image_path, image)

        return ret
Example #32
    def test_mount_fstab(self):
        """ Test mounting and unmounting devices in /etc/fstab """
        # this test will change /etc/fstab, we want to revert the changes when it finishes
        fstab = utils.read_file("/etc/fstab")
        self.addCleanup(utils.write_file, "/etc/fstab", fstab)

        succ = BlockDev.fs_vfat_mkfs(self.loop_dev, None)
        self.assertTrue(succ)

        tmp = tempfile.mkdtemp(prefix="libblockdev.",
                               suffix="mount_fstab_test")
        self.addCleanup(os.rmdir, tmp)

        utils.write_file("/etc/fstab",
                         "%s %s vfat defaults 0 0\n" % (self.loop_dev, tmp))

        # try to mount and unmount using the device
        self.addCleanup(utils.umount, self.loop_dev)
        succ = BlockDev.fs_mount(device=self.loop_dev)
        self.assertTrue(succ)
        self.assertTrue(os.path.ismount(tmp))

        succ = BlockDev.fs_unmount(self.loop_dev)
        self.assertTrue(succ)
        self.assertFalse(os.path.ismount(tmp))

        # try to mount and unmount just using the mountpoint
        self.addCleanup(utils.umount, self.loop_dev)
        succ = BlockDev.fs_mount(mountpoint=tmp)
        self.assertTrue(succ)
        self.assertTrue(os.path.ismount(tmp))

        succ = BlockDev.fs_unmount(tmp)
        self.assertTrue(succ)
        self.assertFalse(os.path.ismount(tmp))
Example #33
    def test_two_files(self):
        
        file1contents = "file 1 contents"
        file2contents = "file 2 contents contents"
        utils.write_file("file1.txt", file1contents)
        utils.write_file("file2.txt", file2contents)

        response = self.client.request("GET", "/")

        (status, reason, body, headers) = response

        self.assertEqual(200, status)

        body = eval(body)
        body = body["dir"]

        self.assertTrue(isinstance(body,list))
        self.assertEqual(2, len(body))

        self.assertEqual("file1.txt",        body[0]["name"])
        self.assertEqual(False,              body[0]["is_dir"])
        self.assertEqual(len(file1contents), body[0]["size"])

        self.assertEqual("file2.txt",        body[1]["name"])
        self.assertEqual(False,              body[1]["is_dir"])
        self.assertEqual(len(file2contents), body[1]["size"])
Example #34
def save_enrichment_set():
    lib = gp.get_library_name('Human')
    lib = lib[53]

    files = [("gcn-hom-hom", "enrich/gcn-hom-hom.csv"),
             ("gcn-hom-onto", "enrich/gcn-hom-onto.csv"),
             ("gcn-onto-onto", "enrich/gcn-onto-onto.csv"),
             ("gae-hom-hom", "enrich/gae-hom-hom.csv"),
             ("gae-hom-onto", "enrich/gae-hom-onto.csv"),
             ("gae-onto-onto", "enrich/gae-onto-onto.csv")]

    enrich_set = {}
    for key, file in files:
        print(file)
        cluster_data = read_file(file)
        for i in cluster_data:
            print(len(cluster_data[i][2]))
            try:
                enr = gp.enrichr(gene_list=list(cluster_data[i][2])[:1000], gene_sets=lib, organism='Human', cutoff=0.05).results
                name = key + "-" + str(i)
                term = enr['Term'].to_list()
                enrich_set[name] = term
                # print(i)
                print(enr)
            except Exception:
                pass

    write_file("enrich-cluster/full_result_dic.csv", enrich_set)
Example #35
 def extract_pdf(self, file_info):
     file_info['extraction'] = 'tesseract'
     file_info['text'] = utils.extract_text_from_pdf(
         file_info['file'], True)
     if self.has_save():
         utils.write_file(f'data/out/{file_info["file"].name}.txt',
                          file_info['text'])
Example #36
    def save_items_as_text(self, event):
        """Export current selection as CSV file."""
        if hasattr(self, 'toRename'):
            CSVfile = ""
            q = app.prefs.get('logEnclose')
            sep = app.prefs.get('logSeparator')
            ext = app.prefs.get('logFextension')

            if app.showTimes:
                t = time.time()

            # create file contents
            for original, renamed in self.toRename:
                CSVfile += unicode(q + original[0] + q + sep + q + renamed[0] + q + '\n')

            if app.showTimes:
                print("Export file contents for %s items: %s" % (len(self.toRename), (time.time() - t)))

            # triggered by menu; let the user choose the output file
            if event:
                dlg = wx.FileDialog(self, message=_(u"Save current items as ..."),
                                    defaultDir='', defaultFile=u'.%s' % ext,
                                    wildcard=_(u"CSV file (*.%s)") % ext + u'|*.%s' % ext,
                                    style=wx.SAVE | wx.OVERWRITE_PROMPT
                                    )
                if dlg.ShowModal() == wx.ID_OK:
                    # attempt to write file
                    utils.write_file(dlg.GetPath(), CSVfile)
                dlg.Destroy()
            # auto logging, generate file name
            else:
                file = time.strftime("undo_%Y-%m-%d_%Hh%Mm%Ss",
                                     time.localtime()) + '.' + app.prefs.get('logFextension')
                path = os.path.join(app.prefs.get(u'logLocation'), file)
                utils.write_file(path, CSVfile)
Example #37
    def test_write_read_files(self):
        '''test_write_read_files will test the functions write_file and read_file
        '''
        print("Testing utils.write_file...")
        from utils import write_file
        import json
        tmpfile = tempfile.mkstemp()[1]
        os.remove(tmpfile)
        write_file(tmpfile, "hello!")
        self.assertTrue(os.path.exists(tmpfile))

        print("Testing utils.read_file...")
        from utils import read_file
        content = read_file(tmpfile)[0]
        self.assertEqual("hello!", content)

        from utils import write_json
        print("Testing utils.write_json...")
        print("Case 1: Providing bad json")
        bad_json = {"Wakkawakkawakka'}": [{True}, "2", 3]}
        tmpfile = tempfile.mkstemp()[1]
        os.remove(tmpfile)
        with self.assertRaises(TypeError) as cm:
            write_json(bad_json, tmpfile)

        print("Case 2: Providing good json")
        good_json = {"Wakkawakkawakka": [True, "2", 3]}
        tmpfile = tempfile.mkstemp()[1]
        os.remove(tmpfile)
        write_json(good_json, tmpfile)
        content = json.load(open(tmpfile, 'r'))
        self.assertTrue(isinstance(content, dict))
        self.assertTrue("Wakkawakkawakka" in content)
Example #38
def auto_stalk():
    folder = "fbUsers/"
    sep = "/"
    extension = ".jpg"
    counter = 0
    userImgCount = 0
    smallist = []
    smallist.append(getFriendIDs()[1])
    for friend in getFriendIDs():
        jsonUserData = getUserData(friend)
        name = jsonUserData['results']['name']
        images = []
        userImgCount = 0
        folderName = folder + name + str(counter)
        for i in jsonUserData['results']['photos']:
            image = i["url"]
            if not os.path.exists(folderName):
                os.makedirs(folderName)
            path = folder + name + str(counter) + sep + str(
                userImgCount) + extension
            urllib.urlretrieve(str(image), path)
            userImgCount += 1
        utils.write_file(folderName + sep + "bio.txt",
                         jsonUserData['results']['bio'].encode('utf-8'))
        counter += 1
Example #39
def create_stash(modified_file, time_ns):
    if _is_valid_stash(modified_file):
        create_object(modified_file)
        data_stash = []
        for file in modified_file:
            data_stash.append("%s %s\n" % (hash_sha1(file), file))
        write_file(data_stash, '.lgit/refs/stash/%s' % (time_ns))
Example #40
def markup(text_path, ann_path, html_path):
    """Markup text in file `text_path` with annotations in file `ann_path` as HTML and write to
        file `html_path`
    """
    text, ann = get_entities_from_brat(text_path, ann_path)
    # print('&' * 80)
    print(len(ann), text_path, html_path)
    if not ann:
        return
    # for i, a in enumerate(ann[:5]):
    #     s = text[a['start']:a['end']]
    #     # print('%3d: %10s %s %s' % (i, a['type'], a['text'], s))
    gaps = [text[a['end']:b['start']] for a, b in zip(ann[:-1], ann[1:])]
    gaps = [text[:ann[0]['start']]] + gaps + [text[ann[-1]['end']:]]
    gaps = [abridge(g) for g in gaps]
    words = ['<b>%s</b> [%s] ' % (a['text'], a['type']) for a in ann]
    # for i, (g, w) in enumerate(list(zip(gaps, words))[:5]):
    #     print('%3d: "%s" -- "%s"' % (i, g, w))
    # print(text[:ann[5]['end']])

    gw = [g + w for g, w in zip(gaps, words)]
    gw.append(gaps[-1])
    body = '<body>%s</body>' % ''.join(gw)
    marked = '<html>%s</html>' % body

    write_file(html_path, marked)
Example #41
def trigger_process_binary(input_file, output_file, is_predict=False):
    rows = open(input_file, encoding='utf-8').read().splitlines()
    results = []
    for row in rows:
        if len(row) == 1: print(row)
        row = json.loads(row)
        start_labels = ['O'] * len(row["text"])
        end_labels = ['O'] * len(row["text"])
        if is_predict:
            results.append({
                "id": row["id"],
                "tokens": list(row["text"]),
                "start_labels": start_labels,
                "end_labels": end_labels
            })
            continue
        for event in row["event_list"]:
            trigger = event["trigger"]
            event_type = event["event_type"]
            trigger_start_index = event["trigger_start_index"]
            trigger_end_index = trigger_start_index + len(trigger) - 1
            start_labels[trigger_start_index] = event_type
            end_labels[trigger_end_index] = event_type
        results.append({
            "id": row["id"],
            "tokens": list(row["text"]),
            "start_labels": start_labels,
            "end_labels": end_labels
        })
    write_file(results, output_file)
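
To make the start/end scheme concrete, one invented input row and the labels trigger_process_binary would emit for it:

row = {"id": 1, "text": "ABCDE",
       "event_list": [{"trigger": "BC", "event_type": "X",
                       "trigger_start_index": 1}]}
# start_labels = ['O', 'X', 'O', 'O', 'O']  # trigger starts at index 1
# end_labels   = ['O', 'O', 'X', 'O', 'O']  # ends at 1 + len("BC") - 1 = 2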
Example #42
def trigger_process_bio(input_file, output_file, is_predict=False):
    rows = open(input_file, encoding='utf-8').read().splitlines()
    results = []
    for row in rows:
        if len(row) == 1: print(row)
        row = json.loads(row)
        labels = ['O'] * len(row["text"])
        if is_predict:
            results.append({
                "id": row["id"],
                "tokens": list(row["text"]),
                "labels": labels
            })
            continue
        for event in row["event_list"]:
            trigger = event["trigger"]
            event_type = event["event_type"]
            trigger_start_index = event["trigger_start_index"]
            labels[trigger_start_index] = "B-{}".format(event_type)
            for i in range(1, len(trigger)):
                labels[trigger_start_index + i] = "I-{}".format(event_type)
        results.append({
            "id": row["id"],
            "tokens": list(row["text"]),
            "labels": labels
        })
    write_file(results, output_file)
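
The same invented row under the BIO scheme of trigger_process_bio:

row = {"id": 1, "text": "ABCDE",
       "event_list": [{"trigger": "BC", "event_type": "X",
                       "trigger_start_index": 1}]}
# tokens = ['A', 'B', 'C', 'D', 'E']
# labels = ['O', 'B-X', 'I-X', 'O', 'O']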
Example #43
def convert_bio_to_segment(input_file, output_file):
    lines = open(input_file, encoding='utf-8').read().splitlines()
    res = []
    for line in lines:
        line = json.loads(line)
        text = line["text"]
        labels = line["labels"]
        tokens = list(text)
        if len(labels) != len(tokens):
            print(len(labels), len(tokens))

        triggers = extract_result(text, labels)
        if len(triggers) == 0:
            print("detect no trigger")
        for trigger in triggers:
            event_type = trigger["type"]
            segment_ids = [0] * (len(tokens))
            trigger_start_index = trigger['start']
            trigger_end_index = trigger['start'] + len(trigger['text'])
            for i in range(trigger_start_index, trigger_end_index):
                segment_ids[i] = 1
            start_labels = ['O'] * (len(tokens))
            end_labels = ['O'] * (len(tokens))

            cur_line = {}
            cur_line["id"] = line["id"]
            cur_line["tokens"] = tokens
            cur_line["event_type"] = event_type
            cur_line["segment_ids"] = segment_ids
            cur_line["start_labels"] = start_labels
            cur_line["end_labels"] = end_labels
            res.append(cur_line)
    write_file(res, output_file)
Example #44
def index_output_bio_arg(test_file, prediction_file, output_file):
    tests = open(test_file, encoding='utf-8').read().splitlines()
    predictions = open(prediction_file, encoding='utf-8').read().splitlines()
    results = []
    index = 0
    max_length = 256 - 2
    for test, prediction in zip(tests, predictions):
        index += 1
        test = json.loads(test)
        tokens = test.pop('tokens')
        test['text'] = ''.join(tokens)

        prediction = json.loads(prediction)
        labels = prediction["labels"]
        if len(labels) != len(tokens) and len(labels) != max_length:
            print(labels, tokens)
            print(len(labels), len(tokens), index)
            break

        args = extract_result(test["text"], labels)
        arguments = []
        for arg in args:
            argument = {}
            argument["role"] = arg["type"]
            argument["argument_start_index"] = arg['start']
            argument["argument"] = ''.join(arg['text'])
            arguments.append(argument)

        test.pop("labels")
        test["arguments"] = arguments
        results.append(test)
    write_file(results, output_file)
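
Assuming extract_result returns dicts with 'type', 'start' and 'text' keys (as its uses here and in Example #43 suggest), a BIO prediction maps back to arguments like this (invented data):

# labels = ['O', 'B-buyer', 'I-buyer', 'O', 'O'] over text 'ABCDE' would yield
# arguments = [{"role": "buyer", "argument_start_index": 1, "argument": "BC"}]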
Example #45
def role_process_filter(event_class,
                        input_file,
                        output_file,
                        is_predict=False):
    rows = open(input_file, encoding='utf-8').read().splitlines()
    results = []
    for row in rows:
        if len(row) == 1: print(row)
        row = json.loads(row)
        labels = ['O'] * len(row["text"])
        if is_predict: continue
        flag = False
        for event in row["event_list"]:
            event_type = event["event_type"]
            if event_class != event["class"]:
                continue
            flag = True
            for arg in event["arguments"]:
                role = arg['role']
                argument = arg['argument']
                argument_start_index = arg["argument_start_index"]
                labels[argument_start_index] = "B-{}".format(role)
                for i in range(1, len(argument)):
                    labels[argument_start_index + i] = "I-{}".format(role)
        if not flag: continue
        results.append({
            "id": row["id"],
            "tokens": list(row["text"]),
            "labels": labels
        })
    write_file(results, output_file)
Example #46
def trigger_classify_process(input_file, output_file, is_predict=False):
    rows = open(input_file, encoding='utf-8').read().splitlines()
    results = []
    count = 0
    for row in rows:
        if len(row) == 1: print(row)
        row = json.loads(row)
        count += 1
        if "id" not in row:
            row["id"] = count
        labels = []
        if is_predict:
            results.append({
                "id": row["id"],
                "text": row["text"],
                "labels": labels
            })
            continue
        for event in row["event_list"]:
            event_type = event["event_type"]
            labels.append(event_type)
        labels = list(set(labels))
        results.append({
            "id": row["id"],
            "text": row["text"],
            "labels": labels
        })
    write_file(results, output_file)
Example #47
def evaluation(grammars_test, pcky, mode):

    if mode == 'test':
        print("...........Début de l'évaluation........")  # "Start of evaluation"
        cfg_test = get_all_trees(grammars_test)
        corpus_test = [' '.join(tree.leaves()) for tree in cfg_test]
        predictions_test = []
        for i, sentence in enumerate(corpus_test):
            predictions_test.append(pcky.induce_CYK(sentence, show=False))
        status_test, predictions_test_ = [x[1] for x in predictions_test], [x[0] for x in predictions_test]
        print('Precision on test :', compute_precision(predictions_test, grammars_test))
        write_file(predictions_test_, corpus_test)
        print("...........Fin de l'évaluation........")  # "End of evaluation"
    elif mode == 'eval':
        print('....................Début.................')  # "Start"
        print("Pour sortir, entrez : exit")  # "To exit, enter: exit"
        while True:
            phrase_to_parse = str(input(">>>>> Veuillez entrer une phrase :"))  # "Please enter a sentence:"
            if phrase_to_parse == 'exit':
                break
            prediction, status = pcky.induce_CYK(phrase_to_parse, show=True)
            if status == 0:
                print("La phrase n'a pas pu être parsée")  # "The sentence could not be parsed"
            else:
                print(prediction)
            print('....................Fin...................')  # "End"
Example #48
def do_parse(opt, filename):

    ls = subprocess.Popen([djvutoxml, filename],
                          stdout=subprocess.PIPE,
                          preexec_fn=setrlimits,
                          close_fds=True)

    page_nr = 1
    for event, elem in etree.iterparse(XmlFile(ls.stdout)):
        if elem.tag.lower() == 'object':
            page = OcrPage()
            if not opt.silent:
                print >> sys.stderr, page_nr, '\r',
            page.start_page(elem)
            parse_page(page, elem, page_nr)
            page.end_page(elem)

            filename = opt.out_dir + 'page_%04d.html' % page_nr

            # compute the page text up front; previously it was only defined
            # in the compress branch, so the else path raised a NameError
            text = page.get_hocr_html().encode('utf-8')

            if opt.compress:
                utils.compress_file_data(filename, text, opt.compress)
            else:
                utils.write_file(filename, text)

            elem.clear()
            page_nr += 1

    if not opt.silent:
        print >> sys.stderr

    ls.wait()
    return ls.returncode
Example #49
    def load_text(self, p, variant):
        filename = self.cache_dir + self.lang + '/' + str(p.latestRevision())

        if not os.path.exists(filename):
            html = self.get_html(p)
            new_html = common_html.get_head(u'TITLE') + u"\n<body>"  + html + u'\n</body>\n</html>'

            new_html = new_html.replace(u'&nbsp;', u' ')

            root = etree.fromstring(new_html.encode('utf-8'))
            exclude = set()
            html_id = self.config[variant]['modernize_div_id']

            for it in root.findall(".//{http://www.w3.org/1999/xhtml}div[@id='%s']" % html_id):
                exclude.add(it)

            text = self.get_etree_text(root, exclude)
            for d in self.config[variant]['transform']:
                text = re.sub(d[0], d[1], text)

            utils.write_file(filename, text)
        else:
            text = utils.read_file(filename)

        return text
Example #50
def set_ini_properties(properties):
  aux = []
  for p in properties:
    line = parse_ini_property(p) + '=' + str(properties[p]) + '\r\n'
    aux.append(line)
  if aux:
    utils.write_file(config.get('ini_path') + config.get('emulator') + '.ini', aux)
Example #51
def unwrap(filename, key=None):
    '''
    dict unwrap(string filename, string key = None)
    filename is a JSON file (signed or not)
    If key, verifies JSON file
    Returns data from filename or {} if sign is not verificable
    '''

    if key:
        _content = open(filename, 'rb').read()
        _n = len(_content)
        utils.write_file('%s.sign' % filename, _content[_n - 256:_n])
        utils.write_file(filename, _content[0:_n - 256])

    try:
        _data = json.loads(open(filename, 'rb').read())
    except ValueError:
        print(filename)
        return {}  # no response in JSON format

    if not key:
        return _data

    if not verify(filename, key):
        return {
            'errmfs': {
                'code': server_errors.SIGN_NOT_OK,
                'info': ''
            }
        }  # Sign not OK

    os.remove('%s.sign' % filename)  # remove temp file (verify function)
    return _data
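
unwrap() above peels a 256-byte signature off the end of the file; a hedged sketch of the matching wrap step (illustrative only, not the project's API):

def wrap(filename, sign_bytes):
    """Append a 256-byte signature, matching what unwrap() strips off (sketch)."""
    assert len(sign_bytes) == 256
    with open(filename, 'ab') as f:
        f.write(sign_bytes)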
Example #52
def dockerfile_to_singularity(dockerfile_path, output_dir=None):
    '''dockerfile_to_singularity will return a Singularity build file based on
    a provided Dockerfile. If output directory is not specified, the string
    will be returned. Otherwise, a file called Singularity will be written to 
    output_dir
    :param dockerfile_path: the path to the Dockerfile
    :param output_dir: the output directory to write the Singularity file to
    '''
    if os.path.basename(dockerfile_path) == "Dockerfile":
        spec = read_file(dockerfile_path)
        # Use a common mapping
        mapping = get_mapping()

        # Put into dict of keys (section titles) and list of commands (values)
        sections = organize_sections(lines=spec, mapping=mapping)

        # We have to, by default, add the Docker bootstrap
        sections["bootstrap"] = ["docker"]

        # Put into one string based on "order" variable in mapping
        build_file = print_sections(sections=sections, mapping=mapping)
        if output_dir is not None:
            write_file("%s/Singularity" % (output_dir), build_file)
            print("Singularity spec written to %s" % (output_dir))
        return build_file

    # If we make it here, something didn't work
    logger.error("Could not find %s, exiting.", dockerfile_path)
    return sys.exit(1)
Example #53
    def create_file(self, row_count=None):
        """
        Creates a new large test file, passing in a row count to 
        set a maximum number of rows. 
        """
        self.ts_df, self.lar_df = utils.read_data_file(path=self.source_filepath, 
            data_file=self.source_filename)

        #Stores the second column of the TS data as "bank_name." 
        self.bank_name = self.ts_df.iloc[0][1]
        
        #Stores the fifteenth column of the TS data as "lei."    
        self.lei = self.ts_df.iloc[0][14]

        #Changes the TS row to the number of rows specified. 
        self.ts_df['lar_entries'] = str(row_count)

        #Changes each LAR row to the LEI specified in the TS row. 
        self.lar_df["lei"] = self.lei

        #Creates a dataframe of LAR with the number of rows specified.
        new_lar_df = self.new_lar_rows(row_count=row_count)

        #Writes file to the output filename and path. 
        utils.write_file(path=self.output_filepath, ts_input=self.ts_df, 
            lar_input=new_lar_df, name=self.output_filename)
        
        #Prints out a statement with the number of rows created, 
        #and the location of the new file.
        statement = (str("{:,}".format(row_count)) + 
            " Row File Created for " + str(self.bank_name) + 
            " File Path: " + str(self.output_filepath+self.output_filename))
        
        print(statement)
Example #54
 def __init__(self):
     self.cores = multiprocessing.cpu_count()
     self.warmup = 20
     self.parse_arguments()
     self.parse_config()
     self.p = angr.Project(
         self.args[0],
         exclude_sim_procedures_list=self.exclude_sim_procedures_list,
         load_options={'auto_load_libs': self.simlibs})
     self.drun = self.drun.replace("@", str(self.p.arch.bits))
     self.dtrace = self.dtrace.replace("@", str(self.p.arch.bits))
     self.sym_file_name = "symbolic_file"
     self.map = dict()  # mapping addr to random values to encode edges
     self.bitmap = dict()  # map of all encoded edges (of multiple inputs)
     # same as bitmap but for edges that are never explored (artifacts from
     # concrete trace)
     self.blacklist = []
     self.setup_directories()
     self.new = utils.Cache("new", self.temp_dir)
     self.delayed = utils.Cache("delayed", self.temp_dir)
     txt = "Fuzzing " + self.cmd + "\n"
     txt += "Hybrid: " + str(self.hybrid)
     utils.write_file(self.temp_dir + "stats", txt, "w")
     print("[+] Setup complete\n[+] Identified " + \
         str(self.cores) + " usable cores")
     print("[!] Please use echo core > /proc/sys/kernel/core_pattern before using this program")
     self.simfiles = []
     if self.verbose:
         self.msg = utils.msg
     else:
         self.msg = utils.dummy
Example #55
def do_parse(opt, filename):

    ls = subprocess.Popen([djvutoxml, filename], stdout=subprocess.PIPE,
                          preexec_fn=setrlimits, close_fds=True)

    page_nr = 1
    for event, elem in etree.iterparse(XmlFile(ls.stdout)):
        if elem.tag.lower() == 'object':
            page = OcrPage()
            if not opt.silent:
                print >> sys.stderr, page_nr, '\r',
            page.start_page(elem)
            parse_page(page, elem, page_nr)
            page.end_page(elem)

            filename = opt.out_dir + 'page_%04d.html' % page_nr

            # compute the page text up front; previously it was only defined
            # in the compress branch, so the else path raised a NameError
            text = page.get_hocr_html().encode('utf-8')

            if opt.compress:
                utils.compress_file_data(filename, text, opt.compress)
            else:
                utils.write_file(filename, text)

            elem.clear()
            page_nr += 1

    if not opt.silent:
        print >> sys.stderr

    ls.wait()
    return ls.returncode
Example #56
 def test_get_file(self):
     """Test get_file function"""
     with utils.TempDir() as tmpdir:
         fname = os.path.join(tmpdir, 'foo')
         utils.write_file(fname, 'foobar')
         fh, fn = check_standards.get_file(fname)
         self.assertEqual(fn, fname)
         self.assertEqual(fh.read(), 'foobar')
Example #57
 def test_pingdom(self):
     self.get('pingdom/')
     GroupedMessage.objects.create(message='error')
     self.assertEqual(
         self.get('pingdom/', status=INTERNAL_SERVER_ERROR), 'chatlanian')
     write_file(ROOT.log, 'error')
     self.assertEqual(
         self.get('pingdom/', status=INTERNAL_SERVER_ERROR), 'patsak')
Example #58
 def test_update_file_content(self):
     with NamedTemporaryFile() as meta_file:
         with NamedTemporaryFile() as dst_file:
             write_file(meta_file.name, self.content)
             update_file_content(meta_file.name, dst_file.name)
             with open(dst_file.name) as dst:
                 dst_file_content = dst.read()
             self.assertEqual(dst_file_content, self.expected)