Example #1
def brickfind_crawl(brick, args):
    if brick.endswith("/"):
        brick = brick[0:len(brick)-1]

    working_dir = os.path.dirname(args.outfile)
    mkdirp(working_dir, exit_on_err=True, logger=logger)
    create_file(args.outfile, exit_on_err=True, logger=logger)

    with open(args.outfile, "a+") as fout:
        brick_path_len = len(brick)

        def output_callback(path, filter_result, is_dir):
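            # Strip the brick prefix from the path and write it out, honoring the requested type filter (f, d or both)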
            path = path.strip()
            path = path[brick_path_len+1:]

            if args.type == "both":
                output_write(fout, path, args.output_prefix,
                             encode=(not args.no_encode), tag=args.tag,
                             field_separator=args.field_separator)
            else:
                if (is_dir and args.type == "d") or (
                    (not is_dir) and args.type == "f"):
                    output_write(fout, path, args.output_prefix,
                                 encode=(not args.no_encode), tag=args.tag,
                                 field_separator=args.field_separator)

        ignore_dirs = [os.path.join(brick, dirname)
                       for dirname in
                       conf.get_opt("brick_ignore_dirs").split(",")]

        find(brick, callback_func=output_callback,
             ignore_dirs=ignore_dirs)

        fout.flush()
        os.fsync(fout.fileno())
Example #2
def brickfind_crawl(brick, args):
    if brick.endswith("/"):
        brick = brick[0:len(brick)-1]

    working_dir = os.path.dirname(args.outfile)
    mkdirp(working_dir, exit_on_err=True, logger=logger)
    create_file(args.outfile, exit_on_err=True, logger=logger)

    with open(args.outfile, "a+") as fout:
        brick_path_len = len(brick)

        def output_callback(path, filter_result):
            path = path.strip()
            path = path[brick_path_len+1:]
            output_write(fout, path, args.output_prefix, encode=True)

        ignore_dirs = [os.path.join(brick, dirname)
                       for dirname in
                       conf.get_opt("brick_ignore_dirs").split(",")]

        find(brick, callback_func=output_callback,
             ignore_dirs=ignore_dirs)

        fout.flush()
        os.fsync(fout.fileno())
Example #3
def prefix(asn):
    # ASes = []
    # f = open('ASes', 'r')
    # for line in f.readlines():
    #	 ASes.append(line.rstrip())

    # for asn in ASes:
    # asn = asn.rstrip('\n')
    as_base = 'AS'
    ASN = as_base + asn
    url_base = 'http://ipinfo.io/'

    sys.stdout.write("[" + ASN + "] " + "Getting prefixes...")
    sys.stdout.flush()

    filename = "Prefix/" + ASN
    create_file(filename)
    output = open(filename, "w")

    page = requests.get(url_base + ASN)
    html_doc = page.content
    soup = BeautifulSoup(html_doc, 'html.parser')
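    # Each advertised prefix appears as a link on the AS page; strip the '/AS<asn>/' prefix and keep the rest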
    for link in soup.find_all('a'):
        if asn in link.get('href'):
            auxstring = '/' + as_base + asn + '/'
            line = re.sub(auxstring, '', link.get('href'))
            printstring = line + '\n'
            if 'AS' not in printstring:
                output.write(printstring)

    sys.stdout.write(" Done!\n")
    sys.stdout.write("[" + ASN + "] " + "Prefixes are in file: ./" + filename +
                     "\n")
Example #4
def brickfind_crawl(brick, args):
    if brick.endswith("/"):
        brick = brick[0:len(brick)-1]

    working_dir = os.path.dirname(args.outfile)
    mkdirp(working_dir, exit_on_err=True, logger=logger)
    create_file(args.outfile, exit_on_err=True, logger=logger)

    with open(args.outfile, "a+") as fout:
        brick_path_len = len(brick)

        def output_callback(path, filter_result):
            path = path.strip()
            path = path[brick_path_len+1:]
            output_write(fout, path, args.output_prefix,
                         encode=(not args.no_encode), tag=args.tag)

        ignore_dirs = [os.path.join(brick, dirname)
                       for dirname in
                       conf.get_opt("brick_ignore_dirs").split(",")]

        find(brick, callback_func=output_callback,
             ignore_dirs=ignore_dirs)

        fout.flush()
        os.fsync(fout.fileno())
Example #5
def changelog_crawl(brick, end, args):
    """
    Init function, prepares working dir and calls Changelog query
    """
    if brick.endswith("/"):
        brick = brick[0:len(brick)-1]

    # WORKING_DIR/BRICKHASH/OUTFILE
    working_dir = os.path.dirname(args.outfile)
    brickhash = hashlib.sha1(brick)
    brickhash = str(brickhash.hexdigest())
    working_dir = os.path.join(working_dir, brickhash)

    mkdirp(working_dir, exit_on_err=True, logger=logger)
    create_file(args.outfile, exit_on_err=True, logger=logger)
    create_file(args.outfile + ".gfids", exit_on_err=True, logger=logger)

    log_file = os.path.join(conf.get_opt("log_dir"),
                            args.session,
                            args.volume,
                            "changelog.%s.log" % brickhash)

    logger.info("%s Started Changelog Crawl. Start: %s, End: %s"
                % (brick, args.start, end))
    get_changes(brick, working_dir, log_file, end, args)
Example #6
File: compile.py Project: jmcruz1983/UBS
 def __init__(self, *args, **kwargs):
     # Setup
     bs = BuildSetup()
     self.os_environ = bs.get_os_environ()
     self.author = bs.get_author()
     self.java_home = bs.get_java_home()
     self.android_home = bs.get_android_home()
     self.aapt_bin = bs.get_aapt_bin()
     self.dx_bin = bs.get_dx_bin()
     self.javac_bin = bs.get_javac_bin()
     self.android_jar = bs.get_android_jar()
     # Local setup
     if not kwargs.has_key('name'):
         log.warn('No project name given!')
         return
     self.project_name = kwargs['name']
     self.project_path = os.path.join(bs.get_workspace_path(),
                                      self.project_name)
     self.package_name = 'com.%s.%s'%(self.author,
                                      self.project_name)
     self.app_manifest = os.path.join(self.project_path,
                                      'AndroidManifest.xml')
     self.meta_info_path = os.path.join(self.project_path,
                                        'meta.info')
     create_file(self.meta_info_path)
Example #7
def createMocFile(data):
    columns = json.loads(data.get("columns"))
    dataBase = data.get("dataBase")
    fields = []
    enums = {}
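    # Build one field descriptor per column; enum columns also get an entry in the enums lookup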
    for column in columns:
        columnName = column.get("column_name")
        columnType = column.get("data_type")
        tempField = {"dataType": columnType, "columnName": columnName}
        if columnType in ("string", "date", "datetime"):
            tempField["length"] = column.get("character_maximum_length")
        elif columnType == "enum":
            tempField["enumName"] = columnName + "Enum"
            enums[columnName + "Enum"] = getEnumDict(column.get("enum_code"),
                                                     dataBase)
        fields.append(tempField)
    createData = {
        "fields": fields,
        "enums": enums,
        "mocName": data.get("mocName"),
        "table": data.get("table")
    }
    utils.create_file(
        data.get("packageName") + "/moc",
        render_template('/generate/moc.xml', data=createData),
        data.get("mocName") + ".xml")
Example #8
File: views.py Project: pwzgorilla/nae-web
def createFile(request):
    if request.method == 'POST':
        file_name=request.POST.get("filename","test")
        #save file to localhost repo
        repo_path=utils.get_repo_path(file_name)
        rev_control=utils.MercurialRevisionControl()
        rev_control.create_repo(repo_path)

        auth_user=request.session['auth_username']
        rev_control.hg_rc(repo_path,'ui','username',auth_user)

        file_content=request.POST.get("content","")
        utils.create_file(repo_path,file_content)

        rev_control.add(repo_path)
        rev_control.commit(repo_path)
        #utils.write_file(dockerfile,request.POST.get("content",""))
        #revision_control=utils.MercurialRevisionControl()
        #revision_control.create_repo(
        #save file to db
        file_path=utils.get_file_path(file_name)
        file_size=utils.get_file_size(file_path)
        created=utils.get_current_datatime()
        created_by=request.session.get("auth_username")
        modified=created
        modified_by=created_by 
        path=file_path
        data=DockerFiles(Name=file_name,Size=file_size,Created=created,CreatedBy=created_by,Modified=modified,ModifiedBy=modified_by,Path=path)
        data.save()
    return HttpResponseRedirect('/admin/files')
Example #9
def mode_pre(session_dir, args):
    global gtmpfilename
    """
    Read from Session file and write to session.pre file
    """
    endtime_to_update = int(time.time()) - get_changelog_rollover_time(
        args.volume)
    status_file = os.path.join(session_dir, args.volume, "status")
    status_file_pre = status_file + ".pre"

    mkdirp(os.path.dirname(args.outfile), exit_on_err=True, logger=logger)

    # If Pre status file exists and running pre command again
    if os.path.exists(status_file_pre) and not args.regenerate_outfile:
        fail("Post command is not run after last pre, "
             "use --regenerate-outfile")

    start = 0
    try:
        with open(status_file) as f:
            start = int(f.read().strip())
    except ValueError:
        pass
    except (OSError, IOError) as e:
        fail("Error Opening Session file %s: %s" % (status_file, e),
             logger=logger)

    logger.debug("Pre is called - Session: %s, Volume: %s, "
                 "Start time: %s, End time: %s" %
                 (args.session, args.volume, start, endtime_to_update))

    prefix = datetime.now().strftime("%Y%m%d-%H%M%S-%f-")
    gtmpfilename = prefix + next(tempfile._get_candidate_names())

    run_cmd_nodes("pre", args, start=start, end=-1, tmpfilename=gtmpfilename)

    # Merger
    if args.full:
        cmd = ["sort", "-u"] + node_outfiles + ["-o", args.outfile]
        execute(cmd,
                exit_msg="Failed to merge output files "
                "collected from nodes",
                logger=logger)
    else:
        # Read each Changelogs db and generate finaldb
        create_file(args.outfile, exit_on_err=True, logger=logger)
        outfilemerger = OutputMerger(args.outfile + ".db", node_outfiles)
        write_output(args.outfile, outfilemerger, args.field_separator)

    try:
        os.remove(args.outfile + ".db")
    except (IOError, OSError):
        pass

    run_cmd_nodes("cleanup", args, tmpfilename=gtmpfilename)

    with open(status_file_pre, "w", buffering=0) as f:
        f.write(str(endtime_to_update))

    sys.stdout.write("Generated output file %s\n" % args.outfile)
Example #10
File: test_unstow.py Project: sleach/dploy
def test_unstow_folding_with_existing_file_in_dest(source_a, source_b, dest):
    os.mkdir(os.path.join(dest, 'aaa'))
    a_file = os.path.join(dest, 'aaa', 'a_file')
    utils.create_file(a_file)
    dploy.stow([source_a, source_b], dest)
    dploy.unstow([source_a], dest)
    assert os.path.exists(a_file)
Example #11
def mode_pre(session_dir, args):
    global gtmpfilename

    """
    Read from Session file and write to session.pre file
    """
    endtime_to_update = int(time.time()) - get_changelog_rollover_time(
        args.volume)
    status_file = os.path.join(session_dir, args.volume, "status")
    status_file_pre = status_file + ".pre"

    mkdirp(os.path.dirname(args.outfile), exit_on_err=True, logger=logger)

    # If Pre status file exists and running pre command again
    if os.path.exists(status_file_pre) and not args.regenerate_outfile:
        fail("Post command is not run after last pre, "
             "use --regenerate-outfile")

    start = 0
    try:
        with open(status_file) as f:
            start = int(f.read().strip())
    except ValueError:
        pass
    except (OSError, IOError) as e:
        fail("Error Opening Session file %s: %s"
             % (status_file, e), logger=logger)

    logger.debug("Pre is called - Session: %s, Volume: %s, "
                 "Start time: %s, End time: %s"
                 % (args.session, args.volume, start, endtime_to_update))

    prefix = datetime.now().strftime("%Y%m%d-%H%M%S-%f-")
    gtmpfilename = prefix + next(tempfile._get_candidate_names())

    run_cmd_nodes("pre", args, start=start, end=-1, tmpfilename=gtmpfilename)

    # Merger
    if args.full:
        cmd = ["sort", "-u"] + node_outfiles + ["-o", args.outfile]
        execute(cmd,
                exit_msg="Failed to merge output files "
                "collected from nodes", logger=logger)
    else:
        # Read each Changelogs db and generate finaldb
        create_file(args.outfile, exit_on_err=True, logger=logger)
        outfilemerger = OutputMerger(args.outfile + ".db", node_outfiles)
        write_output(args.outfile, outfilemerger, args.field_separator)

    try:
        os.remove(args.outfile + ".db")
    except (IOError, OSError):
        pass

    run_cmd_nodes("cleanup", args, tmpfilename=gtmpfilename)

    with open(status_file_pre, "w", buffering=0) as f:
        f.write(str(endtime_to_update))

    sys.stdout.write("Generated output file %s\n" % args.outfile)
Example #12
 def load(self):
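     # Make sure the backing file exists, then return its contents as a list of lines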
     if not isfile(self._filename):
         create_file(self._filename)
     with open(self._filename, 'r') as f:
         l = []
         for line in f:
             l.append(line)
     return l
Example #13
 def train_agent(self, agent_name):
     try:
         train_data = {}
         train_data["rasa_nlu_data"] = {}
         training = self._get_agent_training_data(agent_name)
         train_data["rasa_nlu_data"]["common_examples"] = training
         if training in [[], None]:
             logger.warn(
                 "Agent {0} has no training data".format(agent_name))
             return ("Could not found training data for Agent {0}".format(
                 agent_name))
         if self._has_at_list_two_intents(training):
             logger.warn(
                 "Agent {0} has only one intent, while a minimum of two intents are needed"
                 .format(agent_name))
             return ("Could not train Agent {0}, only found 1 intent".
                     format(agent_name))
         else:
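             # Write training data and config to temp JSON files, train the agent, persist the model, then clean up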
             try:
                 os.makedirs("./models/" + agent_name)
             except FileExistsError:
                 """directory already exists"""
                 pass
             train_data["rasa_nlu_data"][
                 "lookup_tables"] = self._get_agent_lookups(agent_name)
             train_data["rasa_nlu_data"][
                 "entity_synonyms"] = self._get_agent_synonyms(agent_name)
             config = self.agents_repository.get_agent_config(
                 agent_name).get("config")
             training_data_file = os.environ.get(
                 "MODELS_PATH") + agent_name + ".json"
             config_file = os.environ.get(
                 "MODELS_PATH") + agent_name + "_config.json"
             create_file(training_data_file, json.dumps(train_data))
             create_file(config_file, json.dumps(config))
             agent = Agent(agentName=agent_name,
                           botconfig=config_file,
                           data=training_data_file)
             model_recorder = get_recorder(os.environ.get("MODEL_RECORDER"))
             model_recorder.save(agent_name, agent.model_path,
                                 agent.model_name, agent.model_version)
             versions = model_recorder.list_versions(agent_name)
             now = int(datetime.timestamp(datetime.now()))
             self.agents_repository.update_trained_agent(
                 agent_name, agent, versions, now)
             """Clean Up"""
             remove_file_or_dir(training_data_file)
             remove_file_or_dir(config_file)
             remove_file_or_dir(os.environ.get("MODELS_PATH") + agent_name)
             logger.info(
                 "Bot {0}, successfully trained, and model {1} persisted. ".
                 format(agent_name, agent.model_name))
     except Exception as e:
         logger.error("Exception when training agent {0}. {1}".format(
             agent_name, e),
                      exc_info=True)
Example #14
 def store(self, s):
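     # Append s by re-writing the existing lines followed by the new entry, creating the file if missing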
     if not isfile(self._filename):
         create_file(self._filename)
     f = open(self._filename, 'r')
     lines = f.readlines()
     f.close()
     with open(self._filename, 'w') as f:
         for line in lines:
             f.write(line)
         f.write(s)
Example #15
def main():
    # Create file with $1_$2.log
    # Create file with $1_$2_long.log
    # Start bot
    global log, log_long, log_directory
    create_folder(log_directory)
    log = create_file(log_directory + username + "_" + stream + ".log")
    log_long = create_file(log_directory + username + "_" + stream +
                           "_long.log")
    bot()
Example #16
def traceroute(asn, key, slicename, nodenum):
	directory = "Traceroutes"
	key = paramiko.RSAKey.from_private_key_file(key)
	paramiko.util.log_to_file("rocketfuel.log")
	create_file(directory + "/")
	ips = _get_ips(asn)
	nodes = _planet_lab_nodes(nodenum)
	try:
		_run(asn, key, slicename, nodes, ips)
	except Exception:
		pass
Example #17
def do_create_httpd(options):
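    # Render the httpd config template with the site options and write <site_name>.conf into the httpd directory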
    config_path = options['system']['httpd_config_path']
    content = utils.get_config_content(config_path)
    content = get_prepared_content(content, options, 'httpd')

    httpd_path = options['system']['httpd_path']
    file_name = options['httpd']['site_name'] + '.conf'
    full_file_path = httpd_path + file_name

    utils.create_file(full_file_path, content)

    utils.success_msg("Httpd File was Created.")
Example #18
    def load_success_files(cls):
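        # Return a dict keyed by RecoveryInfo for every file recorded as successfully downloaded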
        success_files = {}
        if not os.path.exists(DownloadedFiles.FILE_NAME):
            utils.create_file(DownloadedFiles.FILE_NAME)

        with open(DownloadedFiles.FILE_NAME, 'r') as f:
            lines = f.readlines()
            for line in lines:
                date_id, file_name = RecoveryInfo.from_self_str(line)
                if date_id is not None and file_name is not None:
                    success_files[RecoveryInfo(date_id, file_name)] = True
        return success_files
Example #19
def save_all(news, db_name):
    filename = f'db/{db_name}.db'
    create_file(filename)
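    # Bind the sqlite database to this file, map the tables, then insert one news item per article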
    db.bind(provider='sqlite', filename=filename, create_db=True)
    db.generate_mapping(create_tables=True)
    for country, news_list in news.items():
        for new in news_list:
            create_news_item(country=country,
                             reference=new['url'],
                             date=new['publishedAt'].split('T')[0],
                             title=new['title'],
                             lead=new['description'],
                             content=new['content'])
Example #20
def test_import__with_whitespace(runner):
    with runner.isolated_filesystem():
        result = runner.invoke(cli, ['create', 'hello'])
        create_file()
        result = runner.invoke(cli, ['import', 'csv'],
                               input="\rhello.csv\t\n,\n\n")
        project_dir = os.path.join(runner.env['POP_PROJECT_HOME'], 'hello')
        project_store_exist = os.path.exists(
            os.path.join(project_dir, 'data/store.h5'))

        #assert project_store_exist
        assert 'Data injected' in result.output
        assert result.exit_code == 0
Example #21
def mode_query(session_dir, args):
    # Verify volume status
    cmd = ["gluster", 'volume', 'info', args.volume, "--xml"]
    _, data, _ = execute(cmd,
                         exit_msg="Failed to Run Gluster Volume Info",
                         logger=logger)
    try:
        tree = etree.fromstring(data)
        statusStr = tree.find('volInfo/volumes/volume/statusStr').text
    except (ParseError, AttributeError) as e:
        fail("Invalid Volume: %s" % e, logger=logger)

    if statusStr != "Started":
        fail("Volume %s is not online" % args.volume, logger=logger)

    mkdirp(session_dir, exit_on_err=True, logger=logger)
    mkdirp(os.path.join(session_dir, args.volume),
           exit_on_err=True,
           logger=logger)
    mkdirp(os.path.dirname(args.outfile), exit_on_err=True, logger=logger)

    # Configure cluster for password-less SSH
    ssh_setup(args)

    # Enable volume options for changelog capture
    enable_volume_options(args)

    # Start query command processing
    if args.since_time:
        start = args.since_time
        logger.debug("Query is called - Session: %s, Volume: %s, "
                     "Start time: %s" % ("default", args.volume, start))

        run_cmd_nodes("query", args, start=start)

        # Merger
        # Read each Changelogs db and generate finaldb
        create_file(args.outfile, exit_on_err=True, logger=logger)
        outfilemerger = OutputMerger(args.outfile + ".db", node_outfiles)
        write_output(args, outfilemerger)

        try:
            os.remove(args.outfile + ".db")
        except (IOError, OSError):
            pass

        run_cmd_nodes("cleanup", args)

        sys.stdout.write("Generated output file %s\n" % args.outfile)
    else:
        fail("Please specify --since-time option")
Example #22
File: main.py Project: dluobo/glusterfs
def mode_query(session_dir, args):
    # Verify volume status
    cmd = ["gluster", 'volume', 'info', args.volume, "--xml"]
    _, data, _ = execute(cmd,
                         exit_msg="Failed to Run Gluster Volume Info",
                         logger=logger)
    try:
        tree = etree.fromstring(data)
        statusStr = tree.find('volInfo/volumes/volume/statusStr').text
    except (ParseError, AttributeError) as e:
        fail("Invalid Volume: %s" % e, logger=logger)

    if statusStr != "Started":
        fail("Volume %s is not online" % args.volume, logger=logger)

    mkdirp(session_dir, exit_on_err=True, logger=logger)
    mkdirp(os.path.join(session_dir, args.volume), exit_on_err=True,
           logger=logger)
    mkdirp(os.path.dirname(args.outfile), exit_on_err=True, logger=logger)

    # Configure cluster for password-less SSH
    ssh_setup(args)

    # Enable volume options for changelog capture
    enable_volume_options(args)

    # Start query command processing
    if args.since_time:
        start = args.since_time
        logger.debug("Query is called - Session: %s, Volume: %s, "
                     "Start time: %s"
                     % ("default", args.volume, start))

        run_cmd_nodes("query", args, start=start)

        # Merger
        # Read each Changelogs db and generate finaldb
        create_file(args.outfile, exit_on_err=True, logger=logger)
        outfilemerger = OutputMerger(args.outfile + ".db", node_outfiles)
        write_output(args, outfilemerger)

        try:
            os.remove(args.outfile + ".db")
        except (IOError, OSError):
            pass

        run_cmd_nodes("cleanup", args)

        sys.stdout.write("Generated output file %s\n" % args.outfile)
    else:
        fail("Please specify --since-time option")
Example #23
def do_create_nginx(options):
    if not utils.is_true('create_nginx', options['nginx']):
        return False

    config_path = options['system']['nginx_config_path']
    content = utils.get_config_content(config_path)
    content = get_prepared_content(content, options, 'nginx')

    httpd_path = options['system']['nginx_path']
    file_name = options['nginx']['site_name'] + '.conf'
    full_file_path = httpd_path + file_name

    utils.create_file(full_file_path, content)
    utils.success_msg("Nginx File was Created.")
Example #24
def main(debug, b_reg):
    """
    in: bool debug, bool b register
    out: void
    """
    ML = []
    st = SymbolTable()
    if len(sys.argv) < 2:
        no_file_arg()
    input_file = sys.argv[1]
    name = os.path.splitext(input_file)[0]
    parsed = Parser(input_file, debug, b_reg)
    rom_address = 0
    ram_address = 16
    """
    First pass
    """
    while parsed.has_more_cmds():
        if parsed.command_type() == "C_COMMAND" or parsed.command_type() == "A_COMMAND":
            rom_address += 1
        elif parsed.command_type() == "L_COMMAND":
            st.add_entry(parsed.symbol(), rom_address)
        parsed.advance()
    parsed.reset()

    """
    Second pass
    """
    i = 0
    while parsed.has_more_cmds():
        cc = parsed.b_cc() # account for b reg
        command_type = parsed.command_type()
        if command_type == "A_COMMAND":
            """
            Handle A commands.
            """
            if st.contains(cc[1:]):
                ML.append(parsed.a_int_to_binary(st.get_address(cc[1:])))
            elif parsed.cc_is_int():
                ML.append(parsed.a_int_to_binary(cc))
            elif not st.contains(cc[1:]):
                st.add_entry(cc[1:], ram_address)
                ML.append(parsed.a_int_to_binary(str(st.get_address(cc[1:]))))
                ram_address += 1
        else:
            ML.append(parsed.c_to_binary(cc, command_type, st))
        parsed.advance()
        i += 1
    create_file(ML, name)
Example #25
File: base.py Project: cern-fts/gfal2-util
    def setUp(self):
        self.dirname = '/tmp/test_' + utils.create_random_suffix()
        os.mkdir(self.dirname)

        self.subdir = self.dirname + '/test_' + utils.create_random_suffix()
        os.mkdir(self.subdir)

        self.fname1 = 'f1_' + utils.create_random_suffix()
        self.fname2 = 'f2_' + utils.create_random_suffix()

        self.ffname1 = self.dirname + '/' + self.fname1
        self.ffname2 = self.dirname + '/' + self.fname2

        utils.create_file(self.ffname1, 1025)
        utils.create_file(self.ffname2, 1025)
Example #26
    def load_failed_infos(cls, past_days=1):
        recovery_infos = []

        for i in range(1, past_days + 1):
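            # One recovery file per past day; create it if missing so the open below succeeds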
            recovery_file = RecoveryFiles.FILE_NAME.format(
                utils.get_date_string(-1 * i))

            if not os.path.exists(recovery_file):
                utils.create_file(recovery_file)

            with open(recovery_file, 'r') as f:
                lines = f.readlines()
                for line in lines:
                    date_id, file_name = RecoveryInfo.from_self_str(line)
                    if date_id is not None and file_name is not None:
                        recovery_infos.append(RecoveryInfo(date_id, file_name))
        return set(recovery_infos)
Example #27
def createI18NFile(data):
    columns = json.loads(data.get("columns"))
    strprefix = data.get("strprefix")
    cns, ens = [], []
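    # Collect zh_CN and en_US i18n key/value pairs for every column flagged with creat_i18n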
    for column in columns:
        if column.get("creat_i18n") == 'on':
            i18nStr = strprefix + column.get("column_name")
            if column.get("data_type") in ("date", "datetime"):
                cns.append({
                    "key":
                    i18nStr + "_search",
                    "value":
                    utils.to_unicode("查询" + column.get("cn_name"))
                })
                ens.append({
                    "key": i18nStr + "_search",
                    "value": "Search" + column.get("en_name")
                })
            cns.append({
                "key": i18nStr,
                "value": utils.to_unicode(column.get("cn_name"))
            })
            ens.append({"key": i18nStr, "value": column.get("en_name")})
    cns.append({
        "key": "com.zhiyin.mes.app.to",
        "value": utils.to_unicode("到")
    })
    ens.append({"key": "com.zhiyin.mes.app.to", "value": "To"})
    if data.get("checkFactory") == 'on':
        cns.append({
            "key": "com.zhiyin.mes.app.factory_name",
            "value": utils.to_unicode("所属工厂")
        })
        ens.append({
            "key": "com.zhiyin.mes.app.factory_name",
            "value": "FactoryName"
        })
    utils.create_file(
        data.get("packageName") + "/i18n",
        render_template('/generate/i18n.txt', data=cns),
        data.get("packageName") + ".datagrid_zh_CN.properties")
    utils.create_file(
        data.get("packageName") + "/i18n",
        render_template('/generate/i18n.txt', data=ens),
        data.get("packageName") + ".datagrid_en_US.properties")
Example #28
def brickfind_crawl(brick, args):
    if brick.endswith("/"):
        brick = brick[0:len(brick) - 1]

    working_dir = os.path.dirname(args.outfile)
    mkdirp(working_dir, exit_on_err=True, logger=logger)
    create_file(args.outfile, exit_on_err=True, logger=logger)

    with open(args.outfile, "a+") as fout:
        brick_path_len = len(brick)

        def mtime_filter(path):
            try:
                st = os.lstat(path)
            except (OSError, IOError) as e:
                if e.errno == ENOENT:
                    st = None
                else:
                    raise

            if st and (st.st_mtime > args.start or st.st_ctime > args.start):
                return True

            return False

        def output_callback(path):
            path = path.strip()
            path = path[brick_path_len + 1:]
            output_write(fout, path, args.output_prefix)

        ignore_dirs = [
            os.path.join(brick, dirname)
            for dirname in conf.get_opt("brick_ignore_dirs").split(",")
        ]

        if args.full:
            find(brick, callback_func=output_callback, ignore_dirs=ignore_dirs)
        else:
            find(brick,
                 callback_func=output_callback,
                 filter_func=mtime_filter,
                 ignore_dirs=ignore_dirs)

        fout.flush()
        os.fsync(fout.fileno())
Example #29
def run_bot():

    TOKEN = read_file(TOKEN_FILE)
    KEYWORD = read_file(KEYWORD_FILE)
    REPLY = read_file(REPLY_FILE)
    OFFSET = read_file(OFFSET_FILE)
    exclusion_list = read_file(EXCLUSION_FILE)

    bot = create_bot(TOKEN)

    while True:
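        # Poll for new updates and persist the latest offset after each processed batch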
        updates = fetch_updates(bot, OFFSET)
        if updates is not None:
            process_updates(updates, bot, KEYWORD, REPLY, exclusion_list)
            OFFSET = get_offset(updates)
            create_file(OFFSET_FILE, OFFSET)
        else:
            time.sleep(1)
Example #30
def brickfind_crawl(brick, args):
    if brick.endswith("/"):
        brick = brick[0:len(brick)-1]

    working_dir = os.path.dirname(args.outfile)
    mkdirp(working_dir, exit_on_err=True, logger=logger)
    create_file(args.outfile, exit_on_err=True, logger=logger)

    with open(args.outfile, "a+") as fout:
        brick_path_len = len(brick)

        def mtime_filter(path):
            try:
                st = os.lstat(path)
            except (OSError, IOError) as e:
                if e.errno == ENOENT:
                    st = None
                else:
                    raise

            if st and (st.st_mtime > args.start or st.st_ctime > args.start):
                return True

            return False

        def output_callback(path):
            path = path.strip()
            path = path[brick_path_len+1:]
            output_write(fout, path, args.output_prefix)

        ignore_dirs = [os.path.join(brick, dirname)
                       for dirname in
                       conf.get_opt("brick_ignore_dirs").split(",")]

        if args.full:
            find(brick, callback_func=output_callback,
                 ignore_dirs=ignore_dirs)
        else:
            find(brick, callback_func=output_callback,
                 filter_func=mtime_filter,
                 ignore_dirs=ignore_dirs)

        fout.flush()
        os.fsync(fout.fileno())
Example #31
def ip_from_prefix(asn):
    as_base = 'AS'
    ASN = as_base + asn
    sys.stdout.write("[" + ASN + "] " +
                     "Getting ip addresses from advertised prefixes...")
    sys.stdout.flush()

    filename = "IP/" + ASN
    create_file(filename)
    output_file = open(os.getcwd() + "/" + filename, 'w')
    input_file = open(os.getcwd() + "/Prefix/AS" + asn, 'r')

    # Regex taken from http://blog.markhatton.co.uk/2011/03/15/regular-expressions-for-ip-addresses-cidr-ranges-and-hostnames/
    cidr_regex = re.compile(
        r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/([0-9]|[1-2][0-9]|3[0-2]))$"
    )

    for line in input_file.readlines():

        is_ipv4_cidr_range = cidr_regex.match(line)

        if is_ipv4_cidr_range:
            line = line.rstrip('\n')
            ipnet = ip.ip_network(unicode(line))

            hosts = list(ipnet.hosts())
            if len(hosts) > 2:
                output_file.write(
                    str(hosts[random.randint(1,
                                             len(hosts) - 1)]) + "\n")
                output_file.write(
                    str(hosts[random.randint(1,
                                             len(hosts) - 1)]) + "\n")
                output_file.write(
                    str(hosts[random.randint(1,
                                             len(hosts) - 1)]) + "\n")

    output_file.close()
    input_file.close()

    sys.stdout.write(" Done!\n")
    sys.stdout.write("[" + ASN + "] " + "IPs are in file: ./" + filename +
                     "\n")
Example #32
def start():
    # create the tfrecord folders if they don't exist
    utils.create_dir(PATH_TFRECORDS)
    utils.create_dir(PATH_TFRECORDS_TRAIN)
    utils.create_dir(PATH_TFRECORDS_TEST)
    train_labels, test_labels = utils.random_split_kitti(
        PATH_LABELS, 0.8, CLASSES, RANDOM_SEED)

    # Step 1: create a writer to write tfrecord to that file
    labels_tuple = (train_labels, test_labels)
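    # Write the train and test splits into separate TFRecord folders, at most SAMPLES_PER_FILES images per shard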
    for l in range(len(labels_tuple)):
        files_counter = 0
        labels_src = labels_tuple[l]
        labels_keys = list(labels_src)
        labels_num = len(labels_src)

        if l == 0:
            tfrecord_folder = PATH_TFRECORDS_TRAIN
        else:
            tfrecord_folder = PATH_TFRECORDS_TEST
        i = 0
        while i < labels_num:
            sys.stdout.write('\r>> Creating TFRecord file number %d ' %
                             files_counter)
            tfrecord_file = os.path.join(
                tfrecord_folder, 'train_' + '%d' % files_counter + '.tfrecord')
            utils.create_file(tfrecord_file)
            with tf.python_io.TFRecordWriter(tfrecord_file) as writer:
                j = 0
                while i < labels_num and j < SAMPLES_PER_FILES:
                    sys.stdout.write('\r>> Converting image %d/%d ' %
                                     (i + 1, labels_num))
                    sys.stdout.flush()
                    image_number = labels_keys[i]
                    image_path = os.path.join(PATH_IMAGES,
                                              image_number + '.png')
                    utils.append_to_tfrecord(image_path,
                                             labels_src[image_number], writer)
                    i += 1
                    j += 1

            files_counter += 1
Example #33
def setup_bot():
    print("Setup time! Please don't lie to me, I'd know.")
    token = input("My bot token: ")
    keyword = input("The keyword I should react to: ")
    reply = input(
        "My witty response whenever the keyword appears in the conversation: ")

    excluded = []
    exclusion = "word"
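    # Keep asking for words to ignore until the user submits an empty line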
    while exclusion != '':
        exclusion = input("Is there any word you'd like to me to ignore?")
        print("Sure! I'm going to pretend I haven't heard anyone saying '{}'!".
              format(exclusion))
        if exclusion != '':
            excluded.append(exclusion + '\n')

    create_file(TOKEN_FILE, token)
    create_file(KEYWORD_FILE, keyword)
    create_file(REPLY_FILE, reply)
    create_file(OFFSET_FILE, OFFSET_VALUE_INITIAL)
    create_file(EXCLUSION_FILE, excluded)

    print("Setup complete! You may now run telegren.py.")
Example #34
def changelog_crawl(brick, end, args):
    """
    Init function, prepares working dir and calls Changelog query
    """
    if brick.endswith("/"):
        brick = brick[0:len(brick) - 1]

    # WORKING_DIR/BRICKHASH/OUTFILE
    working_dir = os.path.dirname(args.outfile)
    brickhash = hashlib.sha1(brick)
    brickhash = str(brickhash.hexdigest())
    working_dir = os.path.join(working_dir, brickhash)

    mkdirp(working_dir, exit_on_err=True, logger=logger)
    create_file(args.outfile, exit_on_err=True, logger=logger)
    create_file(args.outfile + ".gfids", exit_on_err=True, logger=logger)

    log_file = os.path.join(conf.get_opt("log_dir"), args.session, args.volume,
                            "changelog.%s.log" % brickhash)

    logger.info("%s Started Changelog Crawl. Start: %s, End: %s" %
                (brick, args.start, end))
    get_changes(brick, working_dir, log_file, end, args)
Example #35
def do_create_site(options):
    if not utils.is_true('create_test_folder', options['default']):
        return False

    if not os.path.isdir(options['default']['site_path']):
        utils.error_msg('Path to Folder Site Not Found')
        return False

    site_folder_path = options['default']['site_path'] + options['default'][
        'site_name']

    if os.path.isdir(site_folder_path):
        utils.error_msg('Site Folder is Exist')
        return False

    os.makedirs(site_folder_path)

    template_path = options['system']['template_page_path']
    content = utils.get_config_content(template_path)

    site_folder_path = site_folder_path + "/index.html"
    utils.create_file(site_folder_path, content)

    utils.success_msg("Site Folder was Created.")
Example #36
def main():

    # Path
    daily_dir_path = "C:\\wamp64\\www\\detection\\"

    # Cam and cascade init
    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()

    # Define scene to detect
    a, b, c, d = define_scene(frame)

    # Process on each frame
    while (True):

        # Create daily directory
        directory = utils.create_daily_dir(daily_dir_path)

        # Read webcam frame
        ret, frame = cap.read()

        # Copy and crop the frame
        original_frame = frame.copy()
        cropped_frame = frame[a:b, c:d]

        # Check if face and person detected
        if face_detection(cropped_frame) and person_detection(cropped_frame):
            file = utils.create_file(directory)
            cv2.imwrite(file, original_frame)
            time.sleep(1)

        # Check if quit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release cam
    cap.release()
    cv2.destroyAllWindows()
Example #37
def crawl_link(collection, url):
    # skip crawling if crawled within last 24 hours
    doc = collection.find_one({"link": url})
    if doc is not None and doc['isCrawled'] and\
     doc['lastCrawledDT'] > datetime.now() - timedelta(days=1):
        log.info(f' \tThe {url} is already crawled within last 24hr')
        return

    # make connection requests to url
    try:
        req = requests.get(url)
    except OSError as exc:
        log.info(
            f" Error occured with {url} at time {datetime.now()}. Following error occured :\n{exc}\nSkipping..."
        )
        return

    if req.status_code != 200:
        log.debug(f" \t\t\t{url} gave response code {req.status_code}")
        update_db(collection, url, src_url, req)
        return

    # create html file after successful request
    if 'text/html' in req.headers['content-type']:
        html_doc = req.text
        file_path, file_created_DT = create_file(html_doc, conf.file_dir,
                                                 conf.path, 'utf-8',
                                                 doc['filePath'])
    else:
        log.info(f' Not html.. ignored')
        return

    # update the database information
    update_db(collection, url, src_url, req, file_path, file_created_DT)

    # return the html document for scraping purposes
    return html_doc
Example #38
def setup():  # returns None
    # create directory if needed
    for dir in const.BASIC_DIR_TREE:
        if utils.check_if_directory_exists(dir) == False:
            utils.create_directory(dir)
    # create files if needed
    if utils.check_if_file_exists(const.CONFIG_FILE) == False:
        # utils.create_file(const.CONFIG_FILE)
        do_file_from_list(const.CONFIG_FILE, const.DEFAULT_CONFIG_FILE)
    if utils.check_if_file_exists(const.KEYS_DOWN_FILE) == False:
        utils.create_file(const.KEYS_DOWN_FILE)
        do_file_from_list(const.KEYS_DOWN_FILE, const.DEFAULT_KEYS_DOWN_FILE)
    if utils.check_if_file_exists(const.KEYS_UP_FILE) == False:
        utils.create_file(const.KEYS_UP_FILE)
        do_file_from_list(const.KEYS_UP_FILE, const.DEFAULT_KEYS_UP_FILE)
    if utils.check_if_file_exists(const.MODS_FILE) == False:
        utils.create_file(const.MODS_FILE)
        do_file_from_list(const.MODS_FILE, const.DEFAULT_MODS_FILE)
Example #39
def mode_query(session_dir, args):
    global gtmpfilename

    # Verify volume status
    cmd = ["gluster", 'volume', 'info', args.volume, "--xml"]
    _, data, _ = execute(cmd,
                         exit_msg="Failed to Run Gluster Volume Info",
                         logger=logger)
    try:
        tree = etree.fromstring(data)
        statusStr = tree.find('volInfo/volumes/volume/statusStr').text
    except (ParseError, AttributeError) as e:
        fail("Invalid Volume: %s" % e, logger=logger)

    if statusStr != "Started":
        fail("Volume %s is not online" % args.volume, logger=logger)

    mkdirp(session_dir, exit_on_err=True, logger=logger)
    mkdirp(os.path.join(session_dir, args.volume),
           exit_on_err=True,
           logger=logger)
    mkdirp(os.path.dirname(args.outfile), exit_on_err=True, logger=logger)

    # Configure cluster for password-less SSH
    ssh_setup(args)

    # Enable volume options for changelog capture
    enable_volume_options(args)

    # Test options
    if not args.since_time and not args.end_time and not args.full:
        fail(
            "Please specify either {--since-time and optionally --end-time} "
            "or --full",
            logger=logger)

    if args.since_time and args.end_time and args.full:
        fail(
            "Please specify either {--since-time and optionally --end-time} "
            "or --full, but not both",
            logger=logger)

    if args.end_time and not args.since_time:
        fail("Please specify --since-time as well", logger=logger)

    # Start query command processing
    start = -1
    end = -1
    if args.since_time:
        start = args.since_time
        if args.end_time:
            end = args.end_time
    else:
        start = 0  # --full option is handled separately

    logger.debug("Query is called - Session: %s, Volume: %s, "
                 "Start time: %s, End time: %s" %
                 ("default", args.volume, start, end))

    prefix = datetime.now().strftime("%Y%m%d-%H%M%S-%f-")
    gtmpfilename = prefix + next(tempfile._get_candidate_names())

    run_cmd_nodes("query",
                  args,
                  start=start,
                  end=end,
                  tmpfilename=gtmpfilename)

    # Merger
    if args.full:
        cmd = ["sort", "-u"] + node_outfiles + ["-o", args.outfile]
        execute(cmd,
                exit_msg="Failed to merge output files "
                "collected from nodes",
                logger=logger)
    else:
        # Read each Changelogs db and generate finaldb
        create_file(args.outfile, exit_on_err=True, logger=logger)
        outfilemerger = OutputMerger(args.outfile + ".db", node_outfiles)
        write_output(args.outfile, outfilemerger, args.field_separator)

    try:
        os.remove(args.outfile + ".db")
    except (IOError, OSError):
        pass

    run_cmd_nodes("cleanup", args, tmpfilename=gtmpfilename)

    sys.stdout.write("Generated output file %s\n" % args.outfile)
Example #40
def clean_folder(config_json_path,
                 user_config_file,
                 folder_to_clean,
                 log=False):
    """
    Performs Folder Cleaning Operation

    Parameters:
    ---
    config_json_path: str
        Path of the JSON File that contains the details of all the possible file types.
        Use file_formats_scrapper package to generate this JSON file.
    user_config_file: str
        Path of the user config JSON file mapping file extensions to destination folders
    folder_to_clean: str
        Path of folder to be cleaned
    log: bool
        Display a spinner with the corresponding log message while performing each operation (default False)
    """

    # Starting Directory Scanning Spinner
    spnr = utils.start_spinner(log, msg=fcc.MSG_DIR_SCAN)

    # Getting all files from the directory to be scanned
    files = scan_directory(folder_to_clean)

    # Stopping Directory Scanning Spinner
    utils.stop_spinner(spnr, msg=fcc.MSG_DIR_SCANNED)

    # Loading the User Config FileTypes JSON Data
    if not exists(user_config_file):
        utils.create_file(user_config_file, '{}')
    user_config = utils.load_json(user_config_file)
    user_types = get_user_file_types(user_config_file)

    # Loading the APP Generated FileTypes JSON Data
    file_type_info = utils.load_json(config_json_path)

    # Starting Folder Cleaner Spinner
    spnr = utils.start_spinner(log, msg=fcc.MSG_FILE_MOVE)

    # Move Files using user config
    move_files_user_config(user_config, files)

    # Filtering files
    files = filter(lambda file: get_extension(file)[1:] not in user_types,
                   files)

    exceptional_files = {}
    # Looping over the files
    for file in files:
        # Getting the File Extension and removing . (E.g. .mp4 => mp4)
        ext = get_extension(file)[1:].lower()
        file_path = []

        user_file_path = user_config.get(ext.lower())
        if user_file_path:
            new_dir = folder_to_clean
            if not user_file_path == usc.PARENT:
                new_dir = join(folder_to_clean, user_file_path)
                new_file = join(new_dir, basename(file))
                create_directory(new_dir)
                move_file(file, new_file)
        # Getting the Path of the file based on it's extension (ext)
        elif get_file_path(file_type_info, ext, file_path):
            # Reversing the list of path to get the correct path
            file_path = file_path[::-1]
            new_dir = join(folder_to_clean, '\\'.join(file_path))
            new_file = join(new_dir, basename(file))
            create_directory(new_dir)
            move_file(file, new_file)
        else:
            map_file_to_file_type(exceptional_files, ext, file)

    # Stopping Folder Cleaner Spinner
    utils.stop_spinner(spnr, msg=fcc.MSG_FILE_MOVED)

    exceptional_file_types = [ext for ext in exceptional_files.keys()]

    if len(exceptional_file_types) > 0:
        set_path_to_user_settings(exceptional_file_types, user_config_file)

        files = [
            file for ext in exceptional_files.keys()
            for file in exceptional_files[ext]
        ]

        spnr = utils.start_spinner(log, msg=fcc.MSG_FILE_MOVE)
        # Loading the Updated User Config
        user_config = utils.load_json(user_config_file)
        # Moving all the Exceptional Files
        move_files_user_config(user_config, files)
        utils.stop_spinner(spnr, msg=fcc.MSG_FILE_MOVED)
Example #41
def get_changes(brick, hash_dir, log_file, end, args):
    """
    Makes use of libgfchangelog's history API to get changelogs
    containing changes from start and end time. Further collects
    the modified gfids from the changelogs and writes the list
    of gfid to 'gfid_list' file.
    """
    try:
        libgfchangelog.cl_init()
        libgfchangelog.cl_register(brick, hash_dir, log_file,
                                   CHANGELOG_LOG_LEVEL, CHANGELOG_CONN_RETRIES)
    except libgfchangelog.ChangelogException as e:
        fail("%s Changelog register failed: %s" % (brick, e), logger=logger)

    # Output files to record GFIDs and GFID to Path failure GFIDs
    gfid_list_path = args.outfile + ".gfids"
    gfid_list_failures_file = gfid_list_path + ".failures"
    create_file(gfid_list_path, exit_on_err=True, logger=logger)
    create_file(gfid_list_failures_file, exit_on_err=True, logger=logger)

    # Changelogs path(Hard coded to BRICK/.glusterfs/changelogs
    cl_path = os.path.join(brick, ".glusterfs/changelogs")

    # Fail if History fails for requested Start and End
    try:
        actual_end = libgfchangelog.cl_history_changelog(
            cl_path, args.start, end, CHANGELOGAPI_NUM_WORKERS)
    except libgfchangelog.ChangelogException as e:
        fail("%s Historical Changelogs not available: %s" % (brick, e),
             logger=logger)

    try:
        # scan followed by getchanges till scan returns zero.
        # history_scan() is blocking call, till it gets the number
        # of changelogs to process. Returns zero when no changelogs
        # to be processed. returns positive value as number of changelogs
        # to be processed, which will be fetched using
        # history_getchanges()
        changes = []
        while libgfchangelog.cl_history_scan() > 0:
            changes += libgfchangelog.cl_history_getchanges()

            if changes:
                with open(gfid_list_path, 'a+') as fgfid:
                    for change in changes:
                        with open(change) as f:
                            for line in f:
                                # Space delimited list, collect GFID
                                details = line.split()
                                fgfid.write("%s\n" % details[1])

                        libgfchangelog.cl_history_done(change)
                    fgfid.flush()
                    os.fsync(fgfid.fileno())
    except libgfchangelog.ChangelogException as e:
        fail("%s Error during Changelog Crawl: %s" % (brick, e),
             logger=logger)

    # If TS returned from history_changelog is < end time
    # then FS crawl may be required, since history is only available
    # till TS returned from history_changelog
    if actual_end < end:
        fail("Partial History available with Changelog", 2, logger=logger)

    sort_unique(gfid_list_path)
    gfid_to_path_using_pgfid(brick, gfid_list_path,
                             args.outfile, gfid_list_failures_file)
    gfid_to_path_using_batchfind(brick, gfid_list_failures_file, args.outfile)
Example #42
File: all.py Project: omerher/6pQwNMdYNX
import os

import PySimpleGUI as sg  # assumed alias: sg.theme()/sg.Text() below follow the PySimpleGUI convention
from main import main
import utils, overnight
from unfollower import unfollow

WINDOW_TITLE = 'IG Upload Helper'
x = 650
y = 750

sg.theme('Dark')  # Add a touch of color
# All the stuff inside your window.

base_path = os.path.realpath(__file__)[:-len(os.path.basename(__file__))]

# creates accounts.txt if it doesn't exist
accounts_path = os.path.join(base_path, "accounts.txt")
utils.create_file(accounts_path, "")
accounts = [
    account for account in open("accounts.txt", "r").read().split("\n")
    if account
]  # gets all accounts

accounts_visible = False
if len(accounts) > 1:
    accounts_visible = True

barrier = sg.Text("|", font=("Ariel 15"))
barrier_visible = sg.Text("|", font=("Ariel 15"), visible=accounts_visible)

if len(accounts) >= 1:
    default_account = accounts[0]
else:
Example #43
File: main.py Project: bcicen/glusterfs
def mode_pre(session_dir, args):
    """
    Read from Session file and write to session.pre file
    """
    endtime_to_update = int(time.time()) - get_changelog_rollover_time(
        args.volume)
    status_file = os.path.join(session_dir, args.volume, "status")
    status_file_pre = status_file + ".pre"

    mkdirp(os.path.dirname(args.outfile), exit_on_err=True, logger=logger)

    # If Pre status file exists and running pre command again
    if os.path.exists(status_file_pre) and not args.regenerate_outfile:
        fail("Post command is not run after last pre, "
             "use --regenerate-outfile")

    start = 0
    try:
        with open(status_file) as f:
            start = int(f.read().strip())
    except ValueError:
        pass
    except (OSError, IOError) as e:
        fail("Error Opening Session file %s: %s"
             % (status_file, e), logger=logger)

    logger.debug("Pre is called - Session: %s, Volume: %s, "
                 "Start time: %s, End time: %s"
                 % (args.session, args.volume, start, endtime_to_update))

    run_cmd_nodes("pre", args, start=start)

    # Merger
    if args.full:
        cmd = ["sort", "-u"] + node_outfiles + ["-o", args.outfile]
        execute(cmd,
                exit_msg="Failed to merge output files "
                "collected from nodes", logger=logger)
    else:
        # Read each Changelogs db and generate finaldb
        create_file(args.outfile, exit_on_err=True, logger=logger)
        outfilemerger = OutputMerger(args.outfile + ".db", node_outfiles)

        with open(args.outfile, "a") as f:
            for row in outfilemerger.get():
                # Multiple paths in case of Hardlinks
                paths = row[1].split(",")
                for p in paths:
                    if p == "":
                        continue
                    f.write("%s %s %s\n" % (row[0], p, row[2]))

    try:
        os.remove(args.outfile + ".db")
    except (IOError, OSError):
        pass

    run_cmd_nodes("cleanup", args)

    with open(status_file_pre, "w", buffering=0) as f:
        f.write(str(endtime_to_update))

    sys.stdout.write("Generated output file %s\n" % args.outfile)
Example #44
File: main.py Project: gluster/glusterfs
def mode_query(session_dir, args):
    global gtmpfilename

    # Verify volume status
    cmd = ["gluster", 'volume', 'info', args.volume, "--xml"]
    _, data, _ = execute(cmd,
                         exit_msg="Failed to Run Gluster Volume Info",
                         logger=logger)
    try:
        tree = etree.fromstring(data)
        statusStr = tree.find('volInfo/volumes/volume/statusStr').text
    except (ParseError, AttributeError) as e:
        fail("Invalid Volume: %s" % e, logger=logger)

    if statusStr != "Started":
        fail("Volume %s is not online" % args.volume, logger=logger)

    mkdirp(session_dir, exit_on_err=True, logger=logger)
    mkdirp(os.path.join(session_dir, args.volume), exit_on_err=True,
           logger=logger)
    mkdirp(os.path.dirname(args.outfile), exit_on_err=True, logger=logger)

    # Configure cluster for password-less SSH
    ssh_setup(args)

    # Enable volume options for changelog capture
    enable_volume_options(args)

    # Test options
    if not args.full and args.type in ["f", "d"]:
        fail("--type can only be used with --full")
    if not args.since_time and not args.end_time and not args.full:
        fail("Please specify either {--since-time and optionally --end-time} "
             "or --full", logger=logger)

    if args.since_time and args.end_time and args.full:
        fail("Please specify either {--since-time and optionally --end-time} "
             "or --full, but not both",
             logger=logger)

    if args.end_time and not args.since_time:
        fail("Please specify --since-time as well", logger=logger)

    # Start query command processing
    start = -1
    end = -1
    if args.since_time:
        start = args.since_time
        if args.end_time:
            end = args.end_time
    else:
        start = 0  # --full option is handled separately

    logger.debug("Query is called - Session: %s, Volume: %s, "
                 "Start time: %s, End time: %s"
                 % ("default", args.volume, start, end))

    prefix = datetime.now().strftime("%Y%m%d-%H%M%S-%f-")
    gtmpfilename = prefix + next(tempfile._get_candidate_names())

    run_cmd_nodes("query", args, start=start, end=end,
                  tmpfilename=gtmpfilename)

    # Merger
    if args.full:
        cmd = ["sort", "-u"] + node_outfiles + ["-o", args.outfile]
        execute(cmd,
                exit_msg="Failed to merge output files "
                "collected from nodes", logger=logger)
    else:
        # Read each Changelogs db and generate finaldb
        create_file(args.outfile, exit_on_err=True, logger=logger)
        outfilemerger = OutputMerger(args.outfile + ".db", node_outfiles)
        write_output(args.outfile, outfilemerger, args.field_separator)

    try:
        os.remove(args.outfile + ".db")
    except (IOError, OSError):
        pass

    run_cmd_nodes("cleanup", args, tmpfilename=gtmpfilename)

    sys.stdout.write("Generated output file %s\n" % args.outfile)