Example #1
def minimize(filename):

    trolleybuses = utils.readfile(filename)
    minimum_prec, minimum = 15000000000, 10000000000
    max_interval = 20

    while (minimum_prec > minimum):
        T_prime, alpha = {}, {}
        trolleybus = []
        for key in trolleybuses.keys():
            T_prime[key] = utils.add_one(trolleybuses, trolleybuses[key])
        for key in T_prime.keys():
            T_prime[key].sort()
            alpha[key] = utils.calc_sum(T_prime[key])
        print(alpha)

        minimum_key = min(alpha, key=alpha.get)
        minimum_prec = minimum
        minimum = alpha[minimum_key]

        print(minimum_key, minimum)
        trolleybuses[minimum_key] = [x + 1 for x in trolleybuses[minimum_key]]

    visualise(trolleybuses)
    plt.title("New time-table")
    visualise(utils.readfile("sources/stefan_cel_mare.txt"))
    plt.title("Old time-table")
    plt.show()
    return trolleybuses
Example #2
File: timed_run.py Project: 5nizza/bene
def main(timed_run_params:TimedRunParams, tool_run:ToolRunParams) -> (RunStats, int):
    # TODO: add memory limit

    logging.info('timed_run.main')

    stats_file_name = get_tmp_file_name()
    exec_log_file = get_tmp_file_name()

    rc, out, err = \
        execute_shell('{runsolver} -o {tool_log} -v {stats_file} -w {exec_log} -W {time_limit} '
                      '{tool_cmd}'
                      .format(runsolver=RUN_SOLVER_EXEC,
                              tool_log=tool_run.log_file,
                              stats_file=stats_file_name,
                              exec_log=exec_log_file,
                              time_limit=str(timed_run_params.time_limit_sec),
                              tool_cmd=tool_run.to_cmd_str()))

    logging.info(readfile(exec_log_file))

    # TODO: this should also be logged in the DB
    assert rc == 0, 'timed run failed: rc={rc}, \nout={out}, \nerr={err}'\
            .format(rc=str(rc), out=out, err=err)

    tool_rc = get_tool_rc(readfile(exec_log_file))
    stats = parse_stats(readfile(stats_file_name))

    os.remove(stats_file_name)
    os.remove(exec_log_file)

    return stats, tool_rc
Example #3
File: uparser.py Project: sogou-ufo/ursa
def compileHTML(filepath, needCompress):
    """编译html文件
    
    Arguments:
    - `filepath`:
    """
    if not os.path.exists(filepath):
        return False
    tpl = utils.readfile(filepath)

    log.log("Compile for " + filepath + ".")

    LINK_TOKEN = "<link.* href=['\"](?!http|https)(.*?\.css)['\"]"  # 不包含以http/https开头的资源
    SCRIPT_TOKEN = "<script.* src=['\"](?!http|https)(.*?\.js)['\"]"

    iters = re.finditer(LINK_TOKEN, tpl)
    for i in reversed(list(iters)):
        path = i.group(1)
        path = compileCommon(path, "local", True)  # 内部可能有替换的变量
        if not conf.getConfig().get("disableAutoTimestamp"):
            tpl = tpl[0 : i.start(1)] + i.group(1) + "?t=" + getFileTimeStamp(path, filepath) + tpl[i.end(1) :]

    iters = re.finditer(SCRIPT_TOKEN, tpl)
    for i in reversed(list(iters)):
        path = i.group(1)
        path = compileCommon(path, "local", True)  # 内部可能有替换的变量
        if not conf.getConfig().get("disableAutoTimestamp"):
            tpl = tpl[0 : i.start(1)] + i.group(1) + "?t=" + getFileTimeStamp(path, filepath) + tpl[i.end(1) :]

    return tpl
Example #4
    def join(self):
        arg = pydev.AutoArg()
        test_num = int(arg.option('testnum', -1))
        input_filename = arg.option('f')
        movie_dir = arg.option('m')
        slot_output_filename = arg.option('s')
        output_filename = arg.option('o')
        coder_output_filename = arg.option('c')

        data = utils.readfile(open(input_filename), test_num=test_num)

        extractor = MovieLensRankingFeatureExtractor(movie_dir)
        writer = sf.SlotFileWriter(output_filename)
        for user_id, item_id, click in tqdm.tqdm(data):
            writer.begin_instance(click)

            extractor.begin(user_id, item_id)
            ps = extractor.processes()
            for p in ps:
                slot, lst = p()
                writer.write_slot(slot, lst)

            writer.end_instance()

        extractor.save(coder_output_filename, slot_output_filename)
        writer.summary()
Example #5
def getData(token):
    """
    
    Arguments:
    - `token`:
    """
    data = getRawData(token)
    if len(data):
        data = json.loads(data)
    else:
        data = {}

    dorepeat(data)
    
    commonpath = BASE + COMMON_TOKEN + SUFFIX
    if os.path.exists(commonpath):
        commondata = json.loads(utils.readfile(commonpath))
    else:
        commondata = {}
    
    data.update(commondata)
    
    multoken = token.split('/')

    data.update({'_token':token.replace('/','_')})
    data.update({'_folder':multoken[0]})
    data.update({'_subtoken':multoken[1] if len(multoken)>1 else ""})
    return data
Example #6
    def _css(self):
        '''
        handle css

        r.js rewrites URLs when it concatenates CSS files from different directories, so
        paths that start with @something@ are treated as relative paths and get "corrected"
        incorrectly. The workaround is to run variable substitution and timestamping on all
        CSS files first, and only then let r.js concatenate them. This slows processing down
        but avoids the problem.

        For speed, this step is only supported at build time; it cannot be used when serving
        from the development server.

        All static asset paths should be absolute; avoid referencing images via relative
        paths inside CSS.
        '''

        # find all CSS files
        all_css_files = utils.FileSearcher(r'\.css$', self._build_css_dir, relative=False).search()

        # substitute variables and append timestamps
        for dst in all_css_files:
            try:
                content = utils.readfile(dst)
                content = all_url(content, os.path.dirname(dst))
                content = replace(content, self._target)
                utils.writefile(dst, content)
            except Exception as e:
                if self._force:
                    log.error('[css]%s' % e)
                else:
                    raise e
Example #7
File: uparser.py Project: sogou-ufo/ursa
def parseTpl(token, data={}, noGetTpl=False, isbuild=False):
    """
    """
    if not noGetTpl:
        tpl = token + ".tpl"
    else:
        tpl = token

    if not noGetTpl:
        if isbuild:
            body = build_jinjaenv.get_template(tpl)
        else:
            body = jinjaenv.get_template(tpl)
    elif os.path.exists(tpl):
        body = Template(utils.readfile(tpl))
    else:
        return ""

    if not len(data):
        data = mgr.getData(token)
    try:
        body = body.render(data)
    except TemplateNotFound as e:
        return "Template %s not found" % (str(e),)
    except TemplateSyntaxError as e:  # [email protected]: catch syntax errors, very useful
        return "Template %s:%d Syntax Error:%s" % (e.filename, e.lineno, e.message)
    except Exception as e:
        print(e)
        return ""
    return body
Example #8
    def _tpl(self):
        '''
        handle templates

        Templates only need timestamping and variable substitution.

        Text files under the extra {compile_dir} folder have to be included here as well.
        '''
        fs = utils.FileSearcher(r'\.%s$' % C('template_ext'), self._build_tpl_dir, relative=False)
        tpls = fs.search()
        if self._compile_dir:
            nfs = utils.FileSearcher(r'.+', self._build_compile_dir, relative=False)
            compile_files = nfs.search()
            for f in compile_files:
                if not utils.isBinary(f):
                    tpls.insert(0, f)

        for tpl in tpls:
            try:
                content = utils.readfile(tpl)
                # static asset paths in templates resolve against the cwd, i.e. they should always be absolute
                content = allt(content, self._build_dir, force_abspath=False)
                content = replace(content, self._target)
                content = removeCssDepsDeclaration(content)
                utils.writefile(tpl, content)
            except Exception as e:
                if self._force:
                    log.error('[tpl]%s' % e)
                else:
                    raise e
Example #9
def step_t_5(a_base, B, p1):
    if p1 <= int(root(B, 5)):
        ### time the run
        start_time = time.time()
        ###
        n_list = []
        l = 5

        if p1 % 4 == 3:
            n_list = [p1]
            q_3k4 = readfile("primes/4k+3.txt")
            sign_p1 = []
            for a in a_base:
                sign_p1.append(numth.jacobi(a, p1))

            for q in q_3k4:
                if q % 24 == p1 % 24:
                    sign_q = []
                    for a in a_base:
                        sign_q.append(numth.jacobi(a, q))
                    if sign_q == sign_p1:
                        n_list.append(q)
        # else:
        # for p in primes:

        ###
        total_time = "--- %s seconds ---\n" % (time.time() - start_time)
        ###
        return np.array(n_list)
    else:
        print(f"Value Error: p1 > {int(root(B, 5))}")
Example #10
def compileHTML(filepath, needCompress):
    """编译html文件
    
    Arguments:
    - `filepath`:
    """
    if not os.path.exists(filepath):
        return False
    tpl = utils.readfile(filepath)

    log.log('Compile for ' + filepath + '.')

    LINK_TOKEN = '<link.* href=[\'"](?!http|https)(.*?\.css)[\'"]'  # skip resources whose URL starts with http/https
    SCRIPT_TOKEN = '<script.* src=[\'"](?!http|https)(.*?\.js)[\'"]'

    iters = re.finditer(LINK_TOKEN, tpl)
    for i in reversed(list(iters)):
        path = i.group(1)
        path = compileCommon(path, 'local', True)  # the path may contain variables that need substitution
        if not conf.getConfig().get('disableAutoTimestamp'):
            tpl = tpl[0:i.start(1)] + i.group(1) + '?t=' + getFileTimeStamp(
                path, filepath) + tpl[i.end(1):]

    iters = re.finditer(SCRIPT_TOKEN, tpl)
    for i in reversed(list(iters)):
        path = i.group(1)
        path = compileCommon(path, 'local', True)  # the path may contain variables that need substitution
        if not conf.getConfig().get('disableAutoTimestamp'):
            tpl = tpl[0:i.start(1)] + i.group(1) + '?t=' + getFileTimeStamp(
                path, filepath) + tpl[i.end(1):]

    return tpl
Example #11
def parseTpl(token, data={}, noGetTpl=False, isbuild=False):
    """
    """
    if not noGetTpl:
        tpl = token + '.tpl'
    else:
        tpl = token

    if not noGetTpl:
        if isbuild:
            body = build_jinjaenv.get_template(tpl)
        else:
            body = jinjaenv.get_template(tpl)
    elif os.path.exists(tpl):
        body = Template(utils.readfile(tpl))
    else:
        return ''

    if not len(data):
        data = mgr.getData(token)
    try:
        body = body.render(data)
    except TemplateNotFound as e:
        return 'Template %s not found' % (str(e), )
    except TemplateSyntaxError as e:  # [email protected]: catch syntax errors, very useful
        return 'Template %s:%d Syntax Error:%s' % (e.filename, e.lineno, e.message)
    except Exception as e:
        print(e)
        return ''
    return body
Example #12
def compileCommon(filepath, token, force=False):
    """通用编译方法
    编译 @tm:file_path@为6位时间戳
    
    Arguments:
    - `content`:
    """
    if force:
        content = filepath
    else:
        if not os.path.exists(filepath):
            return False
        ftype = filepath.split('.')[-1]
        if not ftype in ['html', 'htm', 'css', 'js', 'tpl', 'jsp']:
            return False
        content = utils.readfile(filepath)
    TM_TOKEN = '@tm:(.*?)@'
    DATE_TOKEN = '@date@'
    COMMON_TOKEN = '@(.*?)@'

    iters = re.finditer(TM_TOKEN, content)
    for i in reversed(list(iters)):
        content = content[0:i.start(0)] + getFileTimeStamp(
            i.group(1), filepath) + content[i.end(0):]

    iters = re.finditer(DATE_TOKEN, content)
    for i in reversed(list(iters)):
        content = content[0:i.start(0)] + utils.getDate() + content[i.end(0):]

    iters = re.finditer(COMMON_TOKEN, content)

    for i in reversed(list(iters)):
        config = conf.getConfig()
        name = i.group(1)
        value = (token and config[token].get(name)) or config.get(name)
        if value:
            if value.find('{num}') != -1:
                num = (token and config[token].get('num')) or config.get('num') or '10'
                num = range(int(num) + 1)  # 'num' may arrive as a string, e.g. the '10' default
                substr100 = content[i.end(0):i.end(0) + 100]
                istimestamp = substr100.find('t=')
                if istimestamp != -1:  #has timestamp
                    try:
                        tm = int(substr100[istimestamp + 2:istimestamp + 3])
                    except ValueError:
                        continue
                    if tm >= len(num):
                        tm = tm - len(num)
                    value = value.replace('{num}', str(tm))
                else:
                    global range_item
                    value = value.replace('{num}', str(num[range_item]))
                    range_item = range_item + 1
                    if range_item >= len(num):
                        range_item = 0
            content = content[0:i.start(0)] + value + content[i.end(0):]

    return content
Example #13
File: views.py Project: shijx12/CKE
def show_iteration_content(request, name, iteration):
	iteration_path = os.path.join(config.CATEGORY_BASEDIR, name, iteration)
	if os.path.isfile(iteration_path):
		instances, patterns = utils.readfile(iteration_path)
		context = { 'instances': instances, 'patterns': patterns, 'can_update': can_update(name, iteration) }
		return render(request, 'category/iteration_content.html', context)
	else:
		return HttpResponseNotFound('iteration %s/%s not exist!' % (name, iteration))
Example #14
File: tester.py Project: Leo1973/work
def RedisScript(spirit_name):
    #position = utils.seek2(log[lenseinfo_name])
    process = os.popen("redis-server /etc/redis.conf >  ./tempfile 2>&1")
    #process0 = os.popen("redis-server /etc/redis.conf")
    #print process.read()
    time.sleep(2)
    process1 = os.popen("redis-cli shutdown")
    return utils.readfile("./tempfile")
Example #15
def save_build_env():
    jailname = readfile(e('${OBJDIR}/jailname'))
    sh("mkdir -p ${SAVED_BUILD_ENV_DESTDIR}")
    sh("mkdir -p ${SAVED_BUILD_ENV_DESTDIR}/wrkdirs")
    sh("cp ${BE_ROOT}/repo-manifest ${SAVED_BUILD_ENV_DESTDIR}/")
    sh("cp -R ${WORLD_DESTDIR} ${SAVED_BUILD_ENV_DESTDIR}/")
    sh("cp -R ${DEBUG_ROOT} ${SAVED_BUILD_ENV_DESTDIR}/")
    sh("cp -R ${OBJDIR}/ports/wrkdirs/${jailname}-p/p/*.tbz ${SAVED_BUILD_ENV_DESTDIR}/wrkdirs/", nofail=True)
Example #16
File: main.py Project: CollectQT/website
def dynamic(path):
    try:
        filename = utils.get_dynamic_path(path)
        return flask.render_template(
            'base.jade', post=utils.readfile(filename))
    except ValueError:
        print('no files with path {}'.format(path))
        return flask.abort(404)
Example #17
File: uparser.py Project: sogou-ufo/ursa
def compileCommon(filepath, token, force=False):
    """通用编译方法
    编译 @tm:file_path@为6位时间戳
    
    Arguments:
    - `content`:
    """
    if force:
        content = filepath
    else:
        if not os.path.exists(filepath):
            return False
        ftype = filepath.split(".")[-1]
        if not ftype in ["html", "htm", "css", "js", "tpl", "jsp"]:
            return False
        content = utils.readfile(filepath)
    TM_TOKEN = "@tm:(.*?)@"
    DATE_TOKEN = "@date@"
    COMMON_TOKEN = "@(.*?)@"

    iters = re.finditer(TM_TOKEN, content)
    for i in reversed(list(iters)):
        content = content[0 : i.start(0)] + getFileTimeStamp(i.group(1), filepath) + content[i.end(0) :]

    iters = re.finditer(DATE_TOKEN, content)
    for i in reversed(list(iters)):
        content = content[0 : i.start(0)] + utils.getDate() + content[i.end(0) :]

    iters = re.finditer(COMMON_TOKEN, content)

    for i in reversed(list(iters)):
        config = conf.getConfig()
        name = i.group(1)
        value = (token and config[token].get(name)) or config.get(name)
        if value:
            if value.find("{num}") != -1:
                num = (token and config[token].get("num")) or config.get("num") or "10"
                num = range(int(num) + 1)  # "num" may arrive as a string, e.g. the "10" default
                substr100 = content[i.end(0) : i.end(0) + 100]
                istimestamp = substr100.find("t=")
                if istimestamp != -1:  # has timestamp
                    try:
                        tm = int(substr100[istimestamp + 2 : istimestamp + 3])
                    except ValueError:
                        continue
                    if tm >= len(num):
                        tm = tm - len(num)
                    value = value.replace("{num}", str(tm))
                else:
                    global range_item
                    value = value.replace("{num}", str(num[range_item]))
                    range_item = range_item + 1
                    if range_item >= len(num):
                        range_item = 0
            content = content[0 : i.start(0)] + value + content[i.end(0) :]

    return content
Example #18
def load_extract_data(data_path, w2v_path, w2v_dim, sequence_length):
    def exEntities(list_s):
        start_positions = []
        for i, token in enumerate(list_s):
            if token.find("/Entity") != -1:
                start_positions.append(i)
        return start_positions

    print("[LOADING] Extract data: ", data_path)
    data = utils.readfile(data_path)
    dict_origin_data = {}
    dict_tagged_data = {}
    tagged_sents = []

    for line in data:
        sent = line[0]
        lbox_ids = ":".join(line[1:])
        tagged_s = utils.pos_tag_no_e1e2(sent)
        tagged_sents.append(tagged_s)
        dict_origin_data[lbox_ids] = sent
        dict_tagged_data[lbox_ids] = tagged_s
    bow, embed = const_bow(tagged_sents, w2v_path, w2v_dim)
    print("[COMPLETE] Constructing BOW")
    o_sents = []
    o_pos1 = []
    o_pos2 = []
    o_e1 = []
    o_e2 = []
    o_ids = []
    for key_, tagged_s in dict_tagged_data.items():
        if len(tagged_s) > sequence_length: continue
        entities = exEntities(tagged_s)
        for e1_pos, e2_pos in itertools.permutations(entities, 2):
            e1 = tagged_s[e1_pos].split("/En")[0]
            e2 = tagged_s[e2_pos].split("/En")[0]
            tmp_s = []
            tmp_pos1 = []
            tmp_pos2 = []
            for idx, token in enumerate(tagged_s):
                if token in bow:
                    tmp_s.append(bow[token])
                    tmp_pos1.append(pos_embed(e1_pos - idx))
                    tmp_pos2.append(pos_embed(e2_pos - idx))
            sent_len = len(tagged_s)
            while len(tmp_s) != sequence_length:
                tmp_s.append(bow["<zero>"])
                tmp_pos1.append(122)
                tmp_pos2.append(122)
                sent_len += 1
            o_sents.append(tmp_s)
            o_pos1.append(tmp_pos1)
            o_pos2.append(tmp_pos2)
            o_e1.append(e1)
            o_e2.append(e2)
            o_ids.append(key_)
    return bow, embed, o_sents, o_pos1, o_pos2, o_e1, o_e2, o_ids, dict_origin_data
Example #19
File: reu.py Project: 5nizza/bene
def main(exp:ExpDesc, timed_run_params:TimedRunParams, tool_run_params:ToolRunParams):
    run_stats, tool_rc = timed_run.main(timed_run_params, tool_run_params)

    tool_log_str = readfile(tool_run_params.log_file)

    if tool_rc in (REAL_RC, UNREAL_RC):
        run_result = extract_data(readfile(tool_run_params.output_file) if tool_rc==REAL_RC else None,
                                  tool_log_str,
                                  tool_rc)
    elif tool_rc == TIMEOUT_RC:
        run_result = RunResult(None, None, None, TIMEOUT_STR, None)
    else:
        run_result = RunResult(None, None, None, FAIL_STR, None)

    adhoc_data = extract_adhoc_data(tool_log_str, adhoc_fields)

    run_result.total_time_sec = run_stats.wall_time_sec
    run_result.memory_mb = run_stats.virt_mem_mb
    upload_run(exp, timed_run_params, tool_run_params, run_result, adhoc_data)
Example #20
def get_capt_hints(fname):
    hints = dict()
    content = utils.readfile(fname)
    for line in content:
        if not line:
            continue

        fields = line.split('|')
        hints[fields[0]] = fields[1]
    return hints
Example #21
def get_capt_labels(fname):
    labels = dict()
    content = utils.readfile(fname)
    for line in content:
        if not line:
            continue
        fields = line.split('|')
        labels[fields[0]] = fields[1]

    return labels
Example #22
def create_aux_files(dsl, dest):
    for aux in dsl.aux_files:
        if not os.path.exists(aux.source):
            continue

        if aux.get('template'):
            f = template(aux.source)
        else:
            f = readfile(aux.source)

        name = aux.name
        setfile('${dest}/${name}', f)
Example #23
File: views.py Project: shijx12/CKE
def update_p(request, name, iteration):
	if request.method == 'GET' and request.GET['tp'] and request.GET['i'] and request.GET['delta']:
		tp = request.GET['tp']
		i = int(request.GET['i'])
		delta = float(request.GET['delta'])
		iteration_path = os.path.join(config.CATEGORY_BASEDIR, name, iteration)
		if os.path.isfile(iteration_path) and can_update(name, iteration):
			instances, patterns = utils.readfile(iteration_path)
			utils.ppg(tp, i, delta, instances, patterns)
			utils.write2file(iteration_path, instances, patterns)
			return HttpResponse('')
	return HttpResponseNotFound('iteration %s/%s not exist!' % (name, iteration))
Example #24
def create_aux_files(dsl, dest):
    for aux in dsl.aux_files:
        if not os.path.exists(aux.source):
            continue

        if aux.get('template'):
            f = template(aux.source)
        else:
            f = readfile(aux.source)

        name = aux.name
        setfile('${dest}/${name}', f)
Example #25
def get_capt_set(fname):
    capt_sets = []
    content = utils.readfile(fname)
    for line in content:
        if not line:
            continue
        fields = line.split('|')

        setformat = fields[0].split('_')[:3]
        capt_sets.append("_".join(setformat))

    return capt_sets
Example #26
File: views.py Project: shijx12/CKE
def show_iteration_content(request, name, iteration):
    iteration_path = os.path.join(config.CATEGORY_BASEDIR, name, iteration)
    if os.path.isfile(iteration_path):
        instances, patterns = utils.readfile(iteration_path)
        context = {
            'instances': instances,
            'patterns': patterns,
            'can_update': can_update(name, iteration)
        }
        return render(request, 'category/iteration_content.html', context)
    else:
        return HttpResponseNotFound('iteration %s/%s not exist!' %
                                    (name, iteration))
Example #27
File: views.py Project: shijx12/CKE
def update_p(request, name, iteration):
    if request.method == 'GET' and request.GET['tp'] and request.GET[
            'i'] and request.GET['delta']:
        tp = request.GET['tp']
        i = int(request.GET['i'])
        delta = float(request.GET['delta'])
        iteration_path = os.path.join(config.CATEGORY_BASEDIR, name, iteration)
        if os.path.isfile(iteration_path) and can_update(name, iteration):
            instances, patterns = utils.readfile(iteration_path)
            utils.ppg(tp, i, delta, instances, patterns)
            utils.write2file(iteration_path, instances, patterns)
            return HttpResponse('')
    return HttpResponseNotFound('iteration %s/%s not exist!' %
                                (name, iteration))
Example #28
def getRawData(token):
    """获取原始文件
    
    Arguments:
    - `token`:
    """
    fpath = BASE + token + SUFFIX
    if os.path.exists(fpath):
        data = utils.readfile( fpath )
    else:
        data = ''


    data = re.sub( '\/\*[\s\S]*?\*\/' , '' , data )
    return data
Example #29
def get_all_features(fname):
    features = []
    content = utils.readfile(fname)
    for line in content:
        fields = line.split(',')
        # remove captchaset name
        fields = fields[1:]
        for field in fields:
            if not field:
                continue
            # add to features
            if field not in features:
                features.append(field)

    return sorted(features)
Example #30
def main(args):

    # parse argument
    labels = args.labels
    servtags = args.servtags
    showname = False

    feature_dict = dict()

    # get all possible features
    features = get_all_features(servtags)
    for feat in sorted(features):
        feature_dict[feat] = 0

    # get set label
    label_dict = get_capt_labels(labels)

    # construct bitmap
    content = utils.readfile(servtags)

    # print header
    generate_header(showname, feature_dict, label_dict)

    for line in content:
        if not line:
            continue
        fields = line.split(',')
        set_name = fields[0]
        tags = fields[1:]

        if set_name not in label_dict.keys():
            continue

        bitmap = get_feature_bitmap(copy.deepcopy(feature_dict), tags)
        try:
            if showname:
                sys.stdout.write(set_name + ",")
                sys.stdout.write(",")
            #print bitmap
            for bitm in bitmap:
                sys.stdout.write(str(bitm))
                sys.stdout.write(",")

            sys.stdout.write(label_dict[set_name])
            print()
        except Exception as err:
            pass
Example #31
    def build_js(self, src, dst, base_dir):
        '''
        handle one js src to dst

        Concatenate, substitute variables, add timestamps, and compress if required.
        '''
        js = os.path.relpath(src, base_dir)
        subprocess.call('node ' + RJS_PATH + ' -o name=' + js[0:-3] + ' out=' + dst + ' optimize=none baseUrl='
                        + base_dir, shell=True)
        # replace
        content = utils.readfile(dst)
        content = replace(content, self._target)
        utils.writefile(dst, content)
        if C('js_ascii_only'):
            subprocess.call('node ' + RPL_PATH + ' ' + dst + ' ' + dst, shell=True)
        if self._compress:
            subprocess.call('java -jar ' + YC_PATH + ' --type js --charset ' + C('encoding') + ' ' + dst + ' -o ' + dst, shell=True)
Example #32
    def _replace(self):
        '''
        Substitute variables in all text files.
        '''
        files = utils.FileSearcher(r'.+', self._build_dir).search()
        for f in files:
            f = os.path.join(self._build_dir, f)
            if not utils.isBinary(f):
                try:
                    content = utils.readfile(f)
                    content = replace(content, self._target)
                    utils.writefile(f, content)
                except Exception as e:
                    if self._force:
                        log.error('[replace][%s]%s' % (f, e))
                    else:
                        raise e
Example #33
File: app.py Project: zatol/hls-allinone
def play(key):
  real = os.path.splitext(key)[0]

  try:
    if key[-4:] == '.key':
      meta = readkey(real)
      r = Response(binascii.unhexlify(meta['key']), mimetype='application/octet-stream')
      r.headers.add('Access-Control-Allow-Origin', '*')
      return r

    meta = readfile(real)
    if key[-5:] == '.m3u8':
      r = Response(meta['raw'], mimetype='application/vnd.apple.mpegurl')
      r.headers.add('Access-Control-Allow-Origin', '*')
      return r

    return render_template('play.html', meta=meta)
  except:
    return jsonify({'err': 1, 'message': 'File does not exist'})
Example #34
def compileCss(filepath):
    if not os.path.exists(filepath):
        return False
    css = utils.readfile(filepath)

    # @todo: the regex is imperfect; quoted URLs such as url("data:image/png,base64...") are mishandled ([email protected])
    IMG_TOKEN = 'url\([\'"]?(?!data:image|about:blank|http|https)(.*?)[\'"]?\)'  # [email protected]: skip base64 images and about:blank
    iters = re.finditer(IMG_TOKEN, css)
    for i in reversed(list(iters)):
        imgpath = i.group(1)
        imgpath = compileCommon(imgpath, 'local', True)  # the path may contain variables that need substitution
        if not conf.getConfig().get('disableAutoTimestamp'):  # [email protected]: why do http URLs not get a timestamp?
            css = css[0:i.end(0) - 1] + '?t=' + getFileTimeStamp(imgpath, filepath) + css[i.end(0) - 1:]  # [email protected]: are URLs that already carry a '?' detected?

    return css
Example #35
File: uparser.py Project: sogou-ufo/ursa
def compileCss(filepath):
    if not os.path.exists(filepath):
        return False
    css = utils.readfile(filepath)

    # @todo: the regex is imperfect; quoted URLs such as url("data:image/png,base64...") are mishandled ([email protected])
    IMG_TOKEN = (
        "url\(['\"]?(?!data:image|about:blank|http|https)(.*?)['\"]?\)"
    )  # [email protected]: skip base64 images and about:blank
    iters = re.finditer(IMG_TOKEN, css)
    for i in reversed(list(iters)):
        imgpath = i.group(1)
        imgpath = compileCommon(imgpath, "local", True)  # the path may contain variables that need substitution
        if not conf.getConfig().get("disableAutoTimestamp"):  # [email protected]: why do http URLs not get a timestamp?
            css = (
                css[0 : i.end(0) - 1] + "?t=" + getFileTimeStamp(imgpath, filepath) + css[i.end(0) - 1 :]
            )  # [email protected]: are URLs that already carry a '?' detected?

    return css
Example #36
    def getData(self, including_deps=True):
        data = {}
        if C('disable_deps_search') or not including_deps:
            deps = [self.__token + '.' + C('template_ext')]
        else:
            # copy
            deps = self.__deps[0:]
            deps.reverse()
        deps.insert(len(deps), self.__token + ".json")
        deps.insert(0, "_ursa.json")
        for dep in deps:
            try:
                json_filepath = utils.abspath(os.path.join(C('data_dir'), re.sub(r'\.%s$' % C('template_ext'), '.json', dep)))
                content = utils.readfile(json_filepath)
                content = re.sub(r'/\*[\s\S]*?\*/', '', content)
                json_data = json.loads(content)
                data.update(json_data)
            except Exception as e:
                pass  # log.warn('[getdata]%s:%s' % (json_filepath, e))
        return data
Example #37
def getFileTimeStamp(fpath, parentpath=''):
    """为文件加上时间戳并返回
    
    Arguments:
    - `fpath`:
    """
    if fpath.find('/') == 0:
        fpath = fpath[1:]
    fpath2 = os.path.join(conf.getConfig()['path'], 'build', fpath)
    if not os.path.exists(fpath2) and parentpath:
        parentpath = parentpath.split('/')
        parentpath.pop()
        fpath2 = '/'.join(parentpath) + '/' + fpath

    if os.path.exists(fpath2):
        f = utils.readfile(fpath2, 'rb')
        m = hashlib.md5()
        m.update(f)
        md5 = utils.md5toInt(m.hexdigest())
        return md5
    return ''
Example #38
File: uparser.py Project: sogou-ufo/ursa
def getFileTimeStamp(fpath, parentpath=""):
    """为文件加上时间戳并返回
    
    Arguments:
    - `fpath`:
    """
    if fpath.find("/") == 0:
        fpath = fpath[1:]
    fpath2 = os.path.join(conf.getConfig()["path"], "build", fpath)
    if not os.path.exists(fpath2) and parentpath:
        parentpath = parentpath.split("/")
        parentpath.pop()
        fpath2 = "/".join(parentpath) + "/" + fpath

    if os.path.exists(fpath2):
        f = utils.readfile(fpath2, "rb")
        m = hashlib.md5()
        m.update(f)
        md5 = utils.md5toInt(m.hexdigest())
        return md5
    return ""
Example #39
File: deps.py Project: yanni4night/ursa2
    def _search(self, tpl):
        '''
        recursive search
        '''
        try:
            abspath = utils.abspath(os.path.join(C('template_dir'),tpl))

            content = utils.readfile(abspath)
            iters = re.finditer(self._pattern,content)

            for i in reversed(list(iters)):
                tpl = utils.filterRelPath(i.group(3))
                if C('ignore_parents') and tpl.endswith('parent.'+C('template_ext')):
                    continue
                if self._history.get(tpl) is None:
                    self._result.append(tpl)
                    self._history[tpl] = 1
                    if 'include' == i.group(1):
                        self._include_result.append(tpl)
                    self._search(tpl)
        except Exception as e:
            log.error('[deps]%s' % e)
Example #40
def save(data, filename, seperator1, seperator2, sd):
    publickey = ""
    privatekey = ""
    values = {}
    if not os.path.exists(filename):
        open(filename, "w+").close()  # create the file if it does not exist
    with open(filename) as f:
        file_data = f.read()
    if len(file_data) > 0:
        privatekey, publickey, values = readfile(file_data, seperator1,
                                                 seperator2)
    else:
        privatekey, publickey = rsakeys()
    values[sv] = sd
    values[data["username"] + data["web"] + data["type"]] = data
    result = ""
    for i in values:
        res = json.dumps(values[i])
        res = encrypt(publickey, res.encode()).decode("utf-8")
        result += str(len(str(len(res)))) + str(len(res))
        result += res
    private_string = privatekey.exportKey("PEM").decode("utf-8")
    public_string = publickey.exportKey("PEM").decode("utf-8")
    full_string = private_string + result + public_string
    full_string = list(full_string)
    r1 = len(private_string)
    r2 = len(public_string)
    n = len(full_string)
    full_string.insert(seperator1 % n, r1)
    full_string.insert(seperator2 % n, r2)
    full_string.insert(0, len(str(r1)))
    full_string.insert(len(full_string), len(str(r2)))
    full_string = ''.join(map(str, full_string))
    full = ','.join(format(ord(x), 'b') for x in full_string).split(",")
    n = len(full)
    full = swaper(0, n, full, 1, seperator1, seperator2)
    with open(filename, "w") as file1:
        file1.write(','.join(map(str, full)))
Example #41
def minimize(filename, station_dict):
	minimum_prec, minimum = 15000000000, 10000000000
	trolleybuses = utils.readfile(filename)
	T_prime, alpha = {}, {}
	troleibus =[]

	while(minimum_prec > minimum):
		troleibus=[]
		for key in trolleybuses.keys():
			T_prime[key] = utils.add_one(trolleybuses, trolleybuses[key])
		for key in T_prime.keys():	
			T_prime[key].sort()
			alpha[key] = utils.calc_sum(T_prime[key])

		minimum_key = min(alpha, key=alpha.get)
		minimum_prec = minimum
		minimum = alpha[minimum_key]

		for key in station_dict:
			for bus in station_dict[key]:
				if(minimum_key == bus):
					station_dict[key][bus] = [x+1 for x in station_dict[key][bus]]

	return station_dict
Example #42
    def __init__(self,
                 exp_desc:ExpDesc,
                 timed_run_params:TimedRunParams, tool_run_params:ToolRunParams,
                 run_result:RunResult,
                 *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.total_time_sec = run_result.total_time_sec
        self.circuit_size = run_result.circuit_size
        self.memory_mb = run_result.memory_mb
        self.is_realizable = run_result.is_realizable
        self.model = run_result.model

        self.input_file = tool_run_params.input_file
        self.logs = readfile(tool_run_params.log_file) if tool_run_params.log_file else ''
        self.tool_params = tool_run_params.params

        self.exp = exp_desc.exp_name
        self.commit = exp_desc.commit
        self.hardware = exp_desc.hardware
        self.datetime = exp_desc.datetime
        self.note = exp_desc.note

        self.time_limit_sec = timed_run_params.time_limit_sec
        self.memory_limit_mb = timed_run_params.memory_limit_mb
Example #43
from jaeschke import t_2
from utils import readfile

bases = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53]

primes = readfile("primes/primes_1m.txt")

if __name__ == "__main__":
    print(3, 10)
    t_2(bases[:3], 10**10, primes)
Example #44
source_dataset = ImageFolder('real_or_drawing/train_data',
                             transform=source_transform)  # (32, 32, 3)
target_dataset = ImageFolder('real_or_drawing/test_data',
                             transform=target_transform)
test_dataset = ImageFolder('real_or_drawing/test_data',
                           transform=test_transform)

source_dataloader = DataLoader(source_dataset, batch_size=32, shuffle=True)
target_dataloader = DataLoader(target_dataset, batch_size=32, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=128, shuffle=False)
device = torch.device(
    f'cuda:{args.cuda}' if torch.cuda.is_available() else 'cpu')
arch = f'arch/{args.arch}'

if args.do_preprocess:
    target_datas, target_labels = readfile('real_or_drawing/test_data/0')
    print('[*] Saving datas...')
    with open('preprocessed/target_datas.pkl', 'wb') as f:
        pickle.dump(target_datas, f)

if args.do_train:
    trainer = Trainer(arch, device)
    for epoch in range(args.max_epoch):
        trainer.run_epoch(epoch,
                          source_dataloader,
                          target_dataloader,
                          lamb=0.1)

    accept_indice, sudo_labels, total_labels = get_sudo_label(
        arch, device, test_dataloader, trainer.feature_extractor,
        trainer.label_predictor)
Example #45
from cnn import VGG13, VGG16, VGG19
from mobileNet import StudentNet
from utils import readfile, set_seed
from dataset import ImgDataset
from train import training, deep_mutual_learning
import torch.nn.functional as F
import torchvision.models as models

# Set Random seed
SEED = 0
set_seed(SEED)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
workspace_dir = sys.argv[1]

print("Reading data")
train_x, train_y, train_label_list = readfile(
    os.path.join(workspace_dir, "training"), True)
#print("Size of training data = {}".format(len(train_x)))
val_x, val_y, val_label_list = readfile(
    os.path.join(workspace_dir, "validation"), True)
#print("Size of validation data = {}".format(len(val_x)))

now_train_count = 0
now_val_count = 0
#print(train_label_list)
for i in range(11):
    if i == 0:
        train_X = np.concatenate(
            (train_x[:int(train_label_list[i] * 0.8), :, :, :],
             val_x[:val_label_list[i], :, :, :]))
        val_X = train_x[int(train_label_list[i] *
                            0.8):train_label_list[i], :, :, :]
Example #46
def uploadfile(username, agencydata, comments, md5sum, sizeoffile, bounds):
    ## todo: cache
    if model.Message.all().filter('md5sum =', md5sum).count() > 0:
        raise UploadError('This file has previously been uploaded')
    ## todo: cache
    if model.SkipMd5.all().filter('md5sum =', md5sum).count() > 0:
        raise UploadError('This file has previously been uploaded')

    raw_agencies = utils.readfile(agencydata)
    if not raw_agencies:
        raise UploadError(
            "zip file did not contain any valid agencies in agency.txt.")

    ## save our msg
    new_message = model.Message(user=username, content=comments)
    new_message.hasFile = True
    memcache.delete('Message.recent')
    # new_message.filename = filename
    new_message.md5sum = md5sum
    new_message.size = sizeoffile
    new_message.max_lat = None
    new_message.max_lng = None
    new_message.min_lat = None
    new_message.min_lng = None

    if bounds:
        bounds_list = bounds.split("|")
        try:
            new_message.max_lat = float(bounds_list[0])
            new_message.max_lng = float(bounds_list[1])
            new_message.min_lat = float(bounds_list[2])
            new_message.min_lng = float(bounds_list[3])
        except ValueError:
            logging.error('failed to set bounds from %s' % bounds)

    new_message.put()

    d = datetime.datetime.now()
    datestr = d.strftime('%Y%m%d_%H%M')
    seen_agencies = []
    for ag in raw_agencies:
        ## get from the db
        ## lookup by url first

        a = None
        if ag.get('agency_url', '').strip():
            ## try to get via url first as it's more unique
            url = ag['agency_url'].strip()
            try:
                # TODO: use urlnorm
                url_parsed = urlparse.urlparse(url)
                if not url_parsed.path:
                    url += '/'
            except:
                logging.exception('unable to parse url')

            a = model.Agency.all().filter('url =', url).get()
        if not a:
            slug = model.slugify(ag['agency_name'].strip())
            s = utils.lookup_agency_alias(slug)
            if s:
                slug = s
            a = memcache.get('Agency.slug.%s' % slug)
            if not a:
                a = model.Agency.all().filter('slug =', slug).get()
        if a:
            a.messagecount += 1
            a.lastupdate = datetime.datetime.now()
            a.put()
            memcache.set('Agency.slug.%s' % a.slug, a)
        if not a:
            a = model.Agency()
            a.name = ag['agency_name'].strip()
            a.url = ag.get('agency_url', '')
            a.messagecount = 1
            a.put()
            memcache.delete('Agency.recent')
            utils.incrAgencyCount()

        if len(raw_agencies) == 1:
            new_message.filename = '%s_%s.zip' % (a.slug, datestr)
            new_message.put()

        # some zip files have the same url several times; only capture the first time that url is used
        if a in seen_agencies:
            continue
        seen_agencies.append(a)

        ma = model.MessageAgency()
        ma.agency = a
        ma.message = new_message
        ma.hasFile = True
        ma.put()
        memcache.delete('Agency.all')  # because it has the cached last-update

    if not new_message.filename:
        new_message.filename = '%s_%s.zip' % (username.nickname(), datestr)
        new_message.put()

    # TODO: can we even hit this, since upload should only be called at a rate of once a minute anyway?
    recentFiles = model.Message.all().filter('hasFile =', True).filter(
        'date >=', d.replace(second=0, microsecond=0)).count()
    if recentFiles > 1:  # note we already saved *this* filename
        new_message.filename = new_message.filename.replace(
            '.zip', '_%d.zip' % recentFiles)
        new_message.put()

    ## send email to user ?

    return new_message.filename
Example #47
#!/usr/bin/env python
from utils import readfile
import matplotlib.pyplot as plt
import numpy as np
import sys

if __name__ == "__main__":
    x = readfile(sys.argv[1])
    nbins = min(np.size(x), 100)
    plt.figure()
    plt.subplot(211)
    plt.hist(x.flatten(), nbins)
    plt.title("Value Histogram")
    plt.xlabel("Value")
    plt.ylabel("Frequency")
    plt.subplot(212)
    ind1d = np.argsort(-x, axis=None)[:2000]
    ind2d = np.unravel_index(ind1d, x.shape)
    plt.plot(ind2d[1], ind2d[0], 'o')
    plt.gca().invert_yaxis()
    plt.show()
Example #48
def main(args):

    # parse argument
    labels = args.labels
    servtags = args.servtags
    hints = args.hints

    # make output folder
    OUTDIR = "test"
    if not os.path.exists(OUTDIR):
        os.makedirs(OUTDIR)

    # get all possible features
    feature_dict = dict()
    features = get_all_features(servtags)
    for feat in sorted(features):
        feature_dict[feat] = 0

    # get captcha set
    capt_sets = get_capt_set(labels)

    # get set label
    label_dict = get_capt_labels(labels)

    # construct bitmap
    content = utils.readfile(servtags)

    # constuct hint dict
    hints = get_capt_hints(hints)

    servtags = dict()
    for line in content:
        if not line:
            continue
        fields = line.split(',')
        set_name = fields[0]
        tags = fields[1:]
        servtags[set_name] = tags


    for cset in capt_sets:
        filept = open(os.path.join(OUTDIR, cset + ".arff"), 'w')
        hint = hints[cset]
        generate_header(cset, hint, feature_dict, filept)
        for i in range(10):
            bitmap = get_feature_bitmap(copy.deepcopy(feature_dict),
                                        servtags[cset + "_" + str(i)])
            try:
                bmstring = ",".join( str(bm) for bm in bitmap)
                if hint == label_dict[cset + "_" + str(i)]:
                    bmstring += ",1"
                else:
                    bmstring += ",0"

                bmstring += "\n"
                filept.write(bmstring)

            except Exception as err:
                print(err)

        filept.close()
Example #49
        objective.backward()  # compute gradients
        optimizer.step()  # update parameters
    print("raw maximized layer_activations imgs:", x.shape, "select:", 0, "in", x.shape[0])
    filter_visualization = x.detach().cpu().squeeze()[0]
    hook_handle.remove()  # remember to remove the hook, or it stays attached for every future forward pass
    return filter_activations, filter_visualization

if __name__ == "__main__":
    workspace_dir = sys.argv[1] #'/home/shannon/Downloads/food-11'
    model_filename = sys.argv[2]
    output_dir = sys.argv[3]
    cnnids = [7,14,14,14,24]
    filterids = [0,0,1,2,0]

    print("Reading data")
    train_x, train_y = readfile(os.path.join(workspace_dir, "training"), True)
    print("Size of training data = {}".format(len(train_x)))
    train_set = ImgDataset(train_x, train_y, test_transform)

    print("Loading model")
    model = Classifier().cuda()
    model.load_state_dict(torch.load(model_filename))

    # show filters for the assigned image indices
    img_indices = [800,1602,2001,3201,4001,4800,5600,7000,7400,8003,8801]
    images, labels = train_set.getbatch(img_indices)

    for i, (cnnid,filterid) in enumerate(zip(cnnids,filterids)):
        filter_activations, filter_visualization = filter_explaination(images, model, cnnid=cnnid, filterid=filterid, iteration=100, lr=0.1)
        print(images.shape)
        print(filter_activations.shape)
Example #50
    menu = Menu()
    menu.title = 'IT'
    menu.page_id = it_page.id  # NOTE: As it refer to gaming_page.id, we need to commit gaming_page first
    menu.order = 1
    db.session.add(menu)
    db.session.commit()

    # All initial posts
    post = Page()
    post.title = 'Doom VFR datang!'
    post.excerpt = """
    Pertama kali saya menguji headset VR modern, saya bermain Doom.Demo tahun 2013 PAX West saya datang dari eksekutif Oculus Brendan Iribe, yang memasang headset VR yang terpasang di dompet, sebelum menyalakan versi modifikasi Doom 3. Hampir seketika, saya memuji perendaman itu. Aku melepaskan dan menumbuhkan kemampuanku untuk segera mengalihkan kepalaku ke barisan tembakan setan. Saya menghargai trik pencahayaan dan perspektif yang digunakan untuk menyampaikan betapa banyak kekacauan yang terjadi di sekitarku. Tidak ada yang seperti saat itu.
    """
    post.content = readfile(
        os.path.join(os.path.dirname(os.path.realpath(__file__)),
                     'doomfvr.txt'))
    post.category = 'gaming'
    post.tag = 'doom,vr,feature'
    db.session.add(post)

    post = Page()
    post.title = 'Bug memalukan pada OSX Sierra'
    post.excerpt = """
    Kekacauan yang luar biasa!
    """
    post.content = \
        '''
        <h1>Siapa yang salah ini?</h1>
        <p>Masa' bisa begitu saja error?</p>    
        '''
Example #51
from numpy.linalg import inv

from utils import readfile


class LinearRegression:
    def __init__(self):
        self.w = None

    def fit(self, x, y):
        X = np.matrix(np.insert(x, 0, [1], axis=1))
        Y = np.matrix(y)
        W = inv(X.transpose() * X) * X.transpose() * Y.transpose()
        self.w = np.asarray(W).reshape(-1)

    def predict(self, x):
        return -1 if np.dot(np.insert(x, 0, [1]), self.w) < 0 else 1


if __name__ == '__main__':
    train = readfile('train.dat')
    reg = LinearRegression()
    reg.fit(*train)

    test = readfile('test.dat')
    errors = 0
    for i in range(len(test[0])):
        if reg.predict(test[0][i]) * test[1][i] < 0:
            errors += 1
    print(errors / len(test[0]))
Example #52
    processor_valid = True
else:
    gpu_rg = r'cuda\:(\d{1,2})'
    m = re.search(gpu_rg, p, flags=re.I)
    if m:
        gpu_num = int(m.group(1))
        if gpu_num <= device_count() and gpu_num > 0:
            processor_valid = True

if processor_valid is False:
    exit("Processor type is invalid - only 'cuda' and 'cpu' are valid device types. Only up to cuda:%d are valid" % device_count())

print("Using {}.".format(args.processor))

print("Loading target language from {} and source language from {}.".format(args.targetfile, args.sourcefile))
target, source = readfile(args.targetfile, args.sourcefile)

print("Loading model from {}.".format(args.modelfile))
pre_trained_model = load_gensim_model(args.modelfile)

print("Removing words not found in the model.")
target_data, source_data = remove_words(target, source, pre_trained_model)

print("Splitting data into training and testing sets, {}/{}.".format(round(100-(test_size*100)), round(test_size*100)))
target_train, target_test, source_train, source_test = split_data(target_data, source_data, test_size)

if b >= len(target_train):
    exit("Error: training batch must be lower than training data.")


# train trigram model
Example #53
File: main.py Project: r9d9/Predictive
# coding: utf8
import pandas as pd
import utils

loaded_df = utils.readfile("data/", "housing.csv")
print(loaded_df.head())
loaded_df2 = utils.readfile(
    "C:/Users/Hanna/Documents/DSH/GitHub/Predictive/temperature/",
    "monthly_json.json")
print(loaded_df2.head())

if __name__ == '__main__':  # only runs when the file is executed directly, not when it is imported!
    print('Hello')
Example #54
from utils import readpretrain
from tag import Tag
#from mask import Mask

trn_file = "train.input"
dev_file = "dev.input"
tst_file = "test.input"
pretrain_file = "sskip.100.vectors"
tag_info_file = "tag.info"
#trn_file = "train.input.part"
#dev_file = "dev.input.part"
#tst_file = "test.input.part"
#pretrain_file = "sskip.100.vectors.part"
UNK = "<UNK>"

trn_data = readfile(trn_file)
word_to_ix = {UNK: 0}
lemma_to_ix = {UNK: 0}
ix_to_lemma = [UNK]
for sentence, _, lemmas, tags in trn_data:
    for word in sentence:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
    for lemma in lemmas:
        if lemma not in lemma_to_ix:
            lemma_to_ix[lemma] = len(lemma_to_ix)
            ix_to_lemma.append(lemma)
#############################################
## tags
tags_info = Tag(tag_info_file, ix_to_lemma)
SOS = tags_info.SOS
    parser.add_option("-i", "--ip",action="store", type="string", dest="ip",help="ip address option")
    parser.add_option("-u", "--url",action="store", type="string", dest="url",help="url option")
    parser.add_option("-j", "--json",action="store", type="string", dest="json",help="config json option")
    parser.add_option("-c", action="store_true", dest="iscreate",help="create option")
    parser.add_option("-m", action="store_true", dest="ismonitor",help="monitor option")
    
    options, args = parser.parse_args()

    logger.info('iscreate=%s' % (options.iscreate))
    logger.info('ismonitor=%s' % (options.ismonitor))
    isUsingLinux = utils.isUsingLinux or utils.isBeingDebugged
    if (isUsingLinux):
	__config__ = utils.SmartObject()
	logger.debug('(+++) options.json=%s' % (options.json))
	if (options.json) and (os.path.exists(options.json)):
	    json = ''.join(utils.readfile(options.json))
	    __config__ = utils.SmartObject(args=simplejson.loads(json))
	    logger.debug('(+++) __config__=%s' % (__config__))
	else:
	    logger.warning('Missing config.json from the -j option so using default services.')

	services = ["CPU Load","Current Users","PING","SSH","Total Processes","Zombie Processes"]
	
	if (len(__config__.keys()) > 1):
	    services = [k for k in __config__.keys() if (k not in ['DISKS','__dict__','PARTITIONS'])]
	    logger.debug('(+++) services=%s' % (services))

	if (options.url):
	    if (options.host):
		if (options.ip):
		    if (options.iscreate):
Example #56
def videos(skip):
    try:
        skip = (int(skip) - 1) * 50
    except:
        skip = 0
    return jsonify({'err': 0, 'data': readfile(skip)})
Example #57
def mount_packages():
    on_abort(umount_packages)
    jailname = readfile(e('${OBJDIR}/jailname'))
    sh('mkdir -p ${INSTUFS_DESTDIR}/usr/ports/packages')
    sh('mount -t nullfs ${OBJDIR}/ports/packages/${jailname}-p ${INSTUFS_DESTDIR}/usr/ports/packages')
def mount_packages():
    on_abort(umount_packages)
    jailname = readfile(e('${OBJDIR}/jailname'))
    sh('mkdir -p ${INSTUFS_DESTDIR}/usr/ports/packages')
    sh('mount -t nullfs ${OBJDIR}/ports/data/packages/${jailname}-p ${INSTUFS_DESTDIR}/usr/ports/packages')