def parseLog(monkeydir, files):
    '''
    Scan the Monkey log directory for crash logs and copy them aside.
    :param monkeydir: Monkey log directory
    :param files: file names found in the log directory
    :return: 0 means crash logs were found, 1 means none were found
    '''
    crashnum = 0
    emCrash = ErrorMsg(CRASH, crashnum, '')
    curtime = time.strftime("%Y-%m-%d", time.localtime())
    crashloglist = os.listdir(base_dir + '/' + CrashLog)
    if 'Retired' in files:
        crashfiles = os.listdir(base_dir + '/' + IOSMonkeyLog + '/' +
                                crashLogPath)
        for crfile in crashfiles:
            if re.findall('TruckManager-%s' % curtime, crfile):
                crashnum += 1
                emCrash.error_count = crashnum
                emCrash.error_desc = emCrash.error_desc + "CRASH found, see the log" + '<br>'
                logger.log_info("Found CRASH log, CRASH error count: %s" % crashnum)
                shutil.copyfile(
                    base_dir + '/' + IOSMonkeyLog + '/' + crashLogPath + '/' +
                    crfile, base_dir + '/' + CrashLog + '/' + crfile)
                crashloglist.append(base_dir + '/' + CrashLog + '/' + crfile)
        return 0, emCrash, crashloglist
    else:
        emCrash.error_count = crashnum
        emCrash.error_desc = emCrash.error_desc + "No CRASH found" + '<br>'
        logger.log_info("No CRASH log found")
        return 1, emCrash, crashloglist
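
A minimal usage sketch of parseLog (the directory below is hypothetical; the module-level base_dir, CrashLog, IOSMonkeyLog, crashLogPath and logger that the function relies on must already be configured):

monkeydir = '/tmp/ios-monkey-logs'                    # hypothetical path
status, crash_msg, crash_logs = parseLog(monkeydir, os.listdir(monkeydir))
if status == 0:
    print('crashes found: %d' % crash_msg.error_count)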
Example #2
def handle_request():
    request_data = request.form
    if 'test_id' not in request_data or 'group_id' not in request_data or 'git_url' not in request_data:
        logger.log_error('malformed post request data.')
        return 'malformed post request data.', 400

    if ('secret' not in request_data
            or request_data['secret'] != config.REPORT_SECRET_KEY):
        logger.log_error("unauthorized data")
        return 'unauthorized request', 403

    group_id = request_data['group_id']

    if test_and_set_active(group_id):
        logger.log_info(
            'lock acquired for team with group_id {}'.format(group_id))
        test_id = int(request_data['test_id'])
        git_url = request_data['git_url']
        logger.log_info(
            'test id {} was given for team with group_id {}'.format(
                test_id, group_id))
        process_request(git_url=git_url, group_id=group_id, test_id=test_id)
        logger.log_success(
            'test for team with group_id {} initiated successfully'.format(
                group_id))
        return "success - test initiated"
    else:
        logger.log_error(
            'another test for team with group_id {} is in progress'.format(
                group_id))
        return "error - existing test in progress", 406
Example #3
    def run_test(self, testcase_dict):
        """ run single testcase.
        @param (dict) testcase_dict
            {
                "name": "testcase description",
                "skip": "skip this test unconditionally",
                "times": 3,
                "requires": [],         # optional, override
                "function_binds": {},   # optional, override
                "variables": [],        # optional, override
                "request": {
                    "scrope": {BOTTOM: 2, LEFT: 1, RIGHT: 1, TOP: 3},
                    "add_cols": [
                        {COL: 1, COL_NAME: 月份, CONTENT: "201712"},
                        {COL: 35, COL_NAME: 口径, CONTENT: "管理"}
                    ]
                },
                sheetname: 项目-${口径},
                源文件: $workdir/${月份}/$名称-$口径-$月份.xls,
                目标文件: $workdir/${月份}/部门损益明细表-整理后-$月份.xls
                (Chinese keys: 月份 = month, 口径 = scope, 名称 = name,
                 源文件 = source file, 目标文件 = target file)
            }
        @return True or raise exception during test
        """
        parsed_request = self.init_config(testcase_dict, level="testcase")

        try:
            logger.log_info("%s" % (parsed_request))
            self.change_sheet(parsed_request)
        except Exception as e:
            logger.log_error("run error[%s]" % (parsed_request), exc_info=True)
Example #4
def build_and_run(gdb_man, logger, loop_offset, loop_code, loop_size):
    output_code(loop_offset, loop_code)

    compiler_bin = arm_platform.compiler_bin_for('gcc')
    build_flags = arm_platform.build_flags_for('gcc')

    to_run = [compiler_bin]
    to_run += build_flags.split()
    to_run += ['source.s', '-o', 'binary']

    try:
        os.remove('binary')
    except OSError:
        # We don't care if the file doesn't already exist
        pass

    logger.log_info("builder", "Executing {}".format(" ".join(to_run)))
    p = subprocess.Popen(to_run, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    make_out, make_err = p.communicate()

    if p.returncode != 0:
        logger.log_warn("builder", "Build failed!")
        logger.log_warn("builder", "{}\n{}".format(make_out.decode("utf-8"), make_err.decode("utf-8")))
        return

    run_id = logger.add_run(loop_offset, loop_size)
    run_obj = run.Run("loop", 'gcc', arm_platform, [], run_id)
    return gdb_man.read_energy('binary', run_obj)
Example #5
def gethtml(monkeycmd):
    '''
    Fetch the HTML report.
    :return: 0 means the HTML was fetched successfully, 1 means it failed
    '''
    logger.log_info("start gethtml")

    error = False
    try:
        data = {'monkeycmd': monkeycmd}
        r = requests.post('http://%s:%d/' % (host, port), data=data)
        with open(htmlpath, 'wb+') as f:
            f.write(r.content)
        r.close()

        logger.log_info('performance.html write complete' + '\n'
                        + 'path is: %s' % htmlpath)

    except Exception as e:
        logger.log_error('performance.html write fail' + '\n' + str(e))
        error = True

    stopflask()
    if error:
        return 1
    else:
        return 0
Example #6
    def handleCache(self, backlog_lock):
        while True:
            toRemove = []
            with backlog_lock:
                # print('Cache size: {}'.format(len(self.client_cache)))
                for entry in self.client_backlog:
                    client_id, socket = entry
                    if client_id in self.client_cache:
                        toRemove.append(entry)
                        start = time.time()
                        operation = ''
                        response = self.client_cache.get(client_id)
                        response = encode({
                            "id": client_id,
                            "response": response
                        })
                        socket.sendall(response)
                        self.client_cache.pop(client_id)
                        operation = '[CLIENT_POLL] send'
                        end = time.time()
                        timestamp = datetime.fromtimestamp(
                            end - start).strftime('%M:%S:%f')
                        logger.log_info(
                            '[TIME] MIX LISTENER {} TOOK {}'.format(
                                operation, timestamp))
                for entry in toRemove:
                    self.client_backlog.remove(entry)
            time.sleep(0.05)
Example #7
def get_fixes_stats():
    no_deletes = 0
    total = 0
    total_commits = 0
    no_deletes_commits = 0
    for repo in get_repos():
        file = path.join('data', repo, 'test', 'test_buggy_fix_diffs.json')
        differences = load_data(file)
        for commit in differences.values():
            total_commits += 1
            nd = True  # no delete
            for files in commit.values():
                for file in files:
                    if len(file['deleted']) == 0:
                        no_deletes += 1
                    total += 1
                    if len(file['deleted']) > 0:
                        nd = False
                        continue
            if nd:
                no_deletes_commits += 1
        log_info(
            repo,
            'Number of fixed files with no deleted lines is {0[0]} out of {0[1]}',
            (no_deletes, total))
        log_info(
            repo,
            'Number of fixes with no deleted lines is {0[0]} out of {0[1]}',
            (no_deletes_commits, total_commits))
Example #8
def handle_request():
    request_data = request.form
    if 'ip' not in request_data or 'group_id' not in request_data or 'port' not in request_data:
        logger.log_error('malformed post request data.')
        return 'malformed post request data.', 400

    group_id = request_data['group_id']

    if test_and_set_active(group_id):
        logger.log_info('lock acquired for team "{}" with group_id {}'.format(
            team_names[int(group_id)], group_id))
        ip = 'http://{}:{}'.format(request_data['ip'], request_data['port'])
        test_order = None
        if 'test_order' in request_data:
            test_order = literal_eval(request_data['test_order'])
            logger.log_info(
                'custom test order {} was given for team "{}" with group_id {}'
                .format(test_order, team_names[int(group_id)], group_id))

            if isinstance(test_order, int):
                test_order = [test_order]

        process_request(ip, group_id, test_order)
        logger.log_success(
            'test for team "{}" with group_id {} initiated successfully'.
            format(team_names[int(group_id)], group_id))
        return "success - test initiated"
    else:
        logger.log_error(
            'another test for team "{}" with group_id {} is in progress'.
            format(team_names[int(group_id)], group_id))
        return "error - existing test in progress", 406
Example #9
    def handle_PIR(self, decrypted_msg, client_pk):
        time_queued = time.perf_counter() - self.t_accepted
        log_info(">>>>> TIME QUEUED: {}".format(time_queued))
        t1 = time.perf_counter()
        print("TRYING TO FETCH")
        answer = self.dbnode.fetch_answer(decrypted_msg)
        print("ANSWER:", answer)
        reply = encode(answer)
        encrypted_reply = encode(self.dbnode.encrypt(reply, client_pk))
        nymtuple = decrypted_msg['nymtuple']
        first_node = decode(nymtuple[0])
        header, delta = package_surb(getGlobalSphinxParams(), nymtuple,
                                     encrypted_reply)
        self.dbnode.get_mixnode_list()
        json_data, dest = RequestCreator().post_msg_to_mix(
            {
                'ip': first_node[1],
                'port': self.mixport
            }, {
                'header': header,
                'delta': delta
            })
        t2 = time.perf_counter()
        elapsed_time = (t2 - t1)
        log_info("TIME ELAPSED: {}".format(elapsed_time))
        self.network_sender.send_data(json_data, dest)
Example #10
    def get_df_base(self):
        logger.log_info("Getting Base DF.")
        date_range = pd.date_range(start=config.DF_BASE_START_DATE,
                                   end=config.DF_BASE_END_DATE,
                                   freq=config.DF_BASE_FREQUENCY)
        df_base = pd.DataFrame(index=date_range)
        return df_base
Example #11
    def get_tf_resampled_single_currency_raw_data_with_base(
            self, currency_dir):
        logger.log_info(
            "Resampling the time frame of data from {}.".format(currency_dir))
        df_raw_base = self.get_single_currency_raw_data_with_base(currency_dir)
        df_resample = tf_resampler.resample(df_raw_base)
        return df_resample
Example #12
    def installapp(self, apkpackagename, apkpath):
        '''
        Install the app.
        :param apkpackagename: package name of the app
        :param apkpath: path to the apk
        :return: 0 means success, 1 means failure
        '''
        try:
            if self.inspectapp(apkpackagename) == 0:
                # logger.log_info('app already installed, uninstalling first')
                cmd1 = 'adb -s %s uninstall %s' % (self.dev, apkpackagename)
                logger.log_debug(cmd1)
                os.system(cmd1)
                logger.log_info('uninstall complete, reinstalling')
                cmd2 = 'adb -s %s install %s' % (self.dev, apkpath)
                logger.log_debug(cmd2)
                os.system(cmd2)
                time.sleep(3)

                if self.inspectapp(apkpackagename) == 0:
                    return 0
                else:
                    return 1
            else:
                # logger.log_info('app not installed, installing')
                cmd3 = 'adb -s %s install %s' % (self.dev, apkpath)
                logger.log_debug(cmd3)
                os.system(cmd3)
                time.sleep(3)
                if self.inspectapp(apkpackagename) == 0:
                    return 0
                else:
                    return 1
        except Exception as e:
            logger.log_error('Failed to install %s' % apkpackagename + '\n' + 'reason: %s' % e)
            return 1
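
A hedged usage sketch of installapp (the class name, device serial, package name and apk path below are all hypothetical):

device = AdbDevice()               # hypothetical name for the class above
device.dev = 'emulator-5554'       # hypothetical device serial
if device.installapp('com.example.app', '/tmp/app.apk') == 0:
    print('install succeeded')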
Example #13
    def runmonkey(self, seed, packagename, throttle, runtime, monkeylog, errorlog):
        '''
        Run Monkey.
        :return: the assembled monkey command
        '''

        if not os.path.exists(self.db.monkeyfolder):
            os.mkdir(self.db.monkeyfolder)
        cmd = 'adb -s %s shell monkey ' \
              '-s %d ' \
              '-p %s ' \
              '--hprof ' \
              '--throttle %d ' \
              '--ignore-crashes ' \
              '--ignore-timeouts ' \
              '--ignore-security-exceptions ' \
              '--ignore-native-crashes ' \
              '--monitor-native-crashes ' \
              '--pct-syskeys 10 ' \
              '-v -v -v %d  2>%s 1>%s' % \
              (self.dev, int(seed), packagename, int(throttle),
               self.event, errorlog, monkeylog)

        logger.log_info("Monkey命令:%s" % cmd)
        pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout
        return cmd
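
A hedged call of the method above (device stands in for an instance of the class; every argument value is hypothetical). Note that the command line redirects monkey's stderr to errorlog and stdout to monkeylog:

cmd = device.runmonkey(seed=12345, packagename='com.example.app',
                       throttle=300, runtime=0,
                       monkeylog='/tmp/monkey.log',
                       errorlog='/tmp/error.log')
print(cmd)  # the assembled adb shell monkey command line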
Example #14
def update_project_list_an():

    base_url = "http://www2.assemblee-nationale.fr/recherche/amendements"
    list_dossier_id, list_dossier_name = get_project_list_an(url=base_url)

    for i, dossier in enumerate(list_dossier_id):
        temp_str = str(list_dossier_name[i]).translate({ord(c): " " for c in "\'\""})  # Replace ' and " chars with spaces
        p = re.compile('([A-Za-zéàèîïôêçû ’\-,]*)([: ]*)([ \(a-zA-Zéàèîïôêçû0-9’\-:,°\)]*)')

        if re.search(p, temp_str).group(2) != "":
            categorie = re.search(p, temp_str).group(1)
            projet = re.search(p, temp_str).group(3)
        else:
            categorie = " "
            projet = temp_str

        list_examen_id = []
        list_examen_name = []
        query_url = 'http://www2.assemblee-nationale.fr/recherche/query_amendements?typeDocument=amendement'\
                    + '&idDossierLegislatif=' + dossier \
                    + '&idExamen=&idArticle=&idAlinea=&sort=&numAmend=&idAuteur=&typeRes=facettes'
        try:
            response = requests.get(query_url)
            data = response.json()
            for exam_t in data['examenComposite']:
                list_examen_name.append(exam_t['txt'])
                list_examen_id.append(exam_t['val'])

            for j, examen in enumerate(list_examen_id):
                get_or_create(session, TableProjetsAN, n_dossier=list_dossier_id[i], cat_dossier=categorie,
                                         nom_dossier=projet, n_examen=list_examen_id[j], nom_examen=list_examen_name[j])
        except Exception:
            log_info('Error on dossier no. {}'.format(dossier))
Example #15
def report_results(errors, project, name):
    good = name + ' -- OK'
    bad = name + ' -- Errors: {0[0]}'
    if len(errors) == 0:
        log_info(project, good, None)
    else:
        log_info(project, bad, errors)
Example #16
def analyze_group_exts():
    """Get entropy by extension for a group and find the min.
    It's an alternative method to the average extension entropy values."""
    days = {'good': load_data('data/days_good.json'), 'bad': load_data('data/days_bad.json')}
    entropies = {'good': dict(), 'bad': dict()}
    for g, data in days.items():
        for dev, dates in data.items():
            uniques = get_unique_extensions(dates)
            for ext, ds in uniques.items():
                if entropies[g].get(ext) is None:
                    entropies[g].update({ext: list(ds)})
                else:
                    entropies[g][ext].extend(list(ds))
    for group, d in entropies.items():
        for extension, dys in d.items():
            e = 0
            for day in dys:
                p_x = float(day[1].count(extension)) / len(day[1])
                if p_x > 0:
                    e -= p_x * math.log(p_x, 2)
            entropies[group][extension] = e
    for gp, da in entropies.items():
        mins = list()
        for et, x in da.items():
            if x == 0:
                mins.append(et)
        log_info(None, 'Extension entropy mins for {0[0]} are: {0[1]}', (gp, mins))
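
Each inner iteration above accumulates the single-extension Shannon entropy term -p * log2(p) for one day; a toy standalone check of that term:

import math

day_files = ['py', 'py', 'py', 'md']                  # one day's touched extensions
p_x = float(day_files.count('py')) / len(day_files)   # p('py') = 0.75
term = -p_x * math.log(p_x, 2)                        # -0.75 * log2(0.75) ≈ 0.311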
Example #17
    def __init__(self, token, group_id):
        log_info(config.ver_msg)
        self.vk_session = vk_api.VkApi(token=token)
        self.longpoll = VkBotLongPoll(self.vk_session, group_id, wait=0.1)
        self.vk = self.vk_session.get_api()
        self.community_info = self.vk.groups.getById(group_id=group_id)
        self.start_listening()
Example #18
def test_buggy_duplicates(project):
    """Out of all the buggy commits, if there is a duplicate line, test what are the file names and if the same, note as error.
    Those are not really errors, just the duplicates that need to be investigated so that all the commits don't have to
    be investigated for duplicates, just the suspects."""
    errors = list()
    for commit, lines in load_buggy(project).items():
        names = dict()
        dups = set()
        for l in lines:
            file = l[1].split('/')
            name = file[len(file) - 1]
            if names.get(name) is not None:
                if l[0] in list(names.get(name).keys()):
                    # print('increasing occurrence for ', commit, name)
                    names.get(name).get(l[0])['occurrence'] += 1
                    dups.add((name, l[0]))
                else:
                    names.get(name)[l[0]] = {'occurrence': 1}
            else:
                names.update({name: {l[0]: {'occurrence': 1}}})
        if len(dups) > 0:
            errors.append({commit: list(dups)})
    if len(errors) > 0:
        log_info(project, 'Number of duplicates to investigate: {0[0]}',
                 (len(errors), ))
    report_results(errors, project, 'test_buggy_duplicates')
Example #19
    def intercept_respones_json(self):
        '''
        Randomly add or delete fields in the returned JSON data.
        :return:
        '''
        logger.log_info("intercept_respones_json")
        return json.dumps(self.rep_json)
Example #20
    def _log_evaluation_config(self, model_json, model_weights, X, Y):
        logger.log_info(
            "Using model architecture from - {model_arch_path} and weights from - {model_weights_path}."
            .format(model_arch_path=model_json,
                    model_weights_path=model_weights))
        logger.log_info("Testing period from {start} to {end}.".format(
            start=config.TESTING_DATE_START, end=config.TESTING_DATE_END))
Example #21
def eval_model(samples):
    """ Evaluate the LSTM model."""
    temp_dir = tempfile.mkdtemp(suffix='-lstm-syscall')
    logger.log_info(MODULE_NAME,
                    'Evaluation results will be written to ' + temp_dir)
    random.shuffle(samples)

    for sample in samples:
        trace = parse_report(sample['cuckoo_report'])
        if trace is None:
            continue

        o_filename = sample['label'] + '-' + path.basename(
            sample['base_dir']) + '.gz'
        o_filepath = path.join(temp_dir, o_filename)
        logger.log_debug(MODULE_NAME, 'Writing to ' + o_filepath)

        with gzip.open(o_filepath, 'wt') as ofile:
            for x_batch, y_batch in trace:
                ps = model.predict_on_batch(np.array(x_batch)).tolist()
                cs = [max(p) for p in ps]  # Max confidence
                ms = [p.index(max(p)) for p in ps]  # Most likely label
                ts = [int(a == b) for a, b in zip(ms, y_batch)]  # Compare prediction to real label
                for c, m, t, y in zip(cs, ms, ts, y_batch):
                    ofile.write(
                        str(t) + ',' + str(m) + ',' + str(c) + ',' + str(y) +
                        "\n")
Example #22
    def not_intercept(self):
        '''
        Do not tamper with the response; return the result directly.
        :return:
        '''
        logger.log_info("not_intercept")
        return json.dumps(self.rep_json)
Example #23
def test_model(testing_set):
    """ Test the LSTM model."""
    # For reporting current metrics
    freq_s = options.status_interval * 60
    last_s = datetime.now()

    res = [0.0] * len(model.metrics_names)
    batches = 0
    num_samples = len(testing_set)

    for status in map_to_model(testing_set, model.test_on_batch):
        if status is None:
            break
        for stat in range(len(status)):
            res[stat] += status[stat]
        batches += 1
        # Print current metrics every minute
        if (datetime.now() - last_s).total_seconds() > freq_s:
            c_metrics = [status / batches for status in res]
            c_metrics_str = ', '.join([str(model.metrics_names[x]) + ' ' + '%.12f' % (c_metrics[x]) for x in range(len(c_metrics))])
            c_metrics_str += ', progress %.4f' % (float(generator.fin_tasks.value) / float(num_samples))
            logger.log_info(module_name, 'Status: ' + c_metrics_str)
            last_s = datetime.now()

    if batches < 1:
        logger.log_warning(module_name, 'Testing set did not generate a full batch of data, cannot test')
        return

    for stat in range(len(res)):
        res[stat] /= batches

    logger.log_info(module_name, 'Results: ' + ', '.join([str(model.metrics_names[x]) + ' ' + str(res[x]) for x in range(len(res))]))
Example #24
def build_model():
    """ Builds the LSTM model assuming two categories."""
    model = Sequential()

    model.add(
        Embedding(input_dim=options.embedding_in_dim,
                  output_dim=options.embedding_out_dim,
                  input_length=options.seq_len))

    model.add(LSTM(options.units))

    model.add(Dense(128))
    model.add(Activation('relu'))

    model.add(Dropout(options.dropout))

    model.add(Dense(options.max_classes))
    model.add(Activation('softmax'))

    opt = optimizers.RMSprop(lr=options.learning_rate,
                             decay=options.learning_decay)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=opt,
                  metrics=[
                      'sparse_categorical_accuracy',
                      'sparse_top_k_categorical_accuracy'
                  ])

    logger.log_info(MODULE_NAME, 'Model Summary:')
    model.summary(print_fn=(lambda x: logger.log_info(MODULE_NAME, x)))

    return model
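
A hedged smoke test for the model above, assuming the same options object is available (batch size and data are toy values):

import numpy as np

model = build_model()
# Toy batch: 32 integer-token sequences with one sparse label each.
x = np.random.randint(0, options.embedding_in_dim, (32, options.seq_len))
y = np.random.randint(0, options.max_classes, (32, 1))
loss_and_metrics = model.train_on_batch(x, y)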
Example #25
    def replaceword(self, path, oldstart, new):
        '''
        Replace one line of a file.
        :param path: file path
        :param oldstart: prefix of the line to be replaced
        :param new: replacement line
        :return: 0 means the replacement succeeded, 1 means it failed
        '''
        try:
            with open(path, "r") as f:
                lines = f.readlines()

            with open(path, "w") as f_w:
                for line in lines:
                    if line.startswith(oldstart):
                        line = new + '\n'
                    f_w.writelines(line)

            with open(path, "r") as f:
                lines = f.read()

            if new in lines:
                return 0
            else:
                logger.log_info("修改%s中%s行失败" % (path, oldstart))
                return 1
        except Exception as e:
            logger.log_error("Failed to modify file %s, line starting with %s: %s" % (path, oldstart, str(e)))
            return 1
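
A hedged usage example of replaceword (the class name, file path and line contents are hypothetical): rewrite the line that starts with 'port=' in a config file.

editor = FileEditor()            # hypothetical name for the class above
if editor.replaceword('/tmp/app.conf', 'port=', 'port=8080') == 0:
    print('line replaced')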
Example #26
    def _play_round(self):
        ''' Plays the next round in the game. '''
        logger.log_info('\nRound ' + str(self._round + 1))
        logger.log_info('Dealer: ' + self._names[self._dealer])
        self._play_bids()
        self._play_results()
        self._advance_round()
Example #27
def scan_file(file_path):
    file_path = u"{}".format(file_path)

    if file_path is None or not os.path.isfile(file_path):
        msg = "The provided path '{}' is invalid.".format(file_path)
        logger.log_error(msg, module_name)
        print('[-] ERROR: {}'.format(msg))
        raise Exception(msg)

    # Check if there are any rules in yara-rules-src dir and compile them
    common_functions.compile_yara_rules_src_dir()
    try:
        logger.log_info('Single file scan started', module_name)
        print('[+] Single file scan started')

        logger.log_debug('Getting Yara-Rules', module_name)
        common_functions.print_verbose('[+] Getting Yara-Rules..')
        yara_rule_path_list = get_file_path_list(settings.yara_rules_directory,
                                                 True, '*.yar')

        match_list = match([file_path], yara_rule_path_list)
        print('[+] File scan complete.')
        logger.log_info('File scan complete', module_name)
        return match_list

    except Exception as e:
        common_functions.print_verbose('[-] ERROR: {}'.format(e))
        logger.log_error(e, module_name)
        raise
Example #28
def show_police_answer(player_data, target):
    ''' Console alerts whether the person interrogated by the police is an
    assassin or not. '''
    if player_data[target].is_assn():
        logger.log_info('The person you queried is an assassin.\n')
    else:
        logger.log_info('The person you queried is NOT an assassin.\n')
Example #29
def build_and_run(gdb_man, logger, loop_offset, loop_code, loop_size):
    output_code(loop_offset, loop_code)

    compiler_bin = arm_platform.compiler_bin_for('gcc')
    build_flags = arm_platform.build_flags_for('gcc')

    to_run = [compiler_bin]
    to_run += build_flags.split()
    to_run += ['source.s', '-o', 'binary']

    try:
        os.remove('binary')
    except OSError:
        # We don't care if the file doesn't already exist
        pass

    logger.log_info("builder", "Executing {}".format(" ".join(to_run)))
    p = subprocess.Popen(to_run,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    make_out, make_err = p.communicate()

    if p.returncode != 0:
        logger.log_warn("builder", "Build failed!")
        logger.log_warn(
            "builder", "{}\n{}".format(make_out.decode("utf-8"),
                                       make_err.decode("utf-8")))
        return

    run_id = logger.add_run(loop_offset, loop_size)
    run_obj = run.Run("loop", 'gcc', arm_platform, [], run_id)
    return gdb_man.read_energy('binary', run_obj)
Example #30
def test_reader():
    from sys import argv, exit
    import traceback

    if len(argv) < 4:
        print(argv[0], '<input_file>', '<memory_file>', '<bin_dir>')
        exit(0)

    logger.log_start(logging.DEBUG)

    try:
        ofile = tempfile.mkstemp(text=True)
        ofilefd = fdopen(ofile[0], 'w')

        mem_map = read_memory_file(argv[2])

        for tuple in disasm_pt_file(argv[1], argv[3], mem_map):
            if tuple is None:
                break
            ofilefd.write(str(tuple) + "\n")

        ofilefd.close()
    except:
        traceback.print_exc()
        ofilefd.close()
        remove(ofile[1])
        logger.log_stop()
        exit(1)

    logger.log_info(module_name, 'Wrote generated tuples to ' + str(ofile[1]))
    logger.log_stop()
Example #31
def evaluate_file(file_name):
    """Evaluates the conditionals listed in a file and returns their
    results in a dict."""

    log_info('Loading conditionals from file "{0}"'.format(file_name))

    results = {}

    if not is_file(file_name):
        log_error('File "{0}" does not exist'.format(file_name))
        return results

    try:
        from yaml import safe_load as yaml_safe_load
        with open(file_name, 'r', encoding='utf-8') as f:
            data = yaml_safe_load(f.read())
    except Exception as e:
        log_error(e)
        return results

    for c in (data or []):
        if 'name' not in c:
            log_error('Ignoring a conditional without name, skipping')
            continue

        c_name = c['name']

        if c_name in results:
            log_error('Duplicate conditional "{0}", skipping'.
                      format(c_name))
            continue

        if 'method' not in c:
            log_error('Conditional "{0}" has no method defined, skipping'.
                      format(c_name))
            continue

        c_method = c['method']

        if c_method not in __METHODS:
            log_error('Conditional "{0}" has an unknown method "{1}", '
                      'skipping'.format(c_name, c_method))
            continue

        if ('params' not in c) or (c['params'] is None):
            log_error('Conditional "{0}" has no "params" block, skipping'.
                      format(c_name))
            continue

        try:
            results[c_name] = __METHODS[c_method](c_name, c['params'][0])
        except Exception as e:
            # Don't let a single conditional failure remove
            # everything in this file
            log_error(e)

    for k, v in results.items():
        log_debug('Conditional: name="{0}", result={1}'.format(k, v))

    return results
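
A hypothetical conditionals file for evaluate_file (the method name must be a key in the module's __METHODS registry, which is not shown here), followed by a call:

# conditionals.yaml (hypothetical contents):
#   - name: is_weekend
#     method: weekday_in        # hypothetical method key
#     params:
#       - [5, 6]
results = evaluate_file('conditionals.yaml')
print(results.get('is_weekend'))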
Example #32
def scan_access_logs(access_logs_file_path, www_dir_path, tail=0):
    """
    Attempt to match accessed files access logs with Yara-Rules
    :param access_logs_file_path: path to access log file
    :param www_dir_path: path to public web directory ex; www, public_html
    :param tail: read last n lines from access log. if value is 0 then will read the whole file
    :return: list of dictionaries containing match details for each file. example: {"file": file_path, "yara_rules_file": rule_path, "match_list": matches}
    """
    try:
        if access_logs_file_path is None or not os.path.isfile(
                access_logs_file_path):
            logger.log_error(
                'The provided path "{}" is invalid '.format(
                    access_logs_file_path), module_name)
            print('[-] ERROR: The provided path "{}" is invalid.'.format(
                access_logs_file_path))
            return None

        logger.log_info('Access logs scan started', module_name)
        print('[+] Access logs scan started')

        logger.log_debug('Reading access logs file', module_name)
        common_functions.print_verbose('[+] Reading access logs file..')

        if tail > 0:
            lines = common_functions.tail(access_logs_file_path, tail)
        else:
            lines = common_functions.read_file_lines(access_logs_file_path)

        logger.log_debug(
            'Attempting to parse accessed files path(s) from access logs',
            module_name)
        common_functions.print_verbose(
            '[+] Attempting to parse accessed files path(s) from access logs..'
        )

        # combine file path with www dir path
        file_path_set = combine_file_path_list_with_dir(
            access_log_parser.get_accessed_files_list(lines), www_dir_path)

        logger.log_debug('[+] {} File to process'.format(len(file_path_set)),
                         module_name)
        print('[+] {} File to process.'.format(len(file_path_set)))

        logger.log_debug('Getting Yara-Rules', module_name)
        common_functions.print_verbose('[+] Getting Yara-Rules..')
        yara_rule_path_list = get_file_path_list(settings.yara_rules_directory,
                                                 True, ['*.yar'])
        match_list = match(file_path_set, yara_rule_path_list)

        print('[+] Access logs scan complete.')
        logger.log_info('Access logs scan complete', module_name)

        return match_list

    except Exception as e:
        print('[-] ERROR: {}'.format(e))
        logger.log_error(e, module_name)
        return None
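
A hedged invocation of the scanner above (both paths are hypothetical; tail=1000 limits the scan to the last thousand log lines). Per the docstring, each match is a dict with "file", "yara_rules_file" and "match_list" keys:

matches = scan_access_logs('/var/log/apache2/access.log',
                           '/var/www/html', tail=1000)
if matches:
    for m in matches:
        print(m['file'], m['yara_rules_file'])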
Example #33
def load_resource_matrix(resource_mat_file, allow_reversed_edges):
    """Loads the resource matrix
    :param resource_mat_file -- the resource adjacency matrix file
    :param allow_reversed_edges -- whether reversed edges are allowed in this resource
    """
        
    # Read the matrix from the file to compressed format
    compressed_file = resource_mat_file
    
    if not resource_mat_file.endswith('.npz'):
        adjacency_matrix = read_matrix(resource_mat_file)
        log_info('Read matrix.')
        save_matrix(resource_mat_file + '.tmp', adjacency_matrix)
        log_info('Saved matrix.')
        compressed_file = compressed_file + '.tmp'
        
    adjacency_matrix = load_matrix(compressed_file)
    log_info('Loaded matrix.')
    
    # Add the transposed matrix, since we are looking for paths
    # in both directions
    if allow_reversed_edges:
        adjacency_matrix = adjacency_matrix + adjacency_matrix.T
        log_info('Added transpose.')
        
    return adjacency_matrix
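
A hedged call of the loader above (the file name is hypothetical; read_matrix, save_matrix and load_matrix are the project's own helpers). A plain-text matrix is converted once and cached alongside as '<file>.tmp':

adjacency = load_resource_matrix('resource_matrix.txt',
                                 allow_reversed_edges=True)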
Example #34
def learner(db):
    while True:
        command = (yield)
        try:
            if command["action"] == "put":
                log_info("setting db['%s'] to %s" % (command["key"], command["value"]))
                db[command["key"]] = command["value"]
            elif command["action"] == "incr":
                db[command["key"]] = str(int(db[command["key"]]) + 1)
                log_info("incrementing db['%s']" % command["key"])
            elif command["action"] == "get":
                # TODO only needs to execute on one
                log_fail("db['%s'] is %s" % (command["key"], db[command["key"]]))
        except KeyError:
            log_fail("key %s is not in db" % command["key"])
Example #35
  def invoke_conversion(self, file_sha1, page, fileinfo):
    """entry point for processing a valid request
       by sha1 checksum for the file, a page number and the fileinfo dictionary"""
    log_info(file_sha1, fileinfo['filename'], None, "Starting new task to download and convert the file.")
    # download the file
    data_ops.download_file(fileinfo['url'], fileinfo['filename'])
    log_info(file_sha1, fileinfo['filename'], None, "Successfully downloaded file.")

    # first convert the desired page and the -cache_before and +cache_after pages around it
    for p in range(page - config.server['cache_before'], page + config.server['cache_after'] + 1):
      if p > 1 and p <= fileinfo['pagecount']:
        self.process_single_page(fileinfo, file_sha1, p)
    # then convert all other pages from that file
    for p in range(1, fileinfo['pagecount'] + 1):
      if not data_ops.json_page_is_cached(file_sha1, p, self.datadir):
        self.process_single_page(fileinfo, file_sha1, p)

    data_ops.clean_up(fileinfo['filename'])
    log_info(file_sha1, fileinfo['filename'], None, "Successfully converted file to JSON.")
  def return_cached_page(self, sha1, page):
    """return the cached JSON page"""
    log_info(sha1, "", page, "Page successfully returned.")
    return data_ops.get_cached_json_page(sha1, page, self.datadir)