Example #1
def runTestsAndPrintReport(trainingCategories, testingCategories):
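    # Gather classified reviews per category, build POS/NEG datasets, and print the evaluation report.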
    
    allPositiveTrainingReviews = []
    allNegativeTrainingReviews = []
    allPositiveTestingReviews = []
    allNegativeTestingReviews = []
    
    for category in trainingCategories:
        (allPositiveTrainingReviewsTmp, allNegativeTrainingReviewsTmp) = getAllClassifiedReviews(AmazonReviewCorpusReader(), category)
        allPositiveTrainingReviews += allPositiveTrainingReviewsTmp
        allNegativeTrainingReviews += allNegativeTrainingReviewsTmp
    
    for category in testingCategories:
        (allPositiveTestingReviewsTmp, allNegativeTestingReviewsTmp) = getAllClassifiedReviews(AmazonReviewCorpusReader(), category)
        allPositiveTestingReviews += allPositiveTestingReviewsTmp
        allNegativeTestingReviews += allNegativeTestingReviewsTmp
    
    datasets = [ClassifiedDataset(0.1, 'POS', allPositiveTrainingReviews, allPositiveTestingReviews),
                ClassifiedDataset(0.1, 'NEG', allNegativeTrainingReviews, allNegativeTestingReviews)]
    
    print('training: %s vs testing: %s' % (trainingCategories, testingCategories))

    report = Report(datasets, factories)
    
    reportStats = report.getReport()
    
    Report.printReport(reportStats)
Example #2
    def update_ui():
        if items[current].get('audit_radcat'):
            radcat_entry.set("RADCAT{}".format(items[current]['audit_radcat']))
            fu_entry.set(items[current]['audit_radcat3'] == "Yes")
            unscored_btn.state(['!disabled'])
        else:
            radcat_entry.set('')
            fu_entry.set(False)

        item = items[current]

        r = Report(text=item['report_text'])
        # logging.debug(r.text)

        # extractions = r.extractions()
        # item['radcat'] = int(extractions.get('radcat'))
        # item['radcat3'] = "Yes" if extractions.get('radcat3') else "No"

        complete = [k for k in items if k.get('audit_radcat')]
        task_label_str.set("Report {} of {} ({} complete)\nStatus: {}".format(
            current + 1, len(items), len(complete), item['status']))

        report_text['state'] = 'normal'
        report_text.delete('1.0', 'end')
        report_text.insert('1.0', r.anonymized())
        report_text['state'] = 'disabled'
Example #3
    def run(self):
        # Set input files
        InputUtils.set_processor(self)
        InputUtils.set_memory(self)
        InputUtils.set_tasks(self)
        self.setup_tasks()

        # Run simulator...
        while self.time < self.end_sim_time:
            if self.verbose == System.V_DETAIL:
                print(f'\ntime = {self.time}')
                self.print_queue()

            # Choose up to n_core tasks to run during [time, time+1).
            exec_task_list = []
            if len(self.queue) < self.CPU.n_core:
                # Everything in the queue can run (fewer tasks than cores).
                for tup in self.queue:
                    exec_task_list.append(tup[1])
                self.queue = []

                # The cores left without a task run idle.
                for i in range(self.CPU.n_core - len(exec_task_list)):
                    self.CPU.exec_idle(time=1)
            else:
                for i in range(self.CPU.n_core):
                    exec_task_list.append(self.pop_queue())

            # Run each selected task for one time unit.
            for exec_task in exec_task_list:
                exec_task.exec_active(system=self, time=1)

            # Remaining queued tasks idle for one unit (accrue power and advance time).
            for i in range(len(self.queue)):
                task = self.queue[i][1]
                task.exec_idle(time=1, update_deadline=True)
                self.queue[i] = (task.calc_priority(), task)
            heapq.heapify(self.queue)  # priorities changed, so re-heapify
            for tup in self.wait_period_queue:
                tup[1].exec_idle(time=1, update_deadline=False)

            self.add_utilization()

            # If a task finished its work for this period, reset it and move it to the wait queue.
            for exec_task in exec_task_list:
                if exec_task.det_remain == 0:
                    exec_task.period_start += exec_task.period
                    exec_task.det_remain = exec_task.det
                    exec_task.deadline = exec_task.period
                    self.push_wait_period_queue(exec_task)
                else:
                    self.push_queue(exec_task)

            self.check_queued_tasks()
            self.time += 1
            self.check_wait_period_queue()

        report = Report(self)
        report.print_console()
        return report
Example #4
 def make_report(n_best, n_worst):
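     # Header first, then one appended line per detail row.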
     report = Report()
     make_header(report)
     details = make_details(data, control.test_months, n_best, n_worst)
     for line in details.iterlines():
         report.append(line)
     return report
Example #5
    def reportBasicInfo(self, printOnScreen=True):
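        # Summarize each feature: type, min, max, and mean ('none' for string features).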

        featuresType = []
        featuresMin = []
        featuresMax = []
        featuresMean = []

        for feature in self.features:
            featuresType.append(type(self.dataFrame[feature].iloc[0]).__name__)
            featuresMin.append(self.dataFrame[feature].min())
            featuresMax.append(self.dataFrame[feature].max())

            if type(self.dataFrame[feature].iloc[0]).__name__ != 'str':
                featuresMean.append(self.dataFrame[feature].mean())

            else:
                featuresMean.append('none')

        name = 'BasicInfo' + str(self.numberOfReports('Basic') + 1)

        report = Report(
            name,
            cols=zip(self.features, featuresType, featuresMin, featuresMax,
                     featuresMean, self.n_nulls),
            headers=["Feature", "Type", "Min", "Max", "Mean", "Num Nulls"],
            typeReport='Basic')

        if printOnScreen:
            report.showReport()
        self.reports.append(report)
Example #6
 def PrintReport(self):
     data = list(self.worldData.plants)
     data.extend(list(self.worldData.grazers))
     data.extend(list(self.worldData.predators))
     data.extend(list(self.worldData.obstacles))
     report = Report()
     report.create(data, self.simTime)
Example #7
def main():
    # Delete portfolio.db if it exists
    if os.path.exists("portfolio.db"):
        os.remove("portfolio.db")
        print("portfolio.db removed successfully")

    # Get data paths
    stock_filename = "data_stocks.csv"
    bond_filename = "data_bonds.csv"

    # Initialize dataReader
    dataReader = DataReader(stock_filename, bond_filename)

    # Get stock and bond data
    stockData = dataReader.getStockData()
    bondData = dataReader.getBondData()

    # Initialize an investor
    investor = Investor("Bob", "Smith", "123 Fake St, Denver, CO 80221",
                        "303.777.1234")

    # Initialize a portfolio
    portfolio = Portfolio(investor)

    # Add the stocks and bonds to the portfolio
    portfolio.addStocks(stockData)
    portfolio.addBonds(bondData)

    # Initialize a report
    report = Report(portfolio)

    # Print the report
    report.print()
Example #8
 def make_report(n_best, n_worst):
     report = Report()
     make_header(report)
     details, extra_info = make_details(data, control.test_months, n_best, n_worst)
     for line in details.iterlines():
         report.append(line)
     make_plt(data, extra_info, n_best, n_worst)
     return report
Example #9
def reportPPTX(cf,servers,switches,zfs):
	print("* Generate Final Report  ...")
	date = datetime.now().strftime("%d%m%Y-%H%M%S")
	name = cf.variables['pptx_report'] + '/' + 'Exalogic_Report_'+ date + '.pptx'
	r = Report()
	r.loadreport(date,name,"Informe de desempeño de nodos Exalogic", servers, switches, zfs, cf)
	r.generatepptxreport()
	print("*  ENJOY :) * ")
Example #10
    def __init__(self,
                 api_aid=None,
                 api_sec=None,
                 api_org=None,
                 api_url=None,
                 api_token=None,
                 api_token_expires=None):
        """ """
        # logger
        self.log_level = {
            'debug': logging.DEBUG,
            'info': logging.INFO,
            'warning': logging.WARNING,
            'error': logging.ERROR,
            'critical': logging.CRITICAL
        }
        self.formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(funcName)s:%(lineno)d)'
        )
        self.tcl = tc_logger()

        # debugging
        self._memory_monitor = True

        # credentials
        self._api_aid = api_aid
        self._api_sec = api_sec
        self._api_token = api_token
        self._api_token_expires = api_token_expires

        # user defined values
        self._api_org = api_org
        self._api_url = api_url
        self._api_result_limit = 200

        # default values
        self._activity_log = False
        self._api_request_timeout = 30
        self._api_retries = 5  # maximum of 5 minute window
        self._api_sleep = 59  # seconds
        self._bulk_on_demand = False
        self._enable_report = False
        self._indicators_regex = indicators_regex
        self._proxies = {'https': None}
        self._retype = type(re.compile(''))

        # config items
        self._report = []
        self._verify_ssl = False

        # initialize request session handle
        self._session = Session()

        # instantiate report object
        self.report = Report()

        # save custom types for later
        self._indicator_parser = IndicatorObjectParser(self)
Example #11
def reportTest(fileName):
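    # Load chronicles from JSON, write an HTML report, and open it in the browser.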
    with open(fileName, "r") as f:
        s = f.read()
        chronicles = Chronicles()
        chronicles.fromJSON(s)
        report = Report(chronicles)
        report.image_file_types = [".png"]
        report.htmlReport("../test/report.html")
        webbrowser.open("../test/report.html")
Example #12
 def _make_report(self, counters):
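     # One block per input path: a blank separator, the path, then tag/value counters.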
     r = Report()
     r.append('Records retained while reducing input file')
     for path, counter in counters.items():
         r.append(' ')
         r.append('path %s' % path)
         for tag, value in counter.items():
             r.append('%30s: %d' % (tag, value))
     return r
Example #13
 def __init__(self,
              header_lines,
              column_defs,
              print_as_spaces,
              verbose=True):
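     # Header lines are written immediately; detail lines go through the columns table.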
     self._report = Report()
     self._header(header_lines)
     self._ct = ColumnsTable(column_defs, verbose)
     self._print_as_spaces = print_as_spaces
Example #14
 def CreateFeeReport(self, validfromdate, receiver, calcfees, income, notes):
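     # Hand the fee data straight to a fresh Report instance.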
     ch = Report()
     ch.createFeeReport(
         validfromdate=validfromdate, 
         receiver=receiver, 
         fees=calcfees,
         income=income, 
         notes=notes
     )
Example #15
    def __init__(self):
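        # Create the report up front; its dates seed the date array, the rest is filled in later.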

        self.__report = Report()
        self.__csvFile = None
        self.__arrayOfDates = self.__report.getDates()
        self.__avgTemperature = None
        self.__avgHumidity = None
        self.__status = None
        self.__message = None
Example #16
 def reportInfoRelevancies(self, printOnScreen=True):
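     # Pair each feature with its importance score from the fitted regressor.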
     name = 'RelevanciesInfo' + str(self.numberOfReports('Relevancies') + 1)
     report = Report(name,
                     cols=zip(self.features,
                              self.regressor.feature_importances_),
                     headers=["Feature", "Relevancy"],
                     typeReport='Relevancies')
     if printOnScreen:
         report.showReport()
     self.reports.append(report)
Example #17
def main():
    # Get data path
    filename = "allStocks.json"

    # Initialize dataReader and get data
    dataReader = DataReader(filename)
    data = dataReader.getData()

    # Initialize a report and call print
    report = Report(data)
    report.print()
Example #18
def main():
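    # Fetch chart data from the market and dump it to a CSV report.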
    analyze = Analyze()

    data = analyze.market.update_chart_data(start=DAY, period=HOUR / 12)

    report = Report()
    header = [
        'date', 'volume', 'open', 'high', 'low', 'close', 'quoteVolume',
        'weightedAverage'
    ]
    report.write_csv(data=data, header=header, file_name='somefile')
    print("Done!")
Example #19
 def test_report_structure(self):
     lista = ["pippo", 5, "PAOLO"]
     thisdict = {"brand": "Ford", "model": "Mustang", "year": 1964}
     thisdict2 = {"brand": "Ferrari", "model": "408", "year": 1997}
     rp = ReportPagine(lista, thisdict, thisdict2)
     thisdict3 = {"brand": "Armani", "model": "jeans", "year": 2000}
     thisdict4 = {
         "brand": "The north face",
         "model": "Jacket",
         "year": 2010
     }
     rf = ReportFoto(thisdict3, thisdict4)
     report = Report("sito", rp, rf)
     print(report.toJSON())
Example #20
 def __init__(self, k, validation_month, ensemble_weighting,
              column_definitions, test):
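     # Write the header, then set up columns for validation/query MAE and MARE.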
     self._column_definitions = column_definitions
     self._report = Report()
     self._test = test
     self._header(k, validation_month, ensemble_weighting)
     cd = self._column_definitions.defs_for_columns(
         'description',
         'mae_validation',
         'mae_query',
         'mare_validation',
         'mare_query',
     )
     self._ct = ColumnsTable(columns=cd, verbose=True)
Example #21
    def make_report(title, ordered_cities):
        def make_detail_line(city):
            return {
                'city': city,
                'median_price': median_prices[city],
                'median_price_index': median_prices_indices[city],
                'n_trades': n_trades[city],
                'n_trades_index': n_trades_indices[city],
            }

        c = ColumnsTable((
            ('city', 30, '%30s', ('', '', '', '', '', 'City'), 'city name'),
            ('median_price', 7, '%7.0f', ('', '', '', '', 'median', 'price'),
             'median price in city'),
            ('median_price_index', 7, '%7.2f', ('median', 'price', '/',
                                                'overall', 'median', 'price'),
             'median price as fraction of overall median price'),
            ('n_trades', 7, '%7.0f', ('', '', '', '', 'number', 'trades'),
             'number of trades across all months'),
            ('n_trades_index', 7, '%7.2f', ('number', 'trades', '/ ',
                                            'overall', 'median', 'trades'),
             'median number trades as fraction of overall median number of trades'
             ),
        ))
        for city in ordered_cities:
            c.append_detail(**make_detail_line(city))
        c.append_legend(40)

        r = Report()
        r.append(title)
        r.append(' ')
        for line in c.iterlines():
            r.append(line)
        return r
Example #22
 def __init__(self, validation_month, k, column_definitions, test):
     self._report = Report()
     self._header(validation_month, k)
     self._column_definitions = column_definitions
     self._test = test
     cd = self._column_definitions.defs_for_columns(
         'median_absolute_error',
         'model',
         'n_months_back',
         'max_depth',
         'n_estimators',
         'max_features',
         'learning_rate',
     )
     self._ct = ColumnsTable(columns=cd, verbose=True)
Example #23
    def doPoll(self):
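        # Poll for reports from the past week, registering new logs and fights.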

        oneDay = 60*60*24
        startTime = (time.time() - 7*oneDay) * 1000
        reports = self.api.getReports(startTime).json()
        if (len(reports) == 0):
            return

        for report in reports:
            id = report['id']
            fights = self.api.getFights(id)
            if not report['id'] in self.data.reports.keys():
                self.data.reports[id] = Report(
                    id,
                    report['title'],
                    report['owner'],
                    int(report['start']))
                print("New log!")
            if len(self.data.reports[id].fights) < len(fights):
                print("New fight!")
                self.data.reports[id].dirty = True
                for fight in fights:
                    fightId = fight['id']
                    if not fightId in self.data.reports[id].fights.keys():
                        newFight = Fight(fightId, fight['name'],id)
                        self.data.reports[id].addFight(newFight)
        # After processing fetched reports, refresh messages and prune old ones
        # (iterate over a copy because entries may be removed).
        for id, report in list(self.data.reports.items()):
            if report.isDirty():
                self.data.reports[id].message = report.getFormattedChatMessage()
            if report.startTime < (time.time() - 14*oneDay) * 1000:
                self.data.reports.pop(id, None)
Example #24
def generateUniqueKeys(params, data):
    report_name = Report.getReportName(params)
    structure_keymap = fn.getNestedElement(params, 'structure_keymap')
    unique_keys = []

    for idx in range(0, len(data)):
        row = data[idx]
        key = {}
        key['facility_type'] = fn.getNestedElement(row, 'facility_type')
        key['facility_code'] = fn.getNestedElement(row, 'facility_code')
        key['budget_type_code'] = fn.getNestedElement(row, 'budget_type_code')
        key['object_code'] = fn.getNestedElement(row, 'object_code')
        key['drug_code'] = fn.getNestedElement(row, 'drug_code')
        key['item_group_code'] = fn.getNestedElement(row, 'item_group_code')
        key['state_code'] = fn.getNestedElement(row, 'state_code')

        push_data = []
        for sm in global_group_order[report_name]:
            mapped_key = global_group_order_kepmap[sm]
            push_data.append(key[mapped_key])
            unique_keys.append('_'.join(push_data))

    # Logger.v('unique_keys', len(unique_keys));
    unique_keys = sorted(list(set(unique_keys)))
    # Logger.v('unique_keys', len(unique_keys));
    return unique_keys
Example #25
    def __init__(self):
        wx.Frame.__init__(self, None, -1, "PyLoad", size=(800, 600))

        self.InitProject()
        self.project = Project()
        self.reportPath = 'reports/last-report.db'
        self.report = Report(self.reportPath)
        # set self.report to None if you don't want to generate report
        #self.report = None
        self.path = None

        self.nb = NoteBook(self, -1, self.project, self.report)

        self.InitIcons()
        self.UseMenuBar()
        self.UseToolBar()

        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.Bind(EVT_PLAY_STOPPED, self.OnPlayStopped)

        register_changed_callback(self.SetChanged)

        #TODO: put in tabs' constructors
        self.nb.recordTab.tree.project = self.project
        self.nb.editTab.specialsPanel.project = self.project
        self.proxy = None
Example #26
def generateItemDetail(params, data):
    result = {}
    report_name = Report.getReportName(params)
    split_uk = fn.getNestedElement(params, 'split_uk')

    for sm in global_group_order[report_name][:len(split_uk)]:
        if sm == 'facility_type':
            smk = 'facility'
        else:
            smk = sm
        if sm == 'facility_type':
            id_ = '{0}_type'.format(smk)
            name_ = '{0}_type'.format(smk)
            name_2 = '{0}_type'.format(smk)
            code_ = '{0}_type'.format(smk)
        else:
            id_ = '{0}_seq_no'.format(smk)
            name_ = '{0}_name'.format(smk)
            name_2 = '{0}_desc'.format(smk)
            code_ = '{0}_code'.format(smk)

    try:
        result['name'] = data[0][name_]
    except Exception as e:
        result['name'] = fn.getNestedElement(
            data[0], name_2, fn.getNestedElement(data[0], code_))

    result['id'] = data[0][id_]
    result['code'] = data[0][code_]

    return result
Example #27
def generateChildren(params, data):
    report_name = Report.getReportName(params)
    min_purchase_amount = fn.getNestedElement(params,
                                              'filter.min_purchase_amount', 0)
    if type(min_purchase_amount) == str and min_purchase_amount:
        min_purchase_amount = int(min_purchase_amount)
    children = []
    for d in data:
        # Logger.v('d', d);
        obj_ = {}
        # Logger.v('global_children_key',global_children_key)
        # Logger.v('report_name',report_name)
        for gck in global_children_key[report_name]:
            # Logger.v('gck', gck)
            value = fn.getNestedElement(d, gck)
            if value or value == 0:
                obj_[gck] = value
            else:
                if report_name == 'budget':
                    # Logger.v('Report.generateChildren: {0} not found, sum up after data cleaning process.'.format(gck));
                    if gck == 'total_allocation':
                        obj_[gck] = d['first_allocation'] + d[
                            'additional_allocation']
                    elif gck == 'balance_amount':
                        obj_[gck] = d['first_allocation'] + d[
                            'additional_allocation'] - d['pending_amount'] - d[
                                'liablity_amount'] - d['utilized_amount']

        # Logger.v('obj_', obj_);
        if report_name == 'procurement':
            if obj_['purchase_amount'] > min_purchase_amount:
                children.append(obj_)
        else:
            children.append(obj_)
    return children
Example #28
def preprocessData(params, data):
    report_name = Report.getReportName(params)
    key_to_join = fn.getNestedElement(global_key_to_join, report_name)
    df = pd.DataFrame(data, dtype=str)
    # Logger.v('df', df);
    joined_key = []
    joined_ = []
    joined_columns_list = [key_to_join[0]]
    numeric_columns = [
        'first_allocation', 'additional_allocation', 'pending_amount',
        'utilized_amount', 'liablity_amount', 'trans_in_amount',
        'trans_out_amount', 'deduction_amount', 'current_actual_amount',
        'total_allocation', 'balance_amount',
    ]
    for col in numeric_columns:
        df[col] = pd.to_numeric(df[col])
    # df.info();
    for idx in range(0, len(key_to_join)):
        ktj = key_to_join[idx]
        joined_key.append(ktj)

        if idx > 0:
            joined_.append(['_'.join(joined_key[:-1]), ktj])
            columns = joined_[idx - 1]
            joined_columns = '_'.join(columns)
            joined_columns_list.append(joined_columns)
            df[joined_columns] = df[columns[0]].str.cat(df[columns[1]],
                                                        sep="|")
    return df
Example #29
 def __init__(self, k, ensemble_weighting, column_definitions, test):
     self._column_definitions = column_definitions
     self._test = test
     self._report = Report()
     self._header(k, ensemble_weighting)
     cd = self._column_definitions.defs_for_columns(
         'validation_month',
         'mae_index0',
         'mae_ensemble',
         'mae_best_next_month',
         'median_price',
         'fraction_median_price_next_month_index0',
         'fraction_median_price_next_month_ensemble',
         'fraction_median_price_next_month_best',
     )
     self._ct = ColumnsTable(columns=cd, verbose=True)
Example #30
	def report(self):
		r = Report()
		for bus in self.bus_list:
			field_ops = bus.get_field_ops()
			for op in field_ops:
				op.report(r)
		print(r)
Example #31
    def __init__(self):
        self.config = ConfigManager()
        self.db = DBManager()
        self.report = Report(self.config, self.db)

        if len(self.config.hostsToPing) == 1 and self.config.hostsToPing[0] == '':
            print("No hosts to ping")
            sys.exit()

        # Perform pings
        for host in self.config.hostsToPing:
            self.ping(host)

        # self.db.PrintResultsTable() # Used for testing
        self.report.SendReport()  # Send report if needed
Example #32
 def __init__(self,
              pr: int,
              branch: str,
              date: str,
              e_date: float,
              feature_variables: int,
              meta_variables: int,
              passed: bool,
              pr_id: int,
              num_samples: int,
              sha: str,
              time_elapsed: str,
              user: str,
              email: str,
              status: str,
              report: str = None):
     self.pr = pr
     self.branch = branch
     self.date = date
     self.e_date = e_date
     self.feature_variables = feature_variables
     self.meta_variables = meta_variables
     self.passed = passed
     self.pr_id = pr_id
     self.num_samples = num_samples
     self.sha = sha
     self.time_elapsed = time_elapsed
     self.user = user
     self.email = email
     self.status = status
     self.report = Report(report)
Example #33
def make_report(summary):
    r = Report()
    format_header = '%40s %8s %8s %8s %8s %8s %8s %8s'
    format_detail = '%40s %8.0f %8.0f %8.0f %8.0f %8d %8d %8.0f'
    r.append(format_header % ('numeric feature', 'min', 'median', 'mean', 'max', 'distinct', 'NaN', 'std'))
    for row_name, row_value in summary.iterrows():
        r.append(format_detail % (
            row_name,
            row_value['min'],
            row_value['50%'],
            row_value['mean'],
            row_value['max'],
            row_value['number_distinct'],
            row_value['number_nan'],
            row_value['std']))
    return r
Example #34
    def run(self):
        global nodes

        logging.info("Starting FjoSpidie 2.0")
        starttime = datetime.now()
        ids_engine = None
        tempdir = tempfile.mkdtemp(dir="/mnt/fjospidie")

        if self.config.suricata:
            from engine.SuricataEngine import SuricataEngine
        else:
            from engine.SnortEngine import SnortEngine

        report = Report(starttime, self.config)
        proxy_port = random.randint(20000, 65534)
        start_url = urlparse(self.config.url)
        nodes.append(Node(start_url.hostname))
        nodes[0].set_status(200)

        if not self.config.nopcap:
            pcap_engine = PcapEngine(self.config, report, tempdir)
            pcap_engine.start()

        webrunner = WebRunner(report)
        har = webrunner.run_webdriver(start_url, proxy_port, self.config,
                                      tempdir)

        if not self.config.nopcap:
            pcap_engine.stop()
            pcap_path = pcap_engine.pcap_path

        connections = webrunner.find_external_connections(har)
        entries = har.entries
        report.insert_entries(entries)
        if self.config.parsers:
            for parser in self.config.parsers:
                package = "fjospidie.engine.parser.{}".format(parser)
                try:
                    imported = importlib.import_module(package)
                    parser_class = getattr(imported, parser)
                    parser_engine = parser_class(self.config, report, entries)
                    parsers.append(parser_engine)
                    parser_engine.start()
                except Exception as e:
                    logging.error("Error starting parser {}: {}".format(
                        parser, e))
                    continue
Example #35
def testNIC(nic, dict, descr):
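    # Print CPU and total latency for NIC reads/writes at each block size.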
    defaults = {
        'bsizes': [64, 128, 256, 512, 1024, 2048, 4096, 8192, 16 * 1024]
    }

    print(descr)
    r = Report(("cpu read", "tot read", "cpu write", "tot write"))
    r.printHeading()

    bsizes = dict.get('bsizes', defaults['bsizes'])
    for bs in bsizes:
        tr = nic.read_time(bs)
        cr = nic.read_cpu(bs)
        tw = nic.write_time(bs)
        cw = nic.write_cpu(bs)
        r.printLatency(bs, (cr, tr, cw, tw))
    print("")
Example #36
def testHBA(hba, dict, descr):
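    # Print CPU and total latency for HBA reads/writes at each block size.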
    defaults = {
        'bsizes': [512, 4096, 128 * 1024, 4096 * 1024]
    }

    print(descr)
    r = Report(("cpu read", "tot read", "cpu write", "tot write"))
    r.printHeading()

    bsizes = dict.get('bsizes', defaults['bsizes'])
    for bs in bsizes:
        tr = hba.read_time(bs)
        cr = hba.read_cpu(bs)
        tw = hba.write_time(bs)
        cw = hba.write_cpu(bs)
        r.printLatency(bs, (cr, tr, cw, tw))
    print("")
Example #37
def make_chart_a(control, data):
    'return a Report'
    def make_header(report):
        report.append('Median Absolute Errors for Most Accurate Models')
        report.append('By Month')
        report.append('By Feature Group')
        report.append(' ')

    def make_details(data, control):
        'return a ColumnsTable'
        def append_feature_group_description(ct):
            ct.append_line(' ')
            ct.append_line('Feature groups:')
            ct.append_line('s    : only size features')
            ct.append_line('sw   : only size and wealth features')
            ct.append_line('swp  : only size, wealth, and property features')
            ct.append_line('swpn : all features: size, wealth, property, and neighborhood')

        ct = ColumnsTable((
            ('month', 6, '%6s', ('', 'month'), 'training month'),
            ('features', 8, '%8s', ('features', 'group'), 'group of features'),
            ('model', 5, '%5s', ('best', 'model'), 'family of best model'),
            ('mae', 6, '%6.0f', ('', 'mae'), 'mae of best model in month using features'),
            ),
            verbose=True,
            )
        for month in control.test_months:
            for features in control.feature_groups:
                mae_model = data[month][features]
                ct.append_detail(
                    month=month,
                    features=features,
                    model=mae_model.model,
                    mae=mae_model.mae,
                    )
            ct.append_detail()  # blank line separates each month
        ct.append_legend()
        append_feature_group_description(ct)

        return ct

    report = Report()
    make_header(report)
    for line in make_details(data, control).iterlines():
        report.append(line)
    return report
Example #38
def tptest(disk, dict, descr="Estimated Throughput"):
    """
    run a standard set of throughputs against a specified device
        disk -- device to be tested
        dict --
            FioRsize ... size of test file
            FioRdepths ... list of request depths
            FioRbs ... list of block sizes

        filesize -- size of the file used for the test
        depth -- number of queued parallel operations
    """

    dflt = {        # default throughput test parameters
        'FioRsize': 16 * GIG,
        'FioRdepth': [1, 32],
        'FioRbs': [4096, 128 * 1024, 4096 * 1024],
    }

    sz = dict.get('FioRsize', dflt['FioRsize'])
    depths = dict.get('FioRdepth', dflt['FioRdepth'])
    bsizes = dict.get('FioRbs', dflt['FioRbs'])
    r = Report(("seq read", "seq write", "rnd read", "rnd write"))

    for depth in depths:
        print("%s (%s), depth=%d" % (descr, disk.desc, depth))
        r.printHeading()
        for bs in bsizes:
            # run the simulations
            tsr = disk.avgTime(bs, sz, read=True, seq=True, depth=depth)
            tsw = disk.avgTime(bs, sz, read=False, seq=True, depth=depth)
            trr = disk.avgTime(bs, sz, read=True, seq=False, depth=depth)
            trw = disk.avgTime(bs, sz, read=False, seq=False, depth=depth)

            # compute the corresponding bandwidths
            bsr = bs * SECOND / tsr
            bsw = bs * SECOND / tsw
            brr = bs * SECOND / trr
            brw = bs * SECOND / trw
            r.printBW(bs, (bsr, bsw, brr, brw))

            # compute the corresponding IOPS
            isr = SECOND / tsr
            isw = SECOND / tsw
            irr = SECOND / trr
            irw = SECOND / trw
            r.printIOPS(0, (isr, isw, irr, irw))

            # print out the latencies
            r.printLatency(0, (tsr, tsw, trr, trw))
        print("")
Example #39
 def testGetReport(self):
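     # getReport should return exactly the form data that saveReport stored.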
     rep = Report()
     self.assertTrue(rep.saveReport("Escola Santa Clara","Juan","professor",0,1,0,1,0,2,1,0,0,1))
     form = {}
     form['qualification'] = 'professor'
     form['teacherName'] = 'Juan'
     form['question0'] = 0
     form['question1'] = 1
     form['question2'] = 0
     form['question3'] = 1
     form['question4'] = 0
     form['question5'] = 2
     form['question6'] = 1
     form['question7'] = 0
     form['question8'] = 0
     form['question9'] = 1
     self.assertEqual(rep.getReport('Escola Santa Clara'),form)
Example #40
    def CreateQuarterAnnouncement(self, date):
        setattr(self.db, 'querydate', date)
        isodate = '{:%Y-%m-%d}'.format(date)
        children = self.db.getchildren_for_quarterannouncement(isodate)
        ch = Report()
        ch.createQAReport(children, isodate)
Example #41
 def __init__(self):
     self.seek_list = None
     self.comment_file = None
     self.index_file = None
     self.symbol_to_encoding_dict = None
     self.cids = None
     self.comment_offsets_cid = None
     self.comment_offsets = None
     self.comment_term_counts = None
     self.comment_csv_reader = None
     self.authors_list = None
     self.articles_list = None
     self.reply_to_index = None
     self.collection_term_count = 0
     self.stemmer = Stemmer.Stemmer('english')
     self.tokenizer = nltk.tokenize.ToktokTokenizer()
     self.report = Report()
Example #42
    def lint(self):
        wire_to_field_op = {}  # maps wires to the field_op that output it
        specified_outputs = {}  # maps wires to buses
        field_ops = []
        # check that each wire is specified no more than once
        for bus in self.bus_list:
            for field_op in bus.get_field_ops():
                field_ops.append(field_op)
                output_wires = field_op.output_wires()
                # print("field_op %s" % field_op)
                # print("  %s" % output_wires)
                for output in output_wires:
                    if output in specified_outputs:
                        print("LINT: bus %s re-specifies wire %s, already set by bus %s" % (
                            bus, output, specified_outputs[output]))
                    else:
                        specified_outputs[output] = bus
                    wire_to_field_op[output] = field_op

        # check that each wire is specified at least once
        for idx in range(self.total_wire_count):
            wire = Wire(idx)
            if wire not in specified_outputs:
                print("LINT: no bus specifies output %s" % wire)

        # check that each field_op computes a value that eventually reaches
        # an output.
        useful_field_ops = set(
            filter(lambda f: isinstance(f, FieldOutput), field_ops))
        done_field_ops = set()
        while len(useful_field_ops) > 0:
            # print("Considering %s field_ops" % len(useful_field_ops))
            required_wires = set()
            for field_op in useful_field_ops:
                #print "inputs of %s are %s" % (field_op, field_op.input_wires())
                required_wires.update(field_op.input_wires())
            done_field_ops.update(useful_field_ops)

            #print "Considering %s required_wires" % len(required_wires)
            useful_field_ops = set()
            for wire in required_wires:
                print(wire)
                field_op = wire_to_field_op[wire]
                if field_op not in done_field_ops:
                    useful_field_ops.add(field_op)

        all_field_ops = set(field_ops)
        unused_field_ops = all_field_ops.difference(done_field_ops)
        r = Report()
        for field_op in unused_field_ops:
            field_op.report(r)
        if len(r) > 0:
            print("LINT: %d unused field ops; cost:\n%s" % (
                len(unused_field_ops), r))
            # print("LINT: %s" % unused_field_ops)

        print("(info) Linted %s field ops from %s buses" % (len(field_ops),
                                                            len(self.bus_list)))
Example #43
def make_chart_b(control, data):
    "return a Report"

    def make_header(report):
        report.append("Mean Probability of a Feature Being Included in a Decision Tree")
        report.append("Across the Entire Ensemble of Decisions Trees")
        report.append("For Most Accurate Model in Each Training Month")
        report.append(" ")

    def make_mean_importance_by_feature(test_months):
        "return dict[feature_name] = float, the mean importance of the feature"
        feature_names = Features().ege_names(control.arg.features)
        mean_importance = {}  # key = feature_name
        for feature_index, feature_name in enumerate(feature_names):
            # build vector of feature_importances for feature_name
            feature_importances = np.zeros(len(test_months))  # for feature_name
            for month_index, test_month in enumerate(test_months):
                month_importances = data[ReductionKey(test_month)]  # for each feature
                all_feature_importances = month_importances.importances["feature_importances"]
                feature_importances[month_index] = all_feature_importances[feature_index]
            mean_importance[feature_name] = np.mean(feature_importances)
        return mean_importance

    def make_details(data, test_months):
        "return a ColumnTable"
        columns_table = ColumnsTable(
            (
                ("mean_prob", 5, "%5.2f", ("mean", "prob"), "mean probability feature appears in a decision tree"),
                ("feature_name", 40, "%40s", (" ", "feature name"), "name of feature"),
            ),
            verbose=True,
        )
        mean_importance = make_mean_importance_by_feature(test_months)
        for feature_name in sorted(mean_importance, key=mean_importance.get, reverse=True):
            columns_table.append_detail(mean_prob=mean_importance[feature_name] * 100.0, feature_name=feature_name)
        columns_table.append_legend()
        return columns_table

    report = Report()
    make_header(report)
    details = make_details(data, control.test_months)
    for line in details.iterlines():
        report.append(line)
    return report
Example #44
    def report_for_file(self, file_name):
        print('report for: ' + file_name)
        windows = self._windows
        methods = self._methods
        report = Report(file_name, windows, methods)

        raw_txt = Reader.readFromFile(file_name)
        #print raw_txt

        words = Reader.extractWords(raw_txt, "russian")

        keywords = Reader.meter(words)
        self._keywords = keywords
        self._terms = words

        # initialize the report with terms and their tf values
        for term in self._terms:
            report.add_term_tf(term, keywords[term])

        for window in windows:
            for method in methods:
                print(method, window)
                (array, graph) = self.get_rw_for(method, window)
                report._graph = graph  # TODO: make graph a property and rework this logic
                for v in array:
                    term = v.term_value
                    report.add_term_rw_stats(term, method, window, v.term_weight_rw)

        self._reports[file_name] = report
Example #45
 def report(self):
     """
     Returns a detailed report on the packing of this structure.
     """
     report = Report()
     report.add_html_body(self.html_report())
     if self.options["reportmode"] == "html":
         return [report.get_html(), ""]
     elif self.options["reportmode"] == "text":
         return [report.get_text(), ""]
     elif self.options["reportmode"] == "both":
         return [report.get_text(), report.get_html()]
     else:
         raise ValueError("unknown report mode option '%s'" % self.options["reportmode"])
Example #46
 def report(self,options):
     """
     Returns a detailed report on the packing of this structure.
     """
     report = Report()
     report.add_html_body(self.html_report(options))
     if options['reportmode'] == 'html':
         return [report.get_html(),None]
     elif options['reportmode'] == 'text':
         return [report.get_text(),None]
     elif options['reportmode'] == 'both':
         return [report.get_text(),report.get_html()]
     else:
         raise ValueError("unknown report mode option '%s'" % options['reportmode'])
Example #47
    def __init__(self, api_aid, api_sec, api_org, api_url):
        """ """
        # logger
        self.log_level = {
            'debug': logging.DEBUG,
            'info': logging.INFO,
            'warning': logging.WARNING,
            'error': logging.ERROR,
            'critical': logging.CRITICAL}
        self.formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(funcName)s:%(lineno)d)')
        self.tcl = tc_logger()

        # debugging
        self._memory_monitor = True

        # credentials
        self._api_aid = api_aid
        self._api_sec = api_sec

        # user defined values
        self._api_org = api_org
        self._api_url = api_url
        self._api_result_limit = 200

        # default values
        self._activity_log = False
        self._api_request_timeout = 30
        self._api_retries = 5  # maximum of 5 minute window
        self._api_sleep = 59  # seconds
        self._bulk_on_demand = False
        self._enable_report = False
        self._indicators_regex = indicators_regex
        self._proxies = {'https': None}
        self._retype = type(re.compile(''))

        # config items
        self._report = []
        self._verify_ssl = False

        # initialize request session handle
        self._session = Session()

        # instantiate report object
        self.report = Report()
Example #48
def tptest(disk, filesize, depth=1, bsizes=(4096, 128 * 1024, 4096 * 1024)):
    """
    run a standard set of throughputs against a specified device
        disk -- device to be tested
        filesize -- size of the file used for the test
        depth -- number of queued parallel operations
    """
    r = Report(("seq read", "seq write", "rnd read", "rnd write"))
    r.printHeading()
    for bs in bsizes:
        tsr = disk.avgTime(bs, filesize, read=True, seq=True, depth=depth)
        tsw = disk.avgTime(bs, filesize, read=False, seq=True, depth=depth)
        trr = disk.avgTime(bs, filesize, read=True, seq=False, depth=depth)
        trw = disk.avgTime(bs, filesize, read=False, seq=False, depth=depth)

        # compute the corresponding bandwidths
        bsr = bs * SECOND / tsr
        bsw = bs * SECOND / tsw
        brr = bs * SECOND / trr
        brw = bs * SECOND / trw

        r.printBW(bs, (bsr, bsw, brr, brw))
        r.printIOPS(bs, (bsr, bsw, brr, brw))
        r.printLatency(bs, (tsr, tsw, trr, trw))
Example #49
    def get_surfdist_report(self,surfdist,options):
        bins = options['surfdist_bins']
        maxdepth = options['surfdist_maxdepth']
        step = maxdepth/bins
        count = [0] * bins
        bigger = 0
        total_count = 0
        for s in surfdist:
            if s == -1 or s == 9999: continue
            for i in range(bins):
                if s<=(i+1)*step:
                    total_count += 1
                    count[i] += 1
                    break
            if s>maxdepth: bigger += 1

        # write table
        report = """<h2>Distances to next surface atom</h2>

<table>
<tr><th>bin</th><th>dist</th><th>#atoms</th></tr>
"""
        for i in range(bins):
            report += "<tr><td>%i</td><td>%5.2f</td><td>%i</td></tr>\n"%(i+1,step*(i+1),count[i])

        report += "</table><p>total atoms counted: %i<br>"%total_count
        report += "\natoms out of range: %i</p>\n"%bigger

        hreport = Report()
        hreport.add_html_body(report)
        if options['reportmode'] == 'html':
            return [hreport.get_html(),None]
        elif options['reportmode'] == 'text':
            return [hreport.get_text(),None]
        elif options['reportmode'] == 'both':
            return [hreport.get_text(),hreport.get_html()]
        else:
            raise ValueError("unknown report mode option '%s'" % options['reportmode'])
Example #50
def make_chart_stats(data, control, filter_f):
    'return Report with statistics for years and months that obey the filter'
    r = Report()
    r.append('Prices by Month')
    r.append('')
    ct = ColumnsTable((
            ('year', 4, '%4d', (' ', ' ', 'year'), 'year of transaction'),
            ('month', 5, '%5d', (' ', ' ', 'month'), 'month of transaction'),
            ('mean_price', 6, '%6.0f', (' ', ' mean', 'price'), 'mean price in dollars'),
            ('median_price', 6, '%6.0f', (' ', 'median', 'price'), 'median price in dollars'),
            ('mean_price_ratio', 6, '%6.3f', (' mean', ' price', ' ratio'), 'ratio of price in current month to prior month'),
            ('median_price_ratio', 6, '%6.3f', ('median', ' price', ' ratio'), 'ratio of price in current month to prior month'),
            ('number_trades', 6, '%6d', ('number', 'of', 'trades'), 'number of trades in the month'),
            ))

    prior_mean_price = None
    prior_median_price = None
    for year in range(2003, 2010):
        for month in range(1, 13):
            if filter_f(year, month):
                value = data[make_reduction_key(year, month)]
                mean_price = value['mean']
                median_price = value['median']
                number_trades = value['count']
                ct.append_detail(
                        year=year,
                        month=month,
                        mean_price=mean_price,
                        median_price=median_price,
                        mean_price_ratio=None if prior_mean_price is None else mean_price / prior_mean_price,
                        median_price_ratio=None if prior_median_price is None else median_price / prior_median_price,
                        number_trades=number_trades,
                        )
                prior_mean_price = mean_price
                prior_median_price = median_price
    ct.append_legend()
    for line in ct.iterlines():
        r.append(line)
    return r
Example #51
def loop():
    global CURRENT_DAY
    global CURRENT_MON
    global CURRENT_HOUR
    global CROP
    global SYSTEM_CONTROLLER
    global SOIL_HUMIDITY
    global CURRENT_TEMPERATURE
    global IS_RAINING
    global CURRENT_ILLUMINATION
    global READINGS_DELAY
    global DEBUG
    global DEBUG_DAY
    global DEBUG_MONTH
    global DEBUG_HOUR
    global SYSTEM_OVERRIDE
    global CURRENT_CROP_REPORT
    global CURRENT_SYSTEM_REPORT
    global UPDATE_SYSTEM

    while True:
        if read_rain():
            UPDATE_SYSTEM = True
        if read_temperature():
            UPDATE_SYSTEM = True
        if read_soil_humiture():
            UPDATE_SYSTEM = True
        if read_illumination():
            UPDATE_SYSTEM = True

        if UPDATE_SYSTEM:
            print_line('UPDATING SYSTEM')
            CURRENT_DAY = datetime.datetime.today().day
            CURRENT_MON = datetime.datetime.today().month
            CURRENT_HOUR = datetime.datetime.today().hour

            # Overriding date/time if debugging
            if DEBUG:
                override_debug_values()
                if DEBUG_DAY is not False:
                    CURRENT_DAY = DEBUG_DAY
                    print_line('DEBUGGING WITH DAY AS: ' + str(CURRENT_DAY))
                if DEBUG_MONTH is not False:
                    CURRENT_MON = DEBUG_MONTH
                    print_line('DEBUGGING WITH MONTH AS: ' + str(CURRENT_MON))
                if DEBUG_HOUR is not False:
                    CURRENT_HOUR = DEBUG_HOUR
                    print_line('DEBUGGING WITH HOUR AS: ' + str(CURRENT_HOUR))

            report = Report(SOIL_HUMIDITY, CURRENT_TEMPERATURE, IS_RAINING,
                            CURRENT_ILLUMINATION)
            report.build_crop_status(CROP, CURRENT_MON, CURRENT_DAY,
                                     CURRENT_HOUR)
            crop_report = report.get()
            update_status = SYSTEM_CONTROLLER.update_status(crop_report,
                                                            SYSTEM_OVERRIDE)

            CURRENT_CROP_REPORT = build_crop_report(crop_report)

            CURRENT_SYSTEM_REPORT = ''
            for status in update_status:
                print_line(' .' + status)
                CURRENT_SYSTEM_REPORT += ' .' + status + '\n'

            UPDATE_SYSTEM = False
        time.sleep(READINGS_DELAY)
Example #52
    def compare(self):
        for dirname, dirnames, filenames in walk(self.__report_folder_path):
            report_under_test = Report(dirname)
            pdf_results = TemplateVariables()
            ppt_results = TemplateVariables()
            xls_results = TemplateVariables()
            doc_results = TemplateVariables()
            for filename in filenames:
                convert_to_images(dirname, filename)

            if '\\2010' in dirname:
                for reportdir, reportdirs, reportfiles in walk(dirname):
                    for report in reportfiles:
                        report_path = join(reportdir, report)
                        etalon_report = report_path.replace('\\2010\\', '\\2005\\')
                        if report.endswith(FileFormat.PDF + FileFormat.POWERPOINT + FileFormat.WORD):
                            self.__run_bcomp__(report_path, etalon_report, self.__text_compare_settings)
                            bat_name = create_batch(report_path, etalon_report)
                            if Report.get_report_name(reportdir) == report_under_test.report_title and \
                                    report.endswith(FileFormat.PDF):
                                pdf_results.old_report = etalon_report
                                pdf_results.new_report = report_path
                                pdf_results.bat_list.append(bat_name)
                                pdf_results.html_list.append(self.compare_report)

                            elif report.endswith(".ppt"):
                                ppt_results.old_report = etalon_report
                                ppt_results.new_report = report_path
                                ppt_results.bat_list.append(bat_name)
                                ppt_results.html_list.append(self.compare_report)

                            elif report.endswith(".doc"):
                                doc_results.old_report = etalon_report
                                doc_results.new_report = report_path
                                doc_results.bat_list.append(bat_name)
                                doc_results.html_list.append(self.compare_report)

                        elif report.endswith(FileFormat.EXCEL):
                            xls_results.old_report = etalon_report
                            xls_results.new_report = report_path
                        elif report.endswith(FileFormat.PNG):
                            self.__run_bcomp__(report_path, etalon_report, self.__picture_compare_settings)
                            bcomp_clean(reportdir, self.compare_report)
                            bat_name = create_batch(report_path, etalon_report)
                            if reportdir.endswith("-xls"):
                                xls_results.bat_list.append(bat_name)
                                xls_results.html_list.append(self.compare_report)

                        if reportdir.endswith("-pdf"):
                            pdf_results.bat_list.append(bat_name)
                            pdf_results.html_list.append(self.compare_report)
                        elif reportdir.endswith("-ppt"):
                            ppt_results.bat_list.append(bat_name)
                            ppt_results.html_list.append(self.compare_report)
                        elif reportdir.endswith("-doc"):
                            doc_results.bat_list.append(bat_name)
                            doc_results.html_list.append(self.compare_report)

                template_vars = {
                    "report_title": report_under_test.report_title,
                    "pdf_old": pdf_results.old_report,
                    "pdf_new": pdf_results.new_report,
                    "pdf_bats": pdf_results.bat_list,
                    "pdf_htmls": pdf_results.html_list,

                    "excel_old": xls_results.old_report,
                    "excel_new": xls_results.new_report,
                    "xls_bats": xls_results.bat_list,
                    "xls_htmls": xls_results.html_list,

                    "ppt_old": ppt_results.old_report,
                    "ppt_new": ppt_results.new_report,
                    "ppt_bats": ppt_results.bat_list,
                    "ppt_htmls": ppt_results.html_list,

                    "doc_old": doc_results.old_report,
                    "doc_new": doc_results.new_report,
                    "doc_bats": doc_results.bat_list,
                    "doc_htmls": doc_results.html_list,
                }
                Template(template_vars, dirname, report_under_test.report_title).create_template()
Example #53
    def translate(self, message):

        hexMessageIN = []
        isRequest = []
        # turn into bitstrings
        for i in message:
            value = BitArray(hex=i[0].replace(" ", ""))
            hexMessageIN.append(value)
            isRequest.append(i[1])

        # detect multiple messages
        hexMessages = []
        temp = []
        for i in hexMessageIN:
            print(i[0:16].hex)
            if i[0:16].hex == BitArray("0x0564").hex:
                if len(temp) > 0:
                    hexMessages.append(list(temp))
                temp = []
            temp.append(i)
        if len(temp) > 0:
            hexMessages.append(list(temp))

        allMessages = Report("All Messages", "The entirity of the message", "", "")
        messageCount = -1
        for hexMessage in hexMessages:
            messageCount += 1
            hexString = ""
            for msg in hexMessage:
                hexString += msg.hex + ", "
            thisMessage = Report("Message {}".format(messageCount), "", "", hexString)

            # verify correctness
            try:
                if not DataLinkTranslator.DataLayerCorrect(hexMessage[0]):
                    thisMessage.AddNext(
                        Report("ERROR", "This message is not verifiably DNP3, or may be malformed", message[0], "")
                    )
                    return Report("Invalid Message", "", "", "")
            except Exception:
                return Report("Invalid Input", "", "", "")

            # remove CRC bits for everything (Andy took 'em out)
            # for i in hexMessage:
            #    i = DataLinkTranslator.StripCRCBits(i)

            # Get message length
            thisMessage.length = DataLinkTranslator.DataLayerLength(hexMessage[0][:])
            thisMessage.MessageLength = thisMessage.length.uint
            thisMessage.AddNext(
                Report(
                    "Message length",
                    "Number of octets this message contains that are not CRC related",
                    str(thisMessage.length.uint),
                    "",
                )
            )

            # Get Control Field
            control = DataLinkTranslator.DataLayerControl(hexMessage[0][:])
            thisMessage.AddNext(
                Report("Message Control Data", "Function operations and qualifiers", str(control.hex), "")
            )
            thisMessage.Next[-1].AddNext(DataLinkTranslator.DataLayerControlReport(control))

            # message sender
            thisMessage.sender = DataLinkTranslator.DataLayerSource(hexMessage[0][:])
            thisMessage.AddNext(Report("Message Sender", "ID for sender", str(thisMessage.sender.hex), ""))

            # message receiver
            thisMessage.receiver = DataLinkTranslator.DataLayerDestination(hexMessage[0][:])
            thisMessage.AddNext(Report("Message Receiver", "ID for Receiver", str(thisMessage.receiver.hex), ""))

            # message transport layer
            thisMessage.transport = ""
            if hexMessage[1][0]:
                thisMessage.transport += " FINAL "
            if hexMessage[1][1]:
                thisMessage.transport += " FIRST "
            thisMessage.AddNext(
                Report(
                    "Transport Function",
                    "Links together large messages in sequence",
                    (thisMessage.transport + "Seq {}").format(hexMessage[1][2:8].uint),
                    "",
                )
            )
            hexMessage[1] = hexMessage[1][8:]

            # technically a block, so sue me
            fragment = 1
            baseLayer = thisMessage
            while fragment < len(hexMessage):

                bucket = []
                # requests contain no actual data
                # just outlines for what is expected in responses

                if messageIsRequest:
                    bucket.append(Report("Object Header", "Prefix information on Application layer", "", ""))
                    flags = getAppRequestHeader(hexMessage[fragment])
                    for i in flags:
                        bucket[-1].Next.append(i)
                    bucket.append(translateFuncCode(getFuncCode(hexMessage[fragment])))
                    # skip internal indications, does not exist in requests
                # responses share the same header layout; internal indications are not parsed here
                else:
                    bucket.append(Report("Object Header", "Prefix information on Application layer", "", ""))
                    flags = getAppRequestHeader(hexMessage[fragment])
                    for i in flags:
                        bucket[-1].Next.append(i)
                    bucket.append(translateFuncCode(getFuncCode(hexMessage[fragment])))

                # create the fragment node first so the parsed entries attach beneath it
                baseLayer.AddNext(Report("Application Fragment {}".format(fragment - 1), "", "", ""))

                for i in bucket:
                    entry = i
                    if not isinstance(i, Report):
                        entry = Report(i, "", "", "")
                    baseLayer.Next[-1].AddNext(entry)

                fragment += 1

            allMessages.AddNext(thisMessage)

        print(allMessages)
        return allMessages
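
A minimal driver sketch for translate() above. Hedged: the enclosing class name (MessageTranslator) is hypothetical, the frame bytes are illustrative DNP3-style octets rather than verified test vectors, and Report is assumed to implement __str__. Each input item pairs a hex string with an is-request flag; a message needs the link-layer frame (starting 05 64) plus at least one following data frame.

translator = MessageTranslator()  # hypothetical enclosing class
frames = [
    ("05 64 0B C4 01 00 04 00", True),  # link-layer header, starts with the 0x0564 sync bytes
    ("C0 C1 01 3C 02 06", True),        # transport octet followed by an application fragment
]
report = translator.translate(frames)
print(report)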
Ejemplo n.º 54
0
class ThreatConnect:
    """ """

    def __init__(self, api_aid, api_sec, api_org, api_url):
        """ """
        # logger
        self.log_level = {
            'debug': logging.DEBUG,
            'info': logging.INFO,
            'warning': logging.WARNING,
            'error': logging.ERROR,
            'critical': logging.CRITICAL}
        self.formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(funcName)s:%(lineno)d)')
        self.tcl = tc_logger()

        # debugging
        self._memory_monitor = True

        # credentials
        self._api_aid = api_aid
        self._api_sec = api_sec

        # user defined values
        self._api_org = api_org
        self._api_url = api_url
        self._api_result_limit = 200

        # default values
        self._activity_log = False
        self._api_request_timeout = 30
        self._api_retries = 5  # maximum of 5 minute window
        self._api_sleep = 59  # seconds
        self._bulk_on_demand = False
        self._enable_report = False
        self._indicators_regex = indicators_regex
        self._proxies = {'https': None}
        self._retype = type(re.compile(''))

        # config items
        self._report = []
        self._verify_ssl = False

        # initialize request session handle
        self._session = Session()

        # instantiate report object
        self.report = Report()

        #
        # Memory Testing
        #
        # self._p = psutil.Process(os.getpid())
        # self._memory = self._p.memory_info().rss

    def _api_request_headers(self, ro):
        """ """
        timestamp = int(time.time())
        signature = "{0}:{1}:{2}".format(ro.path_url, ro.http_method, timestamp)
        # python 2.7, does not work on 3.x and not tested on 2.6
        # hmac_signature = hmac.new(self._api_sec, signature, digestmod=hashlib.sha256).digest()
        # authorization = 'TC {0}:{1}'.format(self._api_aid, base64.b64encode(hmac_signature))
        # python 3.x
        hmac_signature = hmac.new(self._api_sec.encode(), signature.encode(), digestmod=hashlib.sha256).digest()
        authorization = 'TC {0}:{1}'.format(self._api_aid, base64.b64encode(hmac_signature).decode())

        ro.add_header('Timestamp', timestamp)
        ro.add_header('Authorization', authorization)

    def api_filter_handler(self, resource_obj, filter_objs):
        """ """
        data_set = None

        if not filter_objs:
            # build api call (no filters)
            default_request_object = resource_obj.default_request_object
            data_set = self.api_response_handler(resource_obj, default_request_object)
        else:
            #
            # process each filter added to the resource object for retrieve
            #
            first_run = True

            #
            # each resource object can have x filter objects with an operator to join or intersect results
            #
            for filter_obj in filter_objs:

                obj_list = []  # temp storage for results on individual filter objects
                owners = filter_obj.owners
                if len(owners) == 0:  # handle filters with no owners
                    owners = [self._api_org]  # use default org

                # iterate through all owners
                for o in owners:
                    self.tcl.debug('owner: {0!s}'.format(o))
                    if len(filter_obj) > 0:
                        # request object are for api filters
                        for ro in filter_obj:
                            if ro.owner_allowed:
                                ro.set_owner(o)
                            results = self.api_response_handler(resource_obj, ro)

                            if ro.resource_type not in [ResourceType.OWNERS,
                                                        ResourceType.VICTIMS,
                                                        ResourceType.BATCH_JOBS]:
                                # TODO: should this be done?
                                # post filter results to the current owner
                                results = [obj for obj in results
                                           if obj.owner_name.upper() == o.upper()]

                            obj_list.extend(results)
                    else:
                        ro = filter_obj.default_request_object
                        if ro.owner_allowed:
                            ro.set_owner(o)
                        results = self.api_response_handler(resource_obj, ro)

                        if ro.resource_type not in [ResourceType.OWNERS, ResourceType.VICTIMS]:
                            # TODO: should this be done?
                            # post filter results to the current owner
                            results = [obj for obj in results
                                       if obj.owner_name.upper() == o.upper()]

                        obj_list.extend(results)

                    #
                    # post filters
                    #
                    pf_obj_set = set(obj_list)
                    self.tcl.debug('count before post filter: {0:d}'.format(len(obj_list)))
                    for pfo in filter_obj.post_filters:
                        self.tcl.debug('pfo: {0!s}'.format(pfo))

                        #
                        # Report Entry
                        #
                        report_entry = ReportEntry()
                        report_entry.add_post_filter_object(pfo)

                        # current post filter method
                        filter_method = getattr(resource_obj, pfo.method)

                        # current post filter results
                        post_filter_results = set(filter_method(pfo.filter, pfo.operator, pfo.description))

                        pf_obj_set = pf_obj_set.intersection(post_filter_results)

                        self.report.add(report_entry)

                    # set obj_list to post_filter results
                    if filter_obj.post_filters_len > 0:
                        obj_list = list(pf_obj_set)

                    self.tcl.debug('count after post filter: {0:d}'.format(len(obj_list)))

                # no need to join or intersect on first run
                if first_run:
                    data_set = set(obj_list)
                    first_run = False
                    continue

                #
                # depending on the filter type the result will be intersected or joined
                #
                if filter_obj.operator is FilterSetOperator.AND:
                    data_set = data_set.intersection(obj_list)
                elif filter_obj.operator is FilterSetOperator.OR:
                    data_set.update(set(obj_list))

        #
        # only add to report if these results should be tracked (exclude attribute, tags, etc)
        #
        self.report.add_filtered_results(len(data_set))

        #
        # after intersection or join add the objects to the resource object
        #
        for obj in data_set:
            resource_obj.add_obj(obj)

    def api_request(self, ro):
        """ """
        api_response = None
        fail_msg = None
        h_content_length = None
        h_content_type = None
        start = datetime.now()

        #
        # enable activity log
        #
        if self._activity_log:
            ro.enable_activity_log()

        #
        # prepare request
        #
        url = '{0!s}{1!s}'.format(self._api_url, ro.request_uri)
        api_request = Request(ro.http_method, url, data=ro.body, params=ro.payload)
        request_prepped = api_request.prepare()

        #
        # generate headers
        #
        ro.set_path_url(request_prepped.path_url)
        self._api_request_headers(ro)
        request_prepped.prepare_headers(ro.headers)

        #
        # Debug
        #
        self.tcl.debug('request_object: {0!s}'.format(ro))
        self.tcl.debug('url: {0!s}'.format(url))
        self.tcl.debug('path url: {0!s}'.format(request_prepped.path_url))

        #
        # api request (gracefully handle temporary communications issues with the API)
        #
        for i in range(1, self._api_retries + 1, 1):
            try:
                api_response = self._session.send(
                    request_prepped, verify=self._verify_ssl, timeout=self._api_request_timeout,
                    proxies=self._proxies, stream=False)
                break
            except exceptions.ReadTimeout as e:
                self.tcl.error('Error: {0!s}'.format(e))
                self.tcl.error('The server may be experiencing delays at the moment.')
                self.tcl.info('Pausing for {0!s} seconds to give server time to catch up.'.format(self._api_sleep))
                time.sleep(self._api_sleep)
                self.tcl.info('Retry {0!s} ....'.format(i))

                if i == self._api_retries:
                    self.tcl.critical('Exiting: {0!s}'.format(e))
                    raise RuntimeError(e)
            except exceptions.ConnectionError as e:
                self.tcl.error('Error: {0!s}'.format(e))
                self.tcl.error('Connection Error. The server may be down.')
                self.tcl.info('Pausing for {0!s} seconds to give server time to catch up.'.format(self._api_sleep))
                time.sleep(self._api_sleep)
                self.tcl.info('Retry {0!s} ....'.format(i))
                if i == self._api_retries:
                    self.tcl.critical('Exiting: {0!s}'.format(e))
                    raise RuntimeError(e)
            except socket.error as e:
                self.tcl.critical('Exiting: {0!s}'.format(e))
                raise RuntimeError(e)

        #
        # header values
        #
        if 'content-length' in api_response.headers:
            h_content_length = api_response.headers['content-length']
        if 'content-type' in api_response.headers:
            h_content_type = api_response.headers['content-type']

        #
        # raise exception on *critical* errors
        #
        non_critical_errors = [
            b'The MD5 for this File is invalid, a File with this MD5 already exists',  # 400 (application/json)
            b'The SHA-1 for this File is invalid, a File with this SHA-1 already exists',  # 400 (application/json)
            b'The SHA-256 for this File is invalid, a File with this SHA-256 already exists',  # 400 (application/json)
            b'The requested resource was not found',  # 404 (application/json)
            b'Could not find resource for relative',  # 500 (text/plain)
            b'The requested Security Label was not removed - access was denied',  # 401 (application/json)
        ]

        #
        # TODO: work out some logic to improve the API error handling, possible area where API could improve
        #

        # valid status codes 200, 201, 202
        # if api_response.status_code in [400, 401, 403, 500, 503]:
        if api_response.status_code not in [200, 201, 202]:
            # check for non critical errors that have bad status codes
            nce_found = False
            fail_msg = api_response.content
            for nce in non_critical_errors:
                # api_response_dict['message'] not in non_critical_errors:
                if re.findall(nce, api_response.content):
                    nce_found = True
                    break

            if ro.failure_callback is not None:
                ro.failure_callback(api_response.status_code)

            # raise error on bad status codes that are not defined as nce
            # (failure_callback was already invoked above)
            if not nce_found:
                self.tcl.critical('Status Code: {0:d}'.format(api_response.status_code))
                self.tcl.critical('Failed API Response: {0!s}'.format(api_response.content))
                raise RuntimeError(api_response.content)

        #
        # set response encoding (best guess)
        #
        if api_response.encoding is None:
            api_response.encoding = api_response.apparent_encoding

        #
        # Debug
        #
        self.tcl.debug('url: %s', api_response.url)
        self.tcl.debug('status_code: %s', api_response.status_code)
        self.tcl.debug('content-length: %s', h_content_length)
        self.tcl.debug('content-type: %s', h_content_type)

        #
        # Report
        #
        self.report.add_api_call()  # count api calls
        self.report.add_request_time(datetime.now() - start)
        self.tcl.debug('Request Time: {0!s}'.format(datetime.now() - start))

        if self._enable_report:
            report_entry = ReportEntry()
            report_entry.add_request_object(ro)
            report_entry.set_request_url(api_response.url)
            report_entry.set_status_code(api_response.status_code)
            report_entry.set_failure_msg(fail_msg)
            self.report.add(report_entry)

        #
        # return response
        #
        return api_response

    def api_response_handler(self, resource_obj, ro):
        """ """
        #
        # initialize vars
        #
        api_response_dict = {}
        obj_list = []
        # only track filter counts on request from this method
        ro.enable_track()

        #
        # debug
        #
        self.tcl.debug('Results Limit: {0!s}'.format(self._api_result_limit))

        # only resource supports pagination
        if ro.resource_pagination:
            ro.set_result_limit(self._api_result_limit)
            ro.set_result_start(0)
        else:
            ro.set_remaining_results(1)

        while ro.remaining_results > 0:
            #
            # api request
            #
            api_response = self.api_request(ro)
            # self.tcl.debug('Results Content: {0!s}'.format(api_response.content))
            self.tcl.debug('Status Code: {0!s}'.format(api_response.status_code))
            self.tcl.debug('Content Type: {0!s}'.format(api_response.headers['content-type']))

            #
            # Process API response
            #
            if api_response.headers['content-type'] == 'application/json':
                api_response_dict = api_response.json()
                
                # try and free memory for next api request
                api_response.close()
                del api_response  # doesn't appear to clear memory

                #
                # BULK INDICATOR (does not have status)
                #
                if 'indicator' in api_response_dict:
                    if ro.resource_type == ResourceType.INDICATORS:
                        data = api_response_dict['indicator']
                        for item in data:
                            obj_list.append(parse_indicator(
                                    item, resource_obj, ro.description, ro.request_uri, self._indicators_regex))

                            if len(obj_list) % 500 == 0:
                                self.tcl.debug('obj_list len: {0!s}'.format(len(obj_list)))

                elif api_response_dict['status'] == 'Failure':
                    # handle failed request (404 Resource not Found)
                    if 'message' in api_response_dict:
                        self.tcl.error('{0!s} "{1!s}"'.format(api_response_dict['message'], ro.description))
                    ro.set_remaining_results(0)
                    continue

                #
                # ADVERSARIES
                #
                elif ro.resource_type == ResourceType.ADVERSARIES:
                    data = api_response_dict['data']['adversary']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        obj_list.append(
                            parse_group(item, ResourceType.ADVERSARIES, resource_obj, ro.description, ro.request_uri))

                #
                # INDICATORS
                #
                elif ro.resource_type == ResourceType.INDICATORS:
                    data = api_response_dict['data']['indicator']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        obj_list.append(parse_indicator(
                                item, resource_obj, ro.description, ro.request_uri, self._indicators_regex))

                #
                # ADDRESSES
                #
                elif ro.resource_type == ResourceType.ADDRESSES:
                    data = api_response_dict['data']['address']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        obj_list.append(parse_indicator(
                                item, resource_obj, ro.description, ro.request_uri, self._indicators_regex))

                #
                # DOCUMENTS
                #
                elif ro.resource_type == ResourceType.DOCUMENTS:
                    data = api_response_dict['data']['document']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        obj_list.append(
                            parse_group(
                                item, ResourceType.DOCUMENTS, resource_obj, ro.description, ro.request_uri))

                #
                # EMAILS
                #
                elif ro.resource_type == ResourceType.EMAILS:
                    data = api_response_dict['data']['email']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        obj_list.append(
                            parse_group(
                                item, ResourceType.EMAILS, resource_obj, ro.description, ro.request_uri))

                #
                # EMAIL ADDRESSES
                #
                elif ro.resource_type == ResourceType.EMAIL_ADDRESSES:
                    data = api_response_dict['data']['emailAddress']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        obj_list.append(parse_indicator(
                            item, resource_obj, ro.description, ro.request_uri, self._indicators_regex))

                #
                # GROUPS
                #
                elif ro.resource_type == ResourceType.GROUPS:
                    data = api_response_dict['data']['group']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        obj_list.append(
                            parse_group(item, ResourceType.GROUPS, resource_obj, ro.description, ro.request_uri))

                #
                # FILES
                #
                elif ro.resource_type == ResourceType.FILES:
                    data = api_response_dict['data']['file']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        obj_list.append(parse_indicator(
                            item, resource_obj, ro.description, ro.request_uri, self._indicators_regex))

                #
                # HOSTS
                #
                elif ro.resource_type == ResourceType.HOSTS:
                    data = api_response_dict['data']['host']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        obj_list.append(parse_indicator(
                            item, resource_obj, ro.description, ro.request_uri, self._indicators_regex))

                #
                # DNSResolutions
                #
                elif ro.resource_type == ResourceType.DNS_RESOLUTIONS:
                    data = api_response_dict['data']['dnsResolution']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        if 'addresses' in item:  # don't process dns resolutions that have no addresses
                            obj_list.append(parse_dns_resolution(item))
                #
                # INCIDENTS
                #
                elif ro.resource_type == ResourceType.INCIDENTS:
                    data = api_response_dict['data']['incident']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        obj_list.append(
                            parse_group(item, ResourceType.INCIDENTS, resource_obj, ro.description, ro.request_uri))
                            
                #
                # METRICS
                #
                # elif ro.resource_type == ResourceType.OWNER_METRICS:
                #     data = api_response_dict['data']['ownerMetric']
                #     if not isinstance(data, list):
                #         data = [data]  # for single results to be a list
                #     for item in data:
                #         obj_list.append(
                #             parse_metrics(item, resource_obj, ro.description, ro.request_uri))
                            
                #
                # MINE
                #
                # elif ro.resource_type == ResourceType.OWNER_MINE:
                #     data = api_response_dict['data']['owner']
                #     if not isinstance(data, list):
                #         data = [data]  # for single results to be a list
                #     for item in data:
                #         obj_list.append(
                #             parse_metrics(item, resource_obj, ro.description, ro.request_uri))
                            
                #
                # MEMBERS
                #
                # elif ro.resource_type == ResourceType.OWNER_MEMBERS:
                #     data = api_response_dict['data']['user']
                #     if not isinstance(data, list):
                #         data = [data]  # for single results to be a list
                #     for item in data:
                #         obj_list.append(
                #             parse_metrics(item, resource_obj, ro.description, ro.request_uri))
                            
                #
                # OWNERS
                #
                elif ro.resource_type == ResourceType.OWNERS:
                    data = api_response_dict['data']['owner']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        obj_list.append(
                            parse_owner(item, resource_obj, ro.description, ro.request_uri))

                #
                # SIGNATURES
                #
                elif ro.resource_type == ResourceType.SIGNATURES:
                    data = api_response_dict['data']['signature']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        obj_list.append(
                            parse_group(item, ResourceType.SIGNATURES, resource_obj, ro.description, ro.request_uri))

                #
                # TASKS
                #
                elif ro.resource_type == ResourceType.TASKS:
                    data = api_response_dict['data']['task']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        obj_list.append(
                            parse_task(item, ResourceType.TASKS, resource_obj, ro.description, ro.request_uri))

                #
                # THREATS
                #
                elif ro.resource_type == ResourceType.THREATS:
                    data = api_response_dict['data']['threat']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        obj_list.append(
                            parse_group(item, ResourceType.THREATS, resource_obj, ro.description, ro.request_uri))

                #
                # URLS
                #
                elif ro.resource_type == ResourceType.URLS:
                    data = api_response_dict['data']['url']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        obj_list.append(parse_indicator(
                            item, resource_obj, ro.description, ro.request_uri, self._indicators_regex))

                #
                # VICTIMS
                #
                elif ro.resource_type == ResourceType.VICTIMS:
                    data = api_response_dict['data']['victim']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        # victims data comes back with no owner, manually add owner here
                        item['owner'] = ro.owner
                        obj_list.append(parse_victim(item, resource_obj, ro.description, ro.request_uri))

                #
                # BatchJobs
                #
                elif ro.resource_type == ResourceType.BATCH_JOBS:
                    data = api_response_dict['data']['batchStatus']
                    if not isinstance(data, list):
                        data = [data]  # for single results to be a list
                    for item in data:
                        # victims data comes back with no owner, manually add owner here
                        item['owner'] = ro.owner
                        obj_list.append(parse_batch_job(item, resource_obj, ro.description, ro.request_uri))

            elif api_response.headers['content-type'] == 'text/plain':
                self.tcl.error('{0!s} "{1!s}"'.format(api_response.content, ro.description))
                ro.set_remaining_results(0)
                continue

            # add_obj resource_pagination if required
            if ro.resource_pagination:
                # get the number of results returned by the api
                if ro.result_start == 0:
                    ro.set_remaining_results(api_response_dict['data']['resultCount'] - ro.result_limit)
                else:
                    ro.set_remaining_results(ro.remaining_results - ro.result_limit)

                # increment the start position
                ro.set_result_start(ro.result_start + ro.result_limit)
            else:
                ro.set_remaining_results(0)

        self.tcl.debug('Result Count: {0!s}'.format(len(obj_list)))
        self.report.add_unfiltered_results(len(obj_list))
        return obj_list

    #
    # api / sdk settings
    #

    def result_pagination(self, ro, identifier):
        data = []

        ro.set_result_limit(self._api_result_limit)
        ro.set_result_start(0)

        while ro.remaining_results > 0:
            api_response = self.api_request(ro)

            if api_response.headers['content-type'] == 'application/json':
                api_response_dict = api_response.json()
                if api_response_dict['status'] == 'Success':
                    data.extend(api_response_dict['data'][identifier])

            # get the number of results returned by the api
            if ro.result_start == 0:
                ro.set_remaining_results(api_response_dict['data']['resultCount'] - ro.result_limit)
            else:
                ro.set_remaining_results(ro.remaining_results - ro.result_limit)

            # increment the start position
            ro.set_result_start(ro.result_start + ro.result_limit)

        return data

    def report_enable(self):
        """ """
        self._enable_report = True

    def report_disable(self):
        """ """
        self._enable_report = False

    def set_activity_log(self, data_bool):
        """ enable or disable api activity log """
        if isinstance(data_bool, bool):
            self._activity_log = data_bool

    def set_api_request_timeout(self, data_int):
        """ set timeout value for the requests module """
        if isinstance(data_int, int):
            self._api_request_timeout = data_int
        else:
            raise AttributeError(ErrorCodes.e0110.value.format(data_int))

    def set_api_retries(self, data):
        """ set the number of api retries before exception is raised """
        if isinstance(data, int):
            self._api_retries = data
        else:
            raise AttributeError(ErrorCodes.e0120.value.format(data))

    def set_api_sleep(self, data):
        """ set the amount of time between retries """
        if isinstance(data, int):
            self._api_sleep = data
        else:
            raise AttributeError(ErrorCodes.e0130.value.format(data))

    def set_api_result_limit(self, data_int):
        """ set the number of result to return per api request (500 max) """
        if isinstance(data_int, int):
            self._api_result_limit = data_int
        else:
            raise AttributeError(ErrorCodes.e0140.value.format(data_int))

    def set_proxies(self, proxy_address, proxy_port, proxy_user=None, proxy_pass=None):
        """ define proxy server to use with the requests module """
        # "http": "http://*****:*****@10.10.1.10:3128/",
        
        # accept host with http(s) or without
        proxy_method = 'http://'
        if re.match('^http', proxy_address):
            proxy_method, proxy_host = proxy_address.split('//')
            proxy_method += '//'
            proxy_address = proxy_host
        
        # TODO: add validation
        if proxy_user is not None and proxy_pass is not None:
            self._proxies['https'] = '{0!s}{1!s}:{2!s}@{3!s}:{4!s}'.format(
                proxy_method, proxy_user, proxy_pass, proxy_address, proxy_port)
        else:
            self._proxies['https'] = '{0!s}{1!s}:{2!s}'.format(
                proxy_method, proxy_address, proxy_port)
            
    def get_proxies(self):
        """ get proxy settings """
        return self._proxies

    def set_tcl_file(self, fqpn, level='info'):
        """ set the log file destination and log level """
        file_path = os.path.dirname(fqpn)
        if os.access(file_path, os.W_OK):
            if self.tcl.level > self.log_level[level]:
                self.tcl.setLevel(self.log_level[level])
            fh = logging.FileHandler(fqpn)
            # fh.set_name('tc_log_file')  # not supported in python 2.6
            if level in self.log_level.keys():
                fh.setLevel(self.log_level[level])
            else:
                fh.setLevel(self.log_level['info'])
            fh.setFormatter(self.formatter)
            self.tcl.addHandler(fh)

    # def set_tcl_level(self, level):
    #     """ """
    #     if level in self.log_level.keys():
    #         if self.tcl.level > self.log_level[level]:
    #             self.tcl.setLevel(self.log_level[level])
    #         self.tcl.handlers[0].setLevel(self.log_level[level])

    def set_tcl_console_level(self, level):
        """ set the console log level """
        if level in self.log_level.keys():
            if self.tcl.level > self.log_level[level]:
                self.tcl.setLevel(self.log_level[level])
            ch = logging.StreamHandler()
            # ch.set_name('console')  # not supported in python 2.6
            ch.setLevel(self.log_level[level])
            ch.setFormatter(self.formatter)
            self.tcl.addHandler(ch)

    def set_indicator_regex(self, type_enum, compiled_regex):
        """ overwrite default SDK regex """
        self.tcl.debug('overwrite regex for {0!s}'.format(type_enum.name))
        if not isinstance(type_enum, IndicatorType):
            raise AttributeError(ErrorCodes.e0150.value.format(type_enum))

        if not isinstance(compiled_regex, list):
            compiled_regex = [compiled_regex]

        cr_list = []
        for cr in compiled_regex:
            if isinstance(cr, self._retype):
                cr_list.append(cr)
            else:
                raise AttributeError(ErrorCodes.e0160.value.format(cr))

        self._indicators_regex[type_enum.name] = cr_list

    #
    # Resources
    #

    def adversaries(self):
        """ return an adversary container object """
        return Adversaries(self)

    def bulk(self):
        """ return a bulk container object """
        return Bulk(self)

    def bulk_indicators(self, on_demand=False):
        """ return a bulk indicator container object """
        return BulkIndicators(self, on_demand)

    def documents(self):
        """ return a document container object """
        return Documents(self)

    def emails(self):
        """ return an email container object """
        return Emails(self)

    def groups(self):
        """ return an group container object """
        return Groups(self)

    def incidents(self):
        """ return an incident container object """
        return Incidents(self)

    def indicators(self):
        """ return an indicator container object """
        return Indicators(self)

    def owners(self):
        """ return an owner container object """
        return Owners(self)

    def signatures(self):
        """ return a signature container object """
        return Signatures(self)
        
    def tasks(self):
        """ return a task container object """
        return Tasks(self)

    def threats(self):
        """ return a threat container object """
        return Threats(self)

    def victims(self):
        """ return a victim container object """
        return Victims(self)

    def batch_jobs(self):
        """ return a batch job container object """
        return BatchJobs(self)
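
A minimal usage sketch for the client above. Hedged: the credentials and URL are placeholders, and the container retrieve()/iteration calls reflect the legacy ThreatConnect SDK pattern suggested by this class rather than a verified API.

tc = ThreatConnect('my-api-id', 'my-api-secret', 'Example Org',
                   'https://api.threatconnect.com')  # hypothetical values
tc.set_api_result_limit(500)
tc.report_enable()

indicators = tc.indicators()   # container object from the method above
indicators.retrieve()          # assumed to drive api_filter_handler internally
for indicator in indicators:   # containers are assumed iterable
    print(indicator)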
Ejemplo n.º 55
0
def play_project(project, reporter = None, project_path = 'unknown', variables = None):
	assert isinstance(project, Project)

	if project.current_special is None:
		raise RuntimeError('No special to start!')

	user_count = project.user_count
	iteration_count = project.iteration_count
	special = project.current_special
	user_factory = project.user_factory
	iteration_factory = project.iteration_factory
	global_factory = project.global_factory

	if reporter is not None:
		from Report import Report
		if isinstance(reporter, (str, unicode)):
			reporter = Report(reporter)
		assert isinstance(reporter, Report)

		import datetime
		start_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
		info = {
				'Start Time': start_time,
				'User Count': user_count,
				'Iteration Count': iteration_count,
				'Special': special.label,
				'Project Path': project_path,
			}

		import ReportManager
		ReportManager.start_report(reporter = reporter, project = project, info = info)

	import PlayPolicy
	policy = PlayPolicy.IterationBasedPlayPolicy(
			player = special,
			user_count = user_count,
			iteration_count = iteration_count,
			user_factory = user_factory,
			iteration_factory = iteration_factory,
			global_factory = global_factory,
			reporter = reporter
			)

	import os
	import sys
	runtime_path = os.path.join(sys.path[0], 'runtime')
	plugin_path = os.path.join(sys.path[0], 'plugin')
	sys.path.append(runtime_path)
	sys.path.append(plugin_path)

	if variables:
		from Scope import Scope
		scope = Scope()
		scope.variables = variables
		policy.play(scope)
	else:
		policy.play()

	# remove the runtime/plugin paths appended above
	if sys.path[-2:] == [runtime_path, plugin_path]:
		sys.path.pop()
		sys.path.pop()

	if reporter:
		reporter.finish()
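
A minimal usage sketch, assuming Project is importable and configured elsewhere; all values below are hypothetical. Note that a string reporter is wrapped in Report(...) by the function itself.

from Project import Project  # assumed module layout, mirroring the local imports above

project = Project()  # hypothetical: user_count, iteration_count, current_special set elsewhere
play_project(project,
             reporter='run_report',      # a string is converted to Report('run_report')
             project_path='/tmp/demo',   # hypothetical
             variables={'host': 'example.com'})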
Ejemplo n.º 56
0
def restore(self, diff_snapshot):
    report = Report()
    for report_class in self.list_subclass:
        report.union(report_class.restore(diff_snapshot))
    return report
Ejemplo n.º 57
0
def fstest(fs, filesize=16 * MEG, depth=1, direct=False,
           sync=False, crtdlt=False,
           bsizes=(4096, 128 * 1024, 4096 * 1024)):
    """ compute & display standard fio to filesystem on a disk
        fs -- file system to be tested
        filesize -- size of file in which I/O is being done
        depth -- number of concurrent requests
        direct -- I/O is direct (not buffered)
        sync -- updates are immediately flushed
    """

    if crtdlt:
        (tc, bwc, loadc) = fs.create(sync=sync)
        (td, bwd, loadd) = fs.delete(sync=sync)

        r = Report(("create", "delete"))
        r.printHeading()
        r.printIOPS(1, (bwc, bwd))
        r.printLatency(1, (tc, td))

    r = Report(("seq read", "seq write", "rnd read", "rnd write"))
    r.printHeading()
    for bs in bsizes:
        (tsr, bsr, lsr) = fs.read(bs, filesize, seq=True,
                                  depth=depth, direct=direct)
        (tsw, bsw, lsw) = fs.write(bs, filesize, seq=True,
                                   depth=depth, direct=direct, sync=sync)
        (trr, brr, lrr) = fs.read(bs, filesize, seq=False, depth=depth,
                                  direct=direct)
        (trw, brw, lrw) = fs.write(bs, filesize, seq=False, depth=depth,
                                   direct=direct, sync=sync)

        r.printBW(bs, (bsr, bsw, brr, brw))
        r.printIOPS(bs, (bsr, bsw, brr, brw))
        r.printLatency(bs, (tsr, tsw, trr, trw))
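
A minimal driver sketch for fstest(). Hedged: FileSystem is a hypothetical stand-in for any object exposing the create/delete/read/write methods used above, and MEG is assumed to be defined alongside it.

fs = FileSystem()  # hypothetical object with create/delete/read/write
fstest(fs, filesize=16 * MEG, depth=4, direct=True, sync=False, crtdlt=True)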
Ejemplo n.º 58
0
def servertest(fs, depth=1, crtdlt=False,
               bsizes=(4096, 128 * 1024, 4096 * 1024)):
    """ compute & display standard test results """

    if crtdlt:
        tc = fs.create()
        td = fs.delete()

        r = Report(("create", "delete"))
        r.printHeading()
        r.printIOPS(1, (SECOND / tc, SECOND / td))
        r.printLatency(1, (tc, td))

    r = Report(("seq read", "seq write", "rnd read", "rnd write"))
    r.printHeading()
    for bs in bsizes:
        (tsr, bsr, rload) = fs.read(bs, depth=depth, seq=True)
        (tsw, bsw, wload) = fs.write(bs, depth=depth, seq=True)
        (trr, brr, rload) = fs.read(bs, depth=depth, seq=False)
        (trw, brw, wload) = fs.write(bs, depth=depth, seq=False)
        r.printBW(bs, (bsr, bsw, brr, brw))
        r.printIOPS(bs, (bsr, bsw, brr, brw))
        r.printLatency(bs, (tsr, tsw, trr, trw))
Ejemplo n.º 59
0
def testSaveReport(self):
    rep = Report()
    self.assertTrue(rep.saveReport("Escola Santa Clara", "Juan", "professor", 0, 1, 0, 1, 0, 2, 1, 0, 0, 1))
Ejemplo n.º 60
0
#
# basic unit test exerciser
#
if __name__ == '__main__':

    cpu = makeCPU([])
    print("%s w/%dGB of DDR3-%d RAM" %
          (cpu.desc, cpu.mem_size / GIG, cpu.mem_speed))
    print()
    print("    thread switch   %dus" % (cpu.thread_us()))
    print("    process switch  %dus" % (cpu.proc_us()))
    print("    DMA start/intr  %dus" % (cpu.dma_us()))

    from Report import Report

    r = Report(("mem-rd", "mem-wrt", "process", "instrs"))
    print()
    r.printHeading()
    sizes = [1024, 4096, 128*1024, 1024*1024]
    for bs in sizes:
        mem_r = cpu.mem_read(bs)
        mem_w = cpu.mem_write(bs)
        mem_p = cpu.process(bs)
        mem_x = cpu.execute(bs)
        r.printLatency(bs, (mem_r, mem_w, mem_p, mem_x))

    r = Report(("sha-1", "comp", "decomp", "RAID-6"))
    print()
    r.printHeading()
    sizes = [1024, 4096, 128*1024, 1024*1024]
    for bs in sizes: