Example #1
    def setup(self):
        suffixes = 'abc'
        self.report = Reporting()
        self.empty_report = Reporting()
        # Build three user/merchant pairs keyed "alicea".."alicec" and
        # "jacka".."jackc", plus one transaction per pair.
        for i in range(1, 4):
            user = User(100 * i)
            self.report.user_objects["alice" + suffixes[i - 1]] = user
            user.create("alice" + suffixes[i - 1], "*****@*****.**")
            merchant = Merchant(1.25 + float(i))
            self.report.merchant_objects["jack" + suffixes[i - 1]] = merchant
            merchant.create("jack" + suffixes[i - 1], "*****@*****.**")
            transact = Transact(user, merchant, 100, "withdrawl")
            self.report.transact_objects[i] = transact
            transact.check_transact()
Example #2
def instantiate_testrail(request):
    # Use a local Reporting stub when TestRail is skipped; otherwise build a
    # real API client from the ini configuration.
    if request.config.getoption("--skip-testrail"):
        tr_client = Reporting()
    else:
        tr_client = APIClient(request.config.getini("tr_url"), request.config.getini("tr_user"),
                              request.config.getini("tr_pass"), request.config.getini("tr_project_id"))
    yield tr_client
Example #3
    def __init__(self, configuration, modelTime):
        DynamicModel.__init__(self)

        self.modelTime = modelTime
        self.model = ModflowOfflineCoupling(configuration, modelTime)

        self.reporting = Reporting(configuration, self.model, modelTime)
Example #4
    def __init__(self, data_loader, config):
        """Initialize configurations."""

        # Data loader.
        self.data_loader = data_loader

        # Model configurations.
        self.image_size = config.image_size
        self.encoder_mode = config.encoder_mode
        self.encoder_last = config.encoder_last
        self.encoder_start_ch = config.encoder_start_ch
        self.encoder_target_ch = config.encoder_target_ch
        self.image_layer = config.image_layer

        # Training configurations.
        self.dataset = config.dataset
        self.batch_size = config.batch_size
        self.num_iters = config.num_iters
        self.num_iters_decay = config.num_iters_decay
        self.e_lr = config.e_lr
        self.beta1 = config.beta1
        self.beta2 = config.beta2
        self.resume_iters = config.resume_iters

        # Test configurations.
        self.test_iters = config.test_iters

        # Miscellaneous.
        self.use_tensorboard = config.use_tensorboard
        #self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.device = torch.device('cuda:' + config.gpu)
        self.email_address = config.email_address
        self.image_sending = config.image_sending

        # Directories.
        self.train_dir = config.train_dir
        self.log_dir = config.log_dir
        self.sample_dir = config.sample_dir
        self.model_save_dir = config.model_save_dir
        self.result_dir = config.result_dir

        # Step size.
        self.log_step = config.log_step
        self.sample_step = config.sample_step
        self.model_save_step = config.model_save_step
        self.model_save_start = config.model_save_start
        self.lr_update_step = config.lr_update_step

        # Reporting
        self.reporting = Reporting(
            self.email_address, self.image_sending,
            os.path.join(self.train_dir, self.sample_dir))

        # Build the model and tensorboard.
        self.build_model()
        if self.use_tensorboard:
            self.build_tensorboard()
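
The device line above hard-codes a CUDA index, while the commented-out line shows the availability-checked alternative. A defensive variant, sketched here as an assumption rather than code from the original project, keeps config.gpu as the preferred index but falls back to CPU when CUDA is absent:

    import torch

    def pick_device(gpu_index):
        # Prefer the configured CUDA device; use the CPU if CUDA is unavailable.
        if torch.cuda.is_available():
            return torch.device('cuda:' + str(gpu_index))
        return torch.device('cpu')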
Example #5
    def __init__(self,
                 configuration,
                 modelTime,
                 initialState=None,
                 system_argument=None):
        DynamicModel.__init__(self)

        self.modelTime = modelTime
        self.model = PCRGlobWB(configuration, modelTime, initialState)
        self.reporting = Reporting(configuration, self.model, modelTime)

        # the model will set parameters based on the global pre-multipliers given in the argument:
        self.adusting_parameters(configuration, system_argument)

        # make the configuration available for the other method/function
        self.configuration = configuration
Example #6
    def __init__(self, configuration, modelTime, initialState=None, system_argument=None):
        DynamicModel.__init__(self)

        self.modelTime = modelTime        
        self.model = PCRGlobWB(configuration, modelTime, initialState)
        self.reporting = Reporting(configuration, self.model, modelTime)
        
        # the model will set parameters based on the global pre-multipliers given in the argument:
        if system_argument is not None:
            self.adusting_parameters(configuration, system_argument)

        # option to include merging processes for pcraster maps and netcdf files:
        self.with_merging = True
        if ('with_merging' in configuration.globalOptions) and (configuration.globalOptions['with_merging'] == "False"):
            self.with_merging = False

        # make the configuration available for the other method/function
        self.configuration = configuration
Example #7
    def setup(self):
        self.report = Reporting()
        self.class_map = {
            "user": {
                "class_name": User,
                "report_object": self.report.user_objects
            },
            "merchant": {
                "class_name": Merchant,
                "report_object": self.report.merchant_objects
            },
            "txn": {
                "class_name": Transact,
                "report_object": self.report.transact_objects
            },
            "report": {
                "class_name": Reporting,
                "report_object": self.report
            }
        }
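
class_map here acts as a dispatch table: a command keyword maps to a class plus the container on the report that stores its instances. A minimal lookup helper (hypothetical, not part of the original test) could use it like this:

    def resolve(class_map, keyword):
        # Return (class, report container) for a keyword such as "user" or "txn".
        entry = class_map[keyword]
        return entry["class_name"], entry["report_object"]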
Example #8
    def __init__(self, config, logger_obj, api_watch_list):

        self.config = config
        self.logger = logger_obj
        self.api_watch_list = api_watch_list
        self.execution_timeout = self.config.get_execution_timeout()
        self.capture_basic_behavior = self.config.get_capture_behavior_report_basic_flag()
        self.capture_complete_behavior = self.config.get_capture_behavior_report_complete_flag()
        self.process_list = []
        self.target_process_pid = None

        # Initialize Reporting Module
        self.report_generator = Reporting(config, logger_obj)

        # Initialize Queue for FriSpyGUI to receive the events
        self.event_queue_name = self.config.get_event_queue_name()
        self.msmqueue_event = MSMQCustom(self.event_queue_name)

        self.config_queue_name = self.config.get_config_queue_name()
        self.msmqueue_config = MSMQCustom(self.config_queue_name)

        # Initialize Controller
        self._stop_requested = threading.Event()
        self._reactor = Reactor(run_until_return=lambda reactor: self._stop_requested.wait())

        self._device = frida.get_local_device()
        self._sessions = set()
        try:

            self._device.on("child-added", lambda child: self._reactor.schedule(lambda: self._on_child_added(child)))
            self._device.on("child-removed", lambda child: self._reactor.schedule(lambda: self._on_child_removed(child)))
            self._device.on("output", lambda pid, fd, data: self._reactor.schedule(lambda: self._on_output(pid, fd, data)))
            self._device.on("process-crashed", lambda crash: self._reactor.schedule(lambda: self._on_process_crashed(crash)))
            self._device.on("lost", lambda crash: self._reactor.schedule(lambda: self._on_process_crashed(crash)))

        except Exception as e:
            self.logger.log("error", "Exception - FriSpyController : run : %s" % str(e))
            self.Controller_cleaup(None)
            sys.exit(1)
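
The Reactor above is configured to run until _stop_requested is set: run_until_return blocks on the event's wait(). A matching stop method, sketched as an assumption rather than code from the original class, would simply set the event:

        def stop(self):
            # Releases the wait() inside run_until_return, letting the Reactor exit.
            self._stop_requested.set()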
Example #9
def main():
    print("== DevSecOps: ignore ==\nScan github repositories for misconfigured ignore files.\n")

    args = Arguments()
    if not args.load(sys.argv[1:]):
        exit(0)
    if args.help:
        args.print_help()
        exit(0)

    github = Github(args.github_token)
    crawler = GithubCrawler(github, args.organization)
    results = ScanResults()
    try:
        crawler.scan(results)
    except KeyboardInterrupt:
        print("\n\n*****************************\n[W] User aborted with CTRL-C.\n*****************************\n")
    Reporting(verbose=args.verbose).print(results)
    GitIssueCreator(github,
                    verbose=args.verbose,
                    create_issue=args.create_issue,
                    create_pr=args.create_pr).create_issues(results)
Example #10
def main():
    IN_initialize = Initialize()
    IN_reporting = Reporting()
    IN_audits = Audits()

    Output = []

    analytics = IN_initialize.initialize_analyticsreporting()
    response = IN_reporting.get_report(analytics)
    management_get = IN_reporting.get_management(analytics)
    useful_values = IN_reporting.print_response(response)
    accounts = IN_reporting.get_gtm(analytics)

    print "######"
    IN_audits.check_siteSearch(useful_values)
    print "_______"
    Output.append(IN_audits.check_medium(useful_values))
    #check_totalValue()
    print "_______"
    IN_audits.check_customDimensions(management_get)
    print "_______"
    #url = raw_input('Enter URL for self-Referral check: ')
    IN_audits.check_selfReferral('yandex', useful_values)
    print "_______"
    IN_audits.check_eventTracking(useful_values)
    print "_______"
    Output.append(IN_audits.check_adwordsLink(management_get))
    print "_______"
    IN_audits.check_gtm(accounts)
    print "_______"
    IN_audits.check_goals(management_get)
    print "_______"
    IN_audits.check_customMetrics(management_get)
    print "######"
    print Output
    with open('data.json', 'w') as djson:
        json.dump(Output, djson)
Example #11
def final_report(feature_dictionary, wals, feature_num_instances_dictionary, possibilities, errors, label):
    reporting = Reporting(feature_dictionary, wals,
                          feature_num_instances_dictionary,
                          possibilities, label, args.minIinstances)
    # write reports to stdout
    reporting.print_order_confusion_matrix_for_feature()
    reporting.print_accuracy_vs_num_instances()

    # now write reports to file
    base_file_name = os.path.join(data_dir, label.lower().replace(" ", "_"))
    report_file_name = base_file_name + "_report.txt"
    csv_file_name = base_file_name + "_accuracy_data.txt"
    report_file = open(report_file_name, mode='w')
    csv_file = open(csv_file_name, mode='w')
    reporting.print_order_confusion_matrix_for_feature(report_file)
    reporting.print_accuracy_vs_num_instances(report_file)

    # make a csv (maybe not useful?)
    reporting.write_accuracy_vs_num_instances_to_as_csv(csv_file)

    # print the errors to a file
    errors.print_incorrect_guesses(report_file)
    report_file.close()
    csv_file.close()
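
report_file and csv_file are opened and closed by hand above; an equivalent structure using context managers (a stylistic variant, not from the original) guarantees both files are closed even if a report call raises:

    with open(report_file_name, mode='w') as report_file, \
         open(csv_file_name, mode='w') as csv_file:
        reporting.print_order_confusion_matrix_for_feature(report_file)
        reporting.print_accuracy_vs_num_instances(report_file)
        reporting.write_accuracy_vs_num_instances_to_as_csv(csv_file)
        errors.print_incorrect_guesses(report_file)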
Example #12
    def initialize_model(self):
        if self.model is not None:
            # already initialized
            return

        try:

            logger.info("PCRGlobWB: initialize_model")

            initial_state = None
            self.model = PCRGlobWB(self.configuration, self.model_time,
                                   initial_state)

            self.reporting = Reporting(self.configuration, self.model,
                                       self.model_time)

            logger.info("Shape of maps is %s", str(self.shape))

            logger.info("PCRGlobWB Initialized")

        except:
            import traceback
            traceback.print_exc()
            raise
Example #13
    def __init__(self, configuration, modelTime, initialState=None):
        DynamicModel.__init__(self)

        self.modelTime = modelTime        
        self.model = PCRGlobWB(configuration, modelTime, initialState)
        self.reporting = Reporting(configuration, self.model, modelTime)
Example #14
    def fit(self, data):
        """
           sim_mode = {global, local}
           sim_delta = (0, 1]
           clustering_mode = {None, hfps, wsc}
        """
        self.data = data
        if self.data.data_train_folds:
            samples_training = self.data.data_train_folds[self.exp]
            samples_test = self.data.data_valid_folds[self.exp]
        else:
            samples_training = self.data.data_train_list
            samples_test = self.data.data_test_list

        if REBOOT_MODEL:
            trained_model = RebootModel(self.exp, self.data.dtypes)
            pop = trained_model.get_model()
            self.population = ClassifierSets(
                attribute_info=data.attribute_info,
                dtypes=data.dtypes,
                rand_func=self.rng,
                sim_mode='global',
                sim_delta=0.9,
                clustering_method=None,
                cosine_matrix=self.data.sim_matrix,
                data_cov_inv=self.data.cov_inv,
                popset=pop)
            self.population.micro_pop_size = sum(
                [classifier.numerosity for classifier in pop])
            self.population.pop_average_eval(self.data.no_features)
            analyze(pop, data)
        else:
            self.population = ClassifierSets(
                attribute_info=data.attribute_info,
                dtypes=data.dtypes,
                rand_func=self.rng,
                sim_mode='global',
                sim_delta=0.9,
                clustering_method=None,
                cosine_matrix=self.data.sim_matrix,
                data_cov_inv=self.data.cov_inv)

        if THRESHOLD == 1:
            bi_partition = one_threshold
        elif THRESHOLD == 2:
            bi_partition = rank_cut
        else:
            raise Exception("prediction threshold method unidentified!")

        def track_performance(samples):
            f_score = 0
            label_prediction = set()
            for sample in samples:
                self.population.make_eval_matchset(sample[0])
                if not self.population.matchset:
                    f_score += fscore(label_prediction, sample[1])
                else:
                    # if PREDICTION_METHOD == 1:
                    label_prediction = max_prediction([
                        self.population.popset[ref]
                        for ref in self.population.matchset
                    ], self.rng.randint)
                    # else:
                    #     _, label_prediction = aggregate_prediction([self.population.popset[ref]
                    #                                  for ref in self.population.matchset])
                    # label_prediction = bi_partition(vote)
                    f_score += fscore(label_prediction, sample[1])
            return f_score / len(samples)

        while self.iteration < (MAX_ITERATION + 1):
            sample = samples_training[self.iteration % len(samples_training)]
            self.train_iteration(sample)

            if (self.iteration % TRACK_FREQ) == 0 and self.iteration > 0:
                self.timer.start_evaluation()
                test_fscore = track_performance(samples_test)
                train_fscore = track_performance(samples_training)
                self.population.pop_average_eval(self.data.no_features)
                self.training_track.write(
                    str(self.iteration) + ", " +
                    self.population.get_pop_tracking() + ", " +
                    str("%.4f" % train_fscore) + ", " +
                    str("%.4f" % test_fscore) + ", " +
                    str("%.4f" % self.timer.get_global_timer()) + "\n")
                self.timer.stop_evaluation()

                self.track_to_plot.append([
                    self.iteration, train_fscore, test_fscore,
                    self.population.ave_fitness,
                    float(self.population.micro_pop_size / MAX_CLASSIFIER),
                    float(len(self.population.popset) / MAX_CLASSIFIER)
                ])

            self.iteration += 1

        self.training_track.close()

        self.timer.start_evaluation()
        self.population.pop_average_eval(self.data.no_features)
        self.population.estimate_label_pr(samples_training)
        [train_evaluation, _,
         train_coverage] = self.evaluation(samples_training)
        [test_evaluation, test_class_precision,
         test_coverage] = self.evaluation(samples_test)
        self.timer.stop_evaluation()

        reporting = Reporting(self.exp)
        reporting.write_pop(self.population.popset, self.data.dtypes)
        _ = self.timer.get_global_timer()
        reporting.write_model_stats(self.population, self.timer,
                                    train_evaluation, train_coverage,
                                    test_evaluation, test_coverage)

        return [test_evaluation, test_class_precision, self.track_to_plot]
Example #15
    def __init__(self):
        self.retrieval = Retrieval()
        self.reporting = Reporting()
Example #16
            }
            dict_filter_1.append(d)
            iterator += 1
        except:
            continue

# Filter data for processing (strip the [] list wrappers and u'' prefixes from values)
captureFilteredDict = []
for item in dict_filter_1:
    d = {
        'VALID_FRAME_N': str(item["valid_frame.number"]),  # IP packet
        'FRAME_N': str(item["frame.number"][0]),  # Frame
        'IP_SRC': str(item["ip.src"]),
        'IP_DST': str(item["ip.dst"][0]),
        'IP_DSCP': str(item["ip.dsfield.dscp"][0]),
        'QOS': str(item["wlan.qos.priority"][0]),
        'IP_LEN': str(item["ip.len"][0])
    }
    captureFilteredDict.append(d)

# Reporting: Create report file
flReport = Reporting("report.html", configDict, captureFilteredDict)
flReport.doReport()  # Generate report

print("Report created. Opening in browser...\n")
time.sleep(0.5)

new = 2  # Open in a new tab, if possible
url = "file://" + os.path.realpath("report.html")  # URL to report file
webbrowser.open(url, new=new)  # Open report in web-browser
Example #17
            elif report_type == "total-dues":
                return "total: {}".format(reporting_object.dues_user())

            else:
                raise Exception("Invalid input")
        else:
            raise Exception("Invalid input")
    except IndexError as e:
        return "Expected more data {}".format(e.message)
    except Exception as e:
        return "Error occurred {}".format(e.message)


if __name__ == '__main__':  #pragma: no cover
    report = Reporting()
    """
	class_map is kind of config where we could put all the classes all the available objects of the respective classes
	"""
    class_map = {
        "user": {
            "class_name": User,
            "report_object": report.user_objects
        },
        "merchant": {
            "class_name": Merchant,
            "report_object": report.merchant_objects
        },
        "txn": {
            "class_name": Transact,
            "report_object": report.transact_objects
Example #18
    def initial(self):
        self.reporting = Reporting(self.configuration, self.model,
                                   self.modelTime)
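
These examples come from different projects, so the Reporting signatures differ: the test fixtures construct it with no arguments (Examples #1, #7, #15), the PCRGlobWB-style models pass (configuration, model, modelTime) (Examples #3, #5, #6, #12, #13, #18), and the remaining projects use their own argument lists. A minimal stand-in compatible with the first two call shapes, offered purely as an illustration and not any project's real class, could look like:

    class Reporting:
        # Hypothetical stub: the defaults cover the no-argument test usage, and
        # the three parameters match the model-coupling call sites.
        def __init__(self, configuration=None, model=None, modelTime=None):
            self.configuration = configuration
            self.model = model
            self.modelTime = modelTime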