Example #1
def scheduled_scan():
    scan_starttime = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    session_id = base64.b64encode(
        scan_starttime.encode("ascii")).decode("ascii")
    utils.config_test()
    exploitdb_success = False
    exploitdb_vulndata = None  # stays None when the ExploitDB source is disabled in config
    if cp.get('exploitdb', 'use_exploitdb') == 'True':
        ExploitDB.download_vulndata(langdata)
        extracted = ExploitDB.extract_windows_exploit(langdata)
        exploitdb_vulndata = ExploitDB.parse_vulndata(extracted)
        if exploitdb_vulndata is not None:
            exploitdb_success = True
    if cp.get('general', 'do_not_save_vulndata') == 'True':
        os.remove(cp.get('exploitdb', 'vulndata_filename'))
    jvn_success = False
    jvn_vulndata = None  # stays None when the JVN source is disabled in config
    if cp.get('jvn', 'use_jvn') == 'True':
        jvn_vulndata = JVN.download_vulndata(langdata)
        if jvn_vulndata is not None:
            jvn_success = True
    nvd_success = False
    nvd_vulndata = None  # stays None when the NVD source is disabled in config
    if cp.get('nvd', 'use_nvd') == 'True':
        nvd_vulndata = NVD.download_vulndata(langdata)
        if nvd_vulndata is not None:
            nvd_success = True
    installed = {}
    installed.update(local_app.getapp_from_wmi())
    installed.update(local_app.getapp_from_hklm())
    installed.update(local_app.getapp_from_hklmwow64())
    installed.update(local_app.getapp_from_hkcu())
    result = {}
    count = 0
    scanret_exploitdb = ExploitDB.scan(langdata, exploitdb_vulndata, installed)
    scanret_jvn = JVN.scan(langdata, jvn_vulndata, installed)
    scanret_nvd = NVD.scan(langdata, nvd_vulndata, installed)
    scanret_winupdate = WindowsUpdate.scan(langdata)
    result.update(scanret_exploitdb[0])
    result.update(scanret_jvn[0])
    result.update(scanret_nvd[0])
    result.update(scanret_winupdate[0])
    count = scanret_exploitdb[1] + scanret_jvn[1] + scanret_nvd[
        1] + scanret_winupdate[1]
    scan_endtime = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    history = '''
    Session ID: {}
    Scan started at: {}
    Scan ended at: {}
    Found vulnerable application and available update: {}
    DetExploit Version: {}
    #####################################################################
    '''.format(session_id, scan_starttime, scan_endtime, count,
               DETEXPLOIT_VERSION)
    with open('../history.detexploit', 'w') as wh:
        wh.write(history)
    report.generate_report(langdata, DETEXPLOIT_VERSION, session_id, count,
                           scan_starttime, scan_endtime, exploitdb_success,
                           jvn_success, nvd_success, result)
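
The snippet above only defines the scan itself; how it gets scheduled is not shown. A minimal sketch of driving it on a fixed interval (hypothetical scheduling code, not part of DetExploit; the interval value is an assumption):

import time

SCAN_INTERVAL_SECONDS = 24 * 60 * 60  # assumption: one scan per day

def run_forever():
    # naive scheduler: run a scan, then sleep until the next interval
    while True:
        scheduled_scan()
        time.sleep(SCAN_INTERVAL_SECONDS)
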
Example #2
def run(cfg: DictConfig) -> None:

    # Getting data
    data = get_data(dataname=cfg.stock.name, period=cfg.grain.period)

    # Run optimization
    dct = bayesian_optimization(data, cfg)

    # Generate report
    generate_report(dct, cfg)
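
run expects a Hydra/OmegaConf DictConfig carrying stock.name and grain.period. A minimal sketch of wiring it up as a Hydra entry point (the config path and config name are assumptions, not taken from the original):

import hydra
from omegaconf import DictConfig

@hydra.main(config_path="conf", config_name="config")  # assumed config location
def main(cfg: DictConfig) -> None:
    # delegate to the run() function defined above
    run(cfg)

if __name__ == "__main__":
    main()
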
Example #3
def main():
    # # 1 import non pwi
    nii.import_non_pwi()

    # # 2. import batches
    nii.import_batches(1, MAX_CASE)

    # # 3. import keys
    nii.import_keys()

    # 4. combine batches into a single batch file, and keys into a single key file
    start_case = 1
    end_case = MAX_CASE
    X, Y, size = trainer.combine_batches(start_case, end_case)
    X = np.reshape(X, (-1, SAMPLE_SIZE))
    Y = np.array(Y).flatten()

    # 5.1 training and testing with repeated K-folds (n_splits = 5: train = 80%, test = 20%)
    rpkf = RepeatedKFold(n_splits=5, n_repeats=50, random_state=2652124)
    for train_index, test_index in rpkf.split(X):
        x_train, x_test = X[train_index], X[test_index]
        y_train, y_test = Y[train_index], Y[test_index]
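        # note: each iteration overwrites x_train/x_test and y_train/y_test,
        # so only the final repeated K-fold split is used for training below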

    normalized_x_train = preprocessing.scale(x_train)
    normalized_x_test = preprocessing.scale(x_test)

    if not os.path.isdir(MODEL_DIR):
        os.makedirs(MODEL_DIR)
    for model_name in models:
        print("training with ", model_name)
        model_file_path = os.path.join(
            MODEL_DIR,
            model_name + "[" + str(start_case) + "-" + str(end_case) + "].sav")
        if not os.path.isfile(model_file_path):
            clf = models[model_name].fit(normalized_x_train, y_train)
            joblib.dump(clf, model_file_path)
        else:
            clf = joblib.load(model_file_path)

        # predict and report
        y_predicted = clf.predict(normalized_x_test)
        f1_s = f1_score(y_test, y_predicted)
        acc_s = accuracy_score(y_test, y_predicted)
        print("Model: %s  [%d - %d] f1_score = %f and acc_score = %f" %
              (model_name, start_case, end_case, f1_s, acc_s))
        reporter.generate_report(
            model_name + "[" + str(start_case) + "-" + str(end_case) + "]",
            f1_s, acc_s)
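
As written, the loop over rpkf.split(X) only keeps the indices of its final split, so every model is trained and scored on that single split. A minimal sketch of scoring each split instead (a hypothetical restructuring that reuses the names from the example above, not part of the original):

from sklearn import preprocessing
from sklearn.base import clone
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import RepeatedKFold
import numpy as np

def evaluate_all_splits(models, X, Y, n_splits=5, n_repeats=50, seed=2652124):
    # score every train/test split instead of only the last one
    rpkf = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=seed)
    scores = {name: [] for name in models}
    for train_index, test_index in rpkf.split(X):
        x_train = preprocessing.scale(X[train_index])
        x_test = preprocessing.scale(X[test_index])
        y_train, y_test = Y[train_index], Y[test_index]
        for name, model in models.items():
            clf = clone(model).fit(x_train, y_train)
            y_pred = clf.predict(x_test)
            scores[name].append((f1_score(y_test, y_pred), accuracy_score(y_test, y_pred)))
    # mean (f1, accuracy) per model across all splits
    return {name: np.mean(vals, axis=0) for name, vals in scores.items()}
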
Example #4
    def report(self):
        logging.info("Report")
        self.load_project()
        #self.load_data()
        self.create_dir()
        if self.user is None or self.user is False:
            self.user = __author__
        #data = self.show_project()
        if send_mail(self.user, self.project) is True:
            logging.info(
                "A report email has been sent to %s\nCheck your mailbox!" %
                self.user)
            self.update_task(self.task['name'], "report : mail")
        else:
            logging.info("Impossible to send mail to %s\nCheck your email!" %
                         self.user)
            self.update_task(self.task['name'], "report : mail", False)

        if generate_report(self.task, self.project, self.directory):
            #self.coll.update({"_id": self.task['_id']}, {"$push": {"action":"report: document", "status": True, "date": self.date, "msg": "Ok"}})
            self.update_task(self.task['name'], "report : doc")
            logging.info("Report sent and stored!")
            return sys.exit(0)
        else:
            #self.coll.update({"_id": self.task['_id']}, {"$push": {"action":"report: document", "status": False, "date": self.date, "msg": "Unable to create report document"}})
            self.update_task(self.task['name'], "report : doc", False)
            return sys.exit("Report failed")
Example #5
def generate_saved_report(param):
    """
    Returns a file object which can be stored by the user
    """
    title=param.get('report',[''])[0]
    items=report.load_report(title)
    return ('file',(report.generate_report(title,items),title))
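
A minimal usage sketch, assuming param is a parse_qs-style mapping of query-string parameters to lists of values and that a report named 'weekly_summary' has already been saved (both assumptions; generate_saved_report and the report module come from the example above):

from urllib.parse import parse_qs

param = parse_qs('report=weekly_summary')            # {'report': ['weekly_summary']}
kind, (content, title) = generate_saved_report(param)
# kind == 'file', content is the rendered report, title is the saved report's name
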
Example #6
def core_choice():
    s = ( "exit",
          "projects",
          "procedures",
          "experiments",
          "reagents",
          "equipment",
          "specimens",
          "generate report")

    print "Edit or create new: "
    
    for index, item in enumerate(s):
        print "{}: {}".format(index, item)

    choice = None
    while choice not in range(len(s)):
        try:
            choice = raw_input("=>")
            if choice == "":
                continue
            else:
                choice = int(choice)
                
            if choice == 0:
                return None
            elif choice not in range(len(s)):
                raise ValueError("invalid choice")
        except Exception:
            print "Invalid choice, please try again."

    if s[choice] == "generate report":
        generate_report()
        return True
        
    item = make_choice(s[choice], "SELECT * FROM {}".format(s[choice]))
    
    print "\n{} INFO".format(s[choice]).upper()
    for key in item:
        if 'id' not in key:
            print "{} = {}".format(key, item[key])
    
    edit[s[choice]](item)
    
    return True
Example #7
def core_choice():
    s = ("exit", "projects", "procedures", "experiments", "reagents",
         "equipment", "specimens", "generate report")

    print "Edit or create new: "

    for index, item in enumerate(s):
        print "{}: {}".format(index, item)

    choice = None
    while choice not in range(len(s)):
        try:
            choice = raw_input("=>")
            if choice == "":
                continue
            else:
                choice = int(choice)

            if choice == 0:
                return None
            elif choice not in range(len(s)):
                raise ValueError("invalid choice")
        except Exception:
            print "Invalid choice, please try again."

    if s[choice] == "generate report":
        generate_report()
        return True

    item = make_choice(s[choice], "SELECT * FROM {}".format(s[choice]))

    print "\n{} INFO".format(s[choice]).upper()
    for key in item:
        if 'id' not in key:
            print "{} = {}".format(key, item[key])

    edit[s[choice]](item)

    return True
Example #8
def draw_report_section(width, state, description):
    rooms = state['rooms']
    img = Image.new('RGBA', (width, one_meter * 7), 'white')
    imagedraw = ImageDraw.Draw(img)
    font = ImageFont.truetype('Roboto-Bold.ttf', size=int(0.8 * one_meter))
    for i, s in enumerate(
            report.generate_report(state, description).split('\n')):
        draw_enriched_text(state,
                           img,
                           (one_meter * 3, int(one_meter * (2 + 2.4 * i))),
                           s,
                           font=font)
    return img
Example #9
    def on_generate_report(self, request):
        error = None
        url = ''
        hyp_config = {'user': '******', 'password': '******', 'vcenter': 'vcenter'}
        report = Report(hyp_config)
        try:
            server = report.connect()
        except Error:
            return Response("error", status=500)

        report.report_to_file(report.generate_report(server))
        report.disconnect()

        return Response("ok", status=200)
Example #10
def test_generate_report(tmpdir, monkeypatch):
    output_dir = tmpdir.mkdir("output")
    monkeypatch.setattr("cluster_discovery.tokens.get", lambda x: "mytok")
    monkeypatch.setattr(
        "cluster_discovery.ClusterRegistryDiscoverer.get_clusters",
        lambda x:
        [Cluster("test-cluster-1", "https://test-cluster-1.example.org")],
    )

    responses = {
        "/api/v1/nodes": {
            "items": [{
                "metadata": {
                    "name": "node-1",
                    "labels": {}
                },
                "status": {
                    "capacity": {
                        "cpu": "1",
                        "memory": "1Gi"
                    },
                    "allocatable": {
                        "cpu": "1",
                        "memory": "1Gi"
                    },
                },
            }]
        },
        "/api/v1/pods": {
            "items": []
        },
        "/apis/extensions/v1beta1/ingresses": {
            "items": []
        },
    }

    monkeypatch.setattr(
        "report.request",
        lambda cluster, path: MagicMock(json=lambda: responses.get(path)),
    )
    cluster_summaries = generate_report("https://cluster-registry", None,
                                        False, str(output_dir),
                                        set(['kube-system']), None, None)
    assert len(cluster_summaries) == 1
Example #11
def generate_report(param):
    """
    Either generates a report or saves the report.
    Gets hold of all the variables and passes them along to the generate_report function
    """
    number=int(param.get('number',[''])[0])
    items=[]
    title=param.get('title',[''])[0]
    for i in range(1,number+1):
        stat_type=param.get('type'+str(i),[''])[0]
        if stat_type=='count':
            chart_type=param.get('counttype'+str(i),[''])[0]
            start=param.get('countstart'+str(i)+'_year',[''])[0]+'-'+param.get('countstart'+str(i)+'_month',[''])[0]+'-'+param.get('countstart'+str(i)+'_day',[''])[0]
            end=param.get('countend'+str(i)+'_year',[''])[0]+'-'+param.get('countend'+str(i)+'_month',[''])[0]+'-'+param.get('countend'+str(i)+'_day',[''])[0]

            group=param.get('countgroup'+str(i),[''])
            cutoff=param.get('countcutoff'+str(i),[''])
            calc=param.get('countcalc'+str(i),[''])
            items.append({'type':'count','start':start,'end':end,'cutoff':cutoff,'calculation':calc,'group':group,'chart_type':chart_type})
        elif stat_type=='scatter':
            variables=param.get('scattervariables'+str(i),[''])
            calc=param.get('scattercalc'+str(i),[''])
            items.append({'type':'scatter','variables':variables,'calculation':calc})
        elif stat_type=='compare':
            group=param.get('comparegroup'+str(i),[''])
            variable=param.get('comparevariables'+str(i),[''])[0]
            calc=param.get('comparecalc'+str(i),[''])
            calcvariable=param.get('comparecalcvariable'+str(i),[''])[0]
            cutoff=param.get('comparecutoff'+str(i),[''])
            items.append({'type':'compare','variable':variable,'group':group,'calcvariable':calcvariable,'calculation':calc,'cutoff':cutoff})
    


    action=param.get('action',[''])[0]
    if action=='save':
        report.save_report(title,items)
        return pdf(param)
    elif action=='generate':
        content=report.generate_report(title,items)
        return ('file',(content,title))
Example #12
	def report(self):
		logging.info("Report")
		self.load_project()
		#self.load_data()
		self.create_dir()
		if self.user is None or self.user is False:
			self.user = __author__
		#data = self.show_project()
		if send_mail(self.user, self.project) is True:
			logging.info("A report email has been sent to %s\nCheck your mailbox!" %self.user)
			try:
				self.update_status(self.task['name'], "report : mail")
			except pymongo.errors.OperationFailure:
				pass
		else:
			logging.info("Impossible to send mail to %s\nCheck your email!" %self.user)
			try:
				self.update_status(self.task['name'], "report : mail", False)
			except pymongo.errors.OperationFailure:
				pass
			
		if generate_report(self.task, self.project, self.directory):
			#self.coll.update({"_id": self.task['_id']}, {"$push": {"action":"report: document", "status": True, "date": self.date, "msg": "Ok"}})
			try:
				self.update_status(self.task['name'], "report : doc")
			except pymongo.errors.OperationFailure:
				pass
			logging.info("Report sent and stored!")
			return sys.exit(0)
		else:
			#self.coll.update({"_id": self.task['_id']}, {"$push": {"action":"report: document", "status": False, "date": self.date, "msg": "Unable to create report document"}})
			try:
				self.update_status(self.task['name'], "report : doc", False)
			except pymongo.errors.OperationFailure:
				pass
			return sys.exit("Report failed")
                    "--mail",
                    help="Activates smpt mail service and sends"
                    " mail with a brief violation summary",
                    action="store_true")

args = parser.parse_args()

# Check for arg
if args.test:
    data = "violation:stopplikt,location:åby allé,timestamp:2020-04-09 11.55,position:[443;121],occurred:1"
    data = randomizedresponse.random_response(data, 0.5)
    print("TEST")
    print("Raw data: " + data)
    if args.mail:
        report.generate_mail(data)
        report.generate_report(data)
    else:
        report.generate_report(data)
elif args.live:
    data = input("Input violation data: ")
    data = randomizedresponse.random_response(data, 0.5)
    if args.mail:
        report.generate_mail(data)
        report.generate_report(data)
    else:
        report.generate_report(data)


def test(coin_prob):
    data = "violation:fdsjmundgnhhg,location:åby allé,timestamp:2020-04-09 11.55,position:[443;121],occurred:1"
    data = randomizedresponse.random_response(data, coin_prob)
Example #14
        type=int,
        default=2)
    return parser.parse_args()


def run_commits_generator(github_token, commit_number):
    spammer = Popen(SPAMMER_FILE + " " + github_token + " " + USERNAME + " " +
                    REPOSITORY_NAME + " " + commit_number,
                    shell=True,
                    stdin=PIPE,
                    stdout=DEVNULL,
                    stderr=STDOUT)
    print("INFO: Commits generator started pushing new changes to github")
    return spammer


if __name__ == '__main__':
    args = get_arguments()
    github_token = args.token
    commit_number = args.commit_number
    time_to_wait_for_changes = commit_number * TIME_DELAY + 5

    spammer = run_commits_generator(github_token, str(commit_number))
    github_session = create_github_session(github_token)
    monitor_changes(github_token, github_session, time_to_wait_for_changes,
                    USERNAME, REPOSITORY_NAME, TESTS_TO_LAUNCH, TIME_DELAY)

    spammer.terminate()
    generate_report()
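
Building the command by string concatenation with shell=True is fragile if any value contains spaces. A minimal sketch of the same launch with an argument list (an alternative, not the author's code; it assumes SPAMMER_FILE is directly executable and reuses the module constants from the example above):

from subprocess import DEVNULL, PIPE, Popen, STDOUT

def run_commits_generator_argv(github_token, commit_number):
    # same launch without shell=True: pass the arguments as a list
    cmd = [SPAMMER_FILE, github_token, USERNAME, REPOSITORY_NAME, str(commit_number)]
    return Popen(cmd, stdin=PIPE, stdout=DEVNULL, stderr=STDOUT)
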
Example #15
    def scan_main(self):
        pythoncom.CoInitialize()
        scan_starttime = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
        session_id = base64.b64encode(scan_starttime.encode("ascii")).decode("ascii")
        self.update_status(langdata['OP_START'])

        ## ExploitDB ########################################################
        exploitdb_success = False
        exploitdb_vulndata = None  # stays None when the ExploitDB source is disabled in config

        self.update_status(langdata['EXPLOITDB_DOWNLOAD_INTRO'])
        if cp.get('exploitdb', 'use_exploitdb') == 'True':
            ExploitDB.download_vulndata(langdata)
            self.update_status(langdata['EXPLOITDB_EXTRACT_GUI'])
            extracted = ExploitDB.extract_windows_exploit(langdata)
            self.update_status(langdata['EXPLOITDB_PARSE'])
            exploitdb_vulndata = ExploitDB.parse_vulndata(extracted)
            if exploitdb_vulndata is not None:
                exploitdb_success = True
        if cp.get('general', 'do_not_save_vulndata') == 'True':
            os.remove(cp.get('exploitdb', 'vulndata_filename'))
        #####################################################################

        ## JVN ##############################################################
        jvn_success = False
        jvn_vulndata = None  # stays None when the JVN source is disabled in config

        self.update_status(langdata['JVN_DOWNLOAD_INTRO'])
        if cp.get('jvn', 'use_jvn') == 'True':
            self.jvn_download_vulndata(langdata)
            jvn_vulndata = product_dict
            if jvn_vulndata is not None:
                jvn_success = True
        #####################################################################

        ## NVD ##############################################################
        nvd_success = False
        nvd_vulndata = None  # stays None when the NVD source is disabled in config

        self.update_status(langdata['NVD_DOWNLOAD_INTRO'])
        if cp.get('nvd', 'use_nvd') == 'True':
            nvd_vulndata = NVD.download_vulndata(langdata)
            if nvd_vulndata is not None:
                nvd_success = True
        #####################################################################

        ## Get locally installed applications ###############################
        installed = {}

        self.update_status(langdata['WMI_APP_RET'])
        installed.update(local_app.getapp_from_wmi())
        self.update_status(langdata['REG_APP_RET'])
        installed.update(local_app.getapp_from_hklm())
        installed.update(local_app.getapp_from_hklmwow64())
        installed.update(local_app.getapp_from_hkcu())
        #####################################################################

        ## Scan #############################################################
        result = {}
        count = 0

        self.update_status(langdata['SCAN_MSG_ONE'])
        scanret_exploitdb = ExploitDB.scan(langdata, exploitdb_vulndata, installed)
        scanret_jvn = JVN.scan(langdata, jvn_vulndata, installed)
        scanret_nvd = NVD.scan(langdata, nvd_vulndata, installed)
        self.update_status(langdata['SCAN_MSG_TWO'])
        scanret_winupdate = WindowsUpdate.scan(langdata)

        result.update(scanret_exploitdb[0])
        result.update(scanret_jvn[0])
        result.update(scanret_winupdate[0])
    
        count = scanret_exploitdb[1] + scanret_jvn[1] + scanret_winupdate[1]
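        # note: scanret_nvd is computed above but not merged into result/count in this variant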
        #####################################################################

        scan_endtime = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")

        ## Report ###########################################################
        self.update_status(langdata['GEN_REPORT'])
        report.generate_report( langdata, 
                                'v1.4-ALPHA-GUI', 
                                session_id, 
                                count, 
                                scan_starttime, 
                                scan_endtime, 
                                exploitdb_success, 
                                jvn_success, 
                                nvd_success, 
                                result)
        #####################################################################

        self.update_status(langdata['SCAN_END'])
Example #16
# -*- coding: utf-8 -*-
from time import time

from __init__ import to_minimize, neurons, dimensions, J_grad_wrap
from complex_box import complex_box
from first_order_methods.simulated_annealing import simulated_annealing
from report import generate_report


t_start = time()
l = complex_box(to_minimize, neurons * (2 + dimensions), 
                [-250.0, 1.0, -2.0, -2.0] * neurons, 
                [250.0, 10.0, 6.0, 6.0] * neurons)
l = simulated_annealing(l, J_grad_wrap, to_minimize)
t_end = time()


generate_report(l, t_start, t_end)

print u"Готово"  # prints "Done" (Russian)
Example #17
def main():
    if os.name == "nt":
        # Fix for UTF-8 on Windows
        if sys.stdout.encoding != "cp65001":
            print(
                "Changing console codepage to UTF-8 to allow for UTF-8 filenames."
            )
            os.system("chcp 65001")
            sys.exit(
                "Please restart the tool for the codepage change to take effect."
            )
    timestamp = datetime.now().strftime('%Y-%m-%d--%H-%M-%S')
    start_time = datetime.now()
    # Input argument parsing
    parser = argparse.ArgumentParser()
    parser.add_argument('--gdrive',
                        action="store_true",
                        dest="drive",
                        help='Analyzing Google Drive')
    parser.add_argument('--dropbox',
                        action="store_true",
                        dest="dropbox",
                        help='Analyzing Dropbox')
    parser.add_argument('--positive',
                        action="store_true",
                        dest="positive",
                        help='Enable positive hashing')
    parser.add_argument('--negative',
                        action="store_true",
                        dest="negative",
                        help='Enable negative hashing')
    parser.add_argument('--md5file',
                        default="None",
                        help='The user-given txt file of md5 hashes')
    parser.add_argument('--sha256file',
                        default="None",
                        help='The user-given txt file of sha256 hashes')
    parser.add_argument('--sha1file',
                        default="None",
                        help='The user-given txt file of sha1 hashes')
    args = parser.parse_args()
    input_parameter_test = error_check(args)
    log_file = create_log_file(timestamp)
    log_and_print(log_file, "\n#######################################")
    log_and_print(log_file, "############## LetItRain ##############")
    log_and_print(log_file, "#######################################\n")
    log_and_print(log_file, "Time started: " + timestamp + "\n")
    if args.dropbox:
        log_and_print(log_file, "Running Dropbox tool...\n")
        folder_name, file_list, deleted_file_list = dbox.dbox(
            timestamp, log_file)
    else:
        log_and_print(log_file, "Running Google Drive tool...\n")
        folder_name, file_list, deleted_file_list = gdrive.google_drive(
            timestamp, log_file)
    if args.positive:
        log_and_print(log_file, "Performing positive hashing...")
        results = hash_checker.hash_checker(folder_name, args, log_file)
    elif args.negative:
        log_and_print(log_file, "Performing negative hashing...")
        results = hash_checker.hash_checker(folder_name, args, log_file)
    else:
        results = []
    end_time = datetime.now()
    run_time = str(end_time - start_time)
    log_and_print(log_file, "Total run time: " + run_time)
    log_and_print(log_file, "Generating report... ", False)
    report.generate_report(results, folder_name, args, timestamp, run_time,
                           file_list, deleted_file_list)
    log_and_print(log_file, "Done!")
    log_and_print(log_file, "Exiting...")
    log_file.close()
Example #18
def main(session_id, cp, langdata):
    DETEXPLOIT_VERSION = 'v1.4-ALPHA-CLI'
    scan_starttime = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    if session_id == None:
        session_id = base64.b64encode(
            scan_starttime.encode("ascii")).decode("ascii")
    utils.config_test(cp)

    ExploitDB_RetData = ExploitDB.proc_data(cp, langdata)
    ExploitDB_VulnData = ExploitDB_RetData[0]
    ExploitDB_Success = ExploitDB_RetData[1]

    JVN_RetData = JVN.proc_data(cp, langdata)
    JVN_VulnData = JVN_RetData[0]
    JVN_Success = JVN_RetData[1]

    NVD_RetData = NVD.proc_data(cp, langdata)
    NVD_VulnData = NVD_RetData[0]
    NVD_Success = NVD_RetData[1]

    installed = local_app.get_all()

    ## Scan #############################################################
    result = {}
    count = 0

    scanret_exploitdb = ExploitDB.scan(langdata, ExploitDB_VulnData, installed)
    scanret_jvn = JVN.scan(langdata, JVN_VulnData, installed)
    scanret_nvd = NVD.scan(langdata, NVD_VulnData, installed)
    scanret_winupdate = WindowsUpdate.scan(langdata)

    result.update(scanret_exploitdb[0])
    result.update(scanret_jvn[0])
    result.update(scanret_nvd[0])
    result.update(scanret_winupdate[0])

    count = scanret_exploitdb[1] + scanret_jvn[1] + scanret_nvd[
        1] + scanret_winupdate[1]
    #####################################################################

    scan_endtime = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    history = '''
    Session ID: {}
    Scan started at: {}
    Scan ended at: {}
    Found vulnerable application and available update: {}
    DetExploit Version: {}

    #####################################################################

    '''.format(session_id, scan_starttime, scan_endtime, count,
               DETEXPLOIT_VERSION)
    with open('../history.detexploit', 'w') as wh:
        wh.write(history)

    ## Report ###########################################################
    report.generate_report(cp, langdata, DETEXPLOIT_VERSION, session_id, count,
                           scan_starttime, scan_endtime, ExploitDB_Success,
                           JVN_Success, NVD_Success, result)

    cprint('===========================================================',
           'red')
    cprint(langdata['RESONE'] + str(count) + langdata['RESTWO'], 'red')
    cprint('===========================================================',
           'red')
Example #19
                                                    y,
                                                    test_size=0.2,
                                                    random_state=0)

#Reshape to 28x28 matrix with 1 channel
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)

#Change range from 0-255 to 0 -1
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

#Save Y_true to use in confusion matrix
Y_true = y_test
#One hot encoding
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)

#Test the model

score = loaded_model.evaluate(X_test, Y_test, verbose=1)
print("Testing Loss : ", score[0], " Accuracy : ", score[1] * 100, "%")

Y_predicted = loaded_model.predict(X_test)
Y_predicted = np.argmax(Y_predicted, axis=1)
confusion_metrics.cnf_mtrx(Y_true, Y_predicted)

report.generate_report(Y_true, Y_predicted)
Example #20
            with samples as (
              select table_schema, 
                     table_name, 
                     query_to_xml(format('select * from %I.%I limit 1', table_schema, table_name), true, true, '') 
                     as sample_row
              from information_schema.tables 
              where table_schema = '{schema_name}'
            )
            select c.column_name as column, c.data_type,
                case when s.sample_row is document 
                    then (xpath('/row/'||column_name||'/text()', s.sample_row))[1]::text 
                else null end as sample_value
            from information_schema.columns c 
              join samples s on (s.table_schema, s.table_name) = (c.table_schema, c.table_name)
            where c.table_name = '{table_name}'
            order by ordinal_position
            """.format(**vars())

            curs.execute(q)

            table['columns'] = curs.fetchall()

        schema['tables'] = tables

    return schemas


tmpl_vars = {'dictionary': get_dictionary(), 'title': args.title}

report.generate_report(tmpl_vars, args)
Example #21
def scan():
    pythoncom.CoInitialize()
    DETEXPLOIT_VERSION = 'v1.4-ALPHA-GUI'
    scan_starttime = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    session_id = base64.b64encode(scan_starttime.encode("ascii")).decode("ascii")
    utils.config_test()

    progress_label.config(text=langdata['EXPLOITDB_DOWNLOAD_INTRO'])
    ExploitDB_RetData = ExploitDB.proc_data(langdata)
    ExploitDB_VulnData = ExploitDB_RetData[0]
    ExploitDB_Success = ExploitDB_RetData[1]

    progress_label.config(text=langdata['JVN_DOWNLOAD_INTRO'])
    JVN_RetData = proc_jvn()
    JVN_VulnData = JVN_RetData[0]
    JVN_Success = JVN_RetData[1]

    progress_label.config(text=langdata['NVD_DOWNLOAD_INTRO'])
    NVD_RetData = NVD.proc_data(langdata)
    NVD_VulnData = NVD_RetData[0]
    NVD_Success = NVD_RetData[1]

    progress_label.config(text=langdata['GET_LOCAL_APP'])
    installed = local_app.get_all()

    ## Scan #############################################################
    result = {}
    count = 0

    scanret_exploitdb = ExploitDB.scan(langdata, ExploitDB_VulnData, installed)
    scanret_jvn = JVN.scan(langdata, JVN_VulnData, installed)
    scanret_nvd = NVD.scan(langdata, NVD_VulnData, installed)
    scanret_winupdate = WindowsUpdate.scan(langdata)

    result.update(scanret_exploitdb[0])
    result.update(scanret_jvn[0])
    result.update(scanret_nvd[0])
    result.update(scanret_winupdate[0])
    
    count = scanret_exploitdb[1] + scanret_jvn[1] + scanret_nvd[1] + scanret_winupdate[1]
    #####################################################################
    scan_endtime = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    history = '''
    Session ID: {}
    Scan started at: {}
    Scan ended at: {}
    Found vulnerable application and available update: {}
    DetExploit Version: {}

    #####################################################################

    '''.format(session_id, scan_starttime, scan_endtime, count, DETEXPLOIT_VERSION)
    with open('../history.detexploit', 'w') as wh:
        wh.write(history)

    ## Report ###########################################################
    report.generate_report( langdata, 
                            DETEXPLOIT_VERSION, 
                            session_id, 
                            count, 
                            scan_starttime, 
                            scan_endtime, 
                            ExploitDB_Success, 
                            JVN_Success, 
                            NVD_Success, 
                            result)
Example #22
 elif cmd == 3:
     # look up a record -> print it
     searchUser = input('검색할 사용자명을 입력하세요=>')
     print(sungjuk[searchUser])
 elif cmd == 4:
     with open('sungjuk.dat','w',encoding='utf-8') as file:
         calculatedSungjuk = runSungjuk()
         for element in calculatedSungjuk:
             _name = element[0]
             file.write(_name+',')
             _nums = element[1] # [100, 90, 80, 270, 90.0]
             for _number in _nums:
                 file.write("{0},".format(_number))
             file.write('\n')
         print('++ 파일에 저장 하였습니다.')
 elif cmd == 5:
     with open('sungjuk.dat','r',encoding='utf-8') as file:
         for line in file:
             # sungjuk['USER1'] = [100, 90, 80]
             _values = line.split(',')
             sungjuk[_values[0]] = [int(_values[1]), int(_values[2]), int(_values[3]), int(_values[4]), float(_values[5])]
         print('++ 파일을 읽어 왔습니다.')
 elif cmd == 6:
     dir = os.path.dirname(os.path.realpath(__file__))
     infile = dir + '/sungjuk.dat'
     outfile = dir + '/report.csv'
     report.generate_report(input_file=infile, output_file=outfile)
 elif cmd == 9:
     quit()
 else:
     print()
Example #23
        tables = curs.fetchall()

        for table in tables:
            table_name = table['table']

            q = """
            select column_name as column, data_type, t3.description
            from information_schema.columns t1
            join pg_class t2 on (t1.table_name = t2.relname)
            left outer join pg_description t3 on (t2.oid = t3.objoid and t3.objsubid = t1.ordinal_position)
            where table_schema = '{schema_name}'
              and table_name = '{table_name}'
            order by ordinal_position
            """.format(**vars())

            curs.execute(q)

            table['columns'] = curs.fetchall()

        schema['tables'] = tables

    return schemas

tmpl_vars = {
    'dictionary': get_dictionary(),
    'title': args.title
}

report.generate_report(tmpl_vars, args)
Example #24
parser.add_argument('-t',
                    '--model_type',
                    default=DEFAULT_MODEL_TYPE,
                    choices=Model.MODEL_OPTIONS,
                    help='type of model')
parser.add_argument('-r',
                    '--report',
                    default=None,
                    choices=REPORTS.keys(),
                    help='type of report')
parser.add_argument('-v',
                    '--visualize',
                    action='store_true',
                    help='visualize the results')

args = parser.parse_args()

if args.report is None:
    start_time = time.time()
    data = read_data(args.dir, args.max_samples, args.min_sample)
    print('Read data in {:.2f} seconds.'.format(time.time() - start_time))

    model = Model(model_type=args.model_type)
    model.train(data.train)
    model.test(data.test, visualize=args.visualize)
    if args.visualize and args.model_type == Model.NAIVE_BAYES:
        visualizer = ModelVisualizer(model)
        visualizer.visualize_naive_bayes()
else:
    generate_report(args.dir, args.report)
Example #25
def main():
    global session_id

    cprint(figlet_format('   DetExploit'), 'red', attrs=['bold'], end='')
    print('===========================================================')
    print(langdata['WELCOME_MESSAGE'])
    print('===========================================================')

    scan_starttime = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    if session_id == None:
        session_id = base64.b64encode(
            scan_starttime.encode("ascii")).decode("ascii")
    utils.config_test()

    ## ExploitDB ########################################################
    exploitdb_success = False
    exploitdb_vulndata = None  # stays None when the ExploitDB source is disabled in config

    if cp.get('exploitdb', 'use_exploitdb') == 'True':
        ExploitDB.download_vulndata(langdata)
        extracted = ExploitDB.extract_windows_exploit(langdata)
        exploitdb_vulndata = ExploitDB.parse_vulndata(extracted)
        if exploitdb_vulndata is not None:
            exploitdb_success = True
    if cp.get('general', 'do_not_save_vulndata') == 'True':
        os.remove(cp.get('exploitdb', 'vulndata_filename'))
    #####################################################################

    ## JVN ##############################################################
    jvn_success = False
    jvn_vulndata = None  # stays None when the JVN source is disabled in config

    if cp.get('jvn', 'use_jvn') == 'True':
        jvn_vulndata = JVN.download_vulndata(langdata)
        if jvn_vulndata is not None:
            jvn_success = True
    #####################################################################

    ## NVD ##############################################################
    nvd_success = False
    nvd_vulndata = None  # stays None when the NVD source is disabled in config

    if cp.get('nvd', 'use_nvd') == 'True':
        nvd_vulndata = NVD.download_vulndata(langdata)
        if nvd_vulndata is not None:
            nvd_success = True
    #####################################################################

    ## Get locally installed applications ###############################
    installed = {}

    installed.update(local_app.getapp_from_wmi())
    installed.update(local_app.getapp_from_hklm())
    installed.update(local_app.getapp_from_hklmwow64())
    installed.update(local_app.getapp_from_hkcu())
    #####################################################################

    ## Scan #############################################################
    result = {}
    count = 0

    scanret_exploitdb = ExploitDB.scan(langdata, exploitdb_vulndata, installed)
    scanret_jvn = JVN.scan(langdata, jvn_vulndata, installed)
    scanret_nvd = NVD.scan(langdata, nvd_vulndata, installed)
    scanret_winupdate = WindowsUpdate.scan(langdata)

    result.update(scanret_exploitdb[0])
    result.update(scanret_jvn[0])
    result.update(scanret_nvd[0])
    result.update(scanret_winupdate[0])

    count = scanret_exploitdb[1] + scanret_jvn[1] + scanret_nvd[
        1] + scanret_winupdate[1]
    #####################################################################

    scan_endtime = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    history = '''
    Session ID: {}
    Scan started at: {}
    Scan ended at: {}
    Found vulnerable application and available update: {}
    DetExploit Version: {}
    #####################################################################
    '''.format(session_id, scan_starttime, scan_endtime, count,
               DETEXPLOIT_VERSION)
    with open('../history.detexploit', 'w') as wh:
        wh.write(history)

    ## Report ###########################################################
    report.generate_report(langdata, DETEXPLOIT_VERSION, session_id, count,
                           scan_starttime, scan_endtime, exploitdb_success,
                           jvn_success, nvd_success, result)

    cprint('===========================================================',
           'red')
    cprint(langdata['RESONE'] + str(count) + langdata['RESTWO'], 'red')
    cprint('===========================================================',
           'red')
Example #26
from webscraping import process_url
from report import generate_report
import json

if __name__ == '__main__':
    #tree = process_url('freeCodeCamp/freeCodeCamp')
    #tree = process_url('vivadecora/desafio-backend-trabalhe-conosco')
    #tree = process_url('ohmyzsh/ohmyzsh')
    tree = process_url('Edlaine-Pontes/Forkids')
    # print(json.dumps(tree, indent = 2))
    generate_report('Edlaine-Pontes/Forkids', tree)
Example #27
def do_PSI(log_time_file):
    
    st_pnt = 1
    end_pnt = 156
    if not check_pnts(st_pnt, end_pnt):
        return

    ##################################

    list_mte_end_time = []      # time when reading data from the MTE meter finished
    list_mte_delta_time = []    # interval spent reading data from the MTE meter
    list_blob_time = []         # time of the last write to the statistics BLOB cell

    ##################################

    cur_pnt = st_pnt
    # Initialization for receiving data from Binom
    init_main_Binom()
    # Initialization for controlling the MTE devices
    counter_MTE, generator_MTE, parameters_MTE = init_main_MTE()
    try:

        ##################################
        ##################################
        ##################################
        make_psi.binom_data.open_svg_channel()
        ##################################
        ##################################
        ##################################

        while cur_pnt <= end_pnt:

            #   Exclude a few PSI points from the new methodology:
            #   Points 125 and 126: the nominal current is 0.1 mA, while the MTE meter measures current from 1 mA to 12 A.
            #   Points 140 and 142: the fundamental frequency is 42.5 Hz, while the MTE generator reproduces frequencies from 45 to 70 Hz.

            if (cur_pnt == 140) or (cur_pnt == 142) or (cur_pnt == 125) or (cur_pnt == 126):
                cur_pnt += 1
                list_mte_delta_time.append(0.0)          # interval spent reading data from the MTE meter
                list_mte_end_time.append(0.0)       # time when reading data from the MTE meter finished
                list_blob_time.append(0.0)
                continue

            print("\r\n"+"current PSI point is "+str(cur_pnt)+"\r\n")
            #1.3 - Set PSI point on Generator
            sig = measurement.measurement_storage.get_etalon_signal(cur_pnt)
            parameters_MTE.init_data(sig)
            set_PSI_pnt_flag_Generator = generator_MTE.set_PSI_point(sig,parameters_MTE, log_time_file)
            if set_PSI_pnt_flag_Generator == True:      # the PSI point was set correctly according to the generator
                counter_MTE.ser_port.timeout = 1
                #counter_MTE.set_ranges_for_CNT(parameters_MTE.get_ranges_CNT())     # set measurement ranges for the meter
                # 1.3.2 - Check by Counter
                #set_PSI_pnt_flag_Counter = counter_MTE.check_PSI_pnt(sig, parameters_MTE.get_exist_harms_flag(), log_time_file)
                set_PSI_pnt_flag_Counter = counter_MTE.check_PSI_pnt(sig, parameters_MTE, log_time_file)
                if set_PSI_pnt_flag_Counter == True:      # the PSI point check passed
                    #4 Read data from MTE Counter

                    # experiment with continuous measurement over 5, 10 or 15 sec on the MTE meter
                    #1 set the required measurement time on the meter
                    #2 open the svg channel
                    #3 find the start of a 5-second interval
                    #4 sleep for 5 seconds
                    #5 read and store the data from the MTE meter
                    #6 read the data from Binom

                    test_read_by_one_meas = False
                    #test_read_by_one_meas = True

                    if test_read_by_one_meas == True:

                        meas_time_CNT = 5 # [sec.]
                        #meas_time_CNT = 10 # [sec.]
                        #meas_time_CNT = 20 # [sec.]

                        #1
                        counter_MTE.set_meas_time(meas_time_CNT)

                        #2
                        #print(str(dt.datetime.now())+"  before open svg channel")
                        make_psi.binom_data.open_svg_channel()
                        print(str(dt.datetime.now())+"  after  open svg channel")

                        #3
                        py_second = dt.datetime.now().second
                        t_time = dt.datetime.now()
                        while py_second % 5 != 0:
                            t_time = dt.datetime.now()
                            py_second = t_time.second

                        before_st_mte_meas = dt.datetime.now()
                        print(str(before_st_mte_meas)+"       choose 5 sec start")

                        #4
                        time.sleep(meas_time_CNT + 0.4)

                        cur_mte_time = dt.datetime.now()
                        delta_mte_time = cur_mte_time - t_time
                        print(str(cur_mte_time.time())+         "         cur_mte_time")
                        print(str(delta_mte_time)+       "         delta_time")

                        #5
                        list_ampl_full = []
                        list_angle_full = []
                        freq_Cnt = 0.0
                        freq_Cnt, list_ampl_full, list_angle_full  = counter_MTE.get_meas_from_counter()
                        '''
                        print("Counter MTE measurements")
                        for a_elem in zip(list_ampl_full, list_angle_full):
                            print(str(a_elem[0])+" ")
                            print(str(a_elem[1])+" ")
                        print(str(freq_Cnt))
                        '''
                        flag = 2
                        measurement.measurement_storage.set_mte_measured_signal(flag,cur_pnt,freq_Cnt,list_ampl_full,list_angle_full)
                        
                        #6
                        t_now = dt.datetime.now()
                        print(str(t_now)+     "         t_now before read Binom data")
                        make_psi.binom_data.read_data(cur_pnt)
                        cur_blob_time = make_psi.binom_data.get_blob_time()
                        t_now = dt.datetime.now()
                        print(str(t_now)+     "         t_now after  read Binom data")

                    else:
                        ####
                        counter_MTE.ser_port.timeout = 0.2
                        counter_MTE.start_auto_measure()    # enable automatic result output mode

                        meas_time_CNT = 5 # [sec.]
                        #meas_time_CNT = 10 # [sec.]
                        #meas_time_CNT = 20 # [sec.]

                        ###################     Set the generator measurement time to 5 seconds
                        meas_time_GEN = 5 # [sec.]
                        generator_MTE.ser_port.timeout = 0.2
                        generator_MTE.set_meas_time(meas_time_GEN)

                        #print(str(dt.datetime.now())+"  before open svg channel")
                        #make_psi.binom_data.open_svg_channel()
                        #print(str(dt.datetime.now())+"  after  open svg channel")

                        ################
                        ################
                        ################

                        # stub to test the hypothesis that the signal at the output
                        # of the MTE generator has not fully settled by the time all the checks are done
                        time.sleep(3.0)

                        ################
                        ################
                        ################
                        # look for the start of a 5-second interval
                        
                        py_second = dt.datetime.now().second
                        t_time = dt.datetime.now()

                        # Instead of a forced 2-second pause,
                        # check whether at least 2 seconds remain before the start of the nearest 5-second interval


                        while py_second % 5 != 0:
                            t_time = dt.datetime.now()
                            py_second = t_time.second

                        before_st_mte_meas = dt.datetime.now()
                        print(str(before_st_mte_meas)+"       choose 5 sec start")

                        #make_psi.binom_data.open_svg_channel()
                        #print(str(dt.datetime.now())+"  after  open svg channel")

                        ####
                        #counter_MTE.ser_port.timeout = 0.1
                        #counter_MTE.start_auto_measure()    # enable automatic result output mode
                        ##################
                        #make_psi.binom_data.open_svg_channel()
                        ##################
                        # read data from the meter for meas_time_CNT seconds, with 1.0 sec between new readings
                        counter_MTE.readByTimeT(meas_time_CNT,1.0)

                        cur_mte_time = dt.datetime.now()
                        delta_mte_time = cur_mte_time - t_time
                        #print(str(cur_mte_time.time())+         "         cur_mte_time")
                        #print(str(delta_mte_time)+       "         delta_time")

                        # disable automatic result output mode after the end of the recording interval T
                        counter_MTE.ser_port.timeout = 0.2
                        counter_MTE.stop_auto_measure()
                        # read the measurement results from Binom

                        t_now = dt.datetime.now()
                        #print(str(t_now)+     "         t_now before read Binom data")

                        make_psi.binom_data.read_data(cur_pnt)
                        cur_blob_time = make_psi.binom_data.get_blob_time()

                        t_now = dt.datetime.now()
                        #print(str(t_now)+     "         t_now after  read Binom data")

                        ###################
                        # get the averaged 'short message' data from the MTE meter
                        list_ampl_full = []
                        list_angle_full = []
                        freq_Cnt = 0.0
                        freq_Cnt, list_ampl_full, list_angle_full  = counter_MTE.get_mean_values()
                        '''
                        print("Counter MTE measurements")
                        for a_elem in zip(list_ampl_full, list_angle_full):
                            print(str(a_elem[0])+" ")
                            print(str(a_elem[1])+" ")
                        print(str(freq_Cnt))
                        '''
                        # for unclear reasons passing the measurements via zip does not work, so they are passed as separate lists
                        flag = 2
                        measurement.measurement_storage.set_mte_measured_signal(flag,cur_pnt,freq_Cnt,list_ampl_full,list_angle_full)
                        
                    # read the measurements from the MTE generator
                    list_Gen_ampl_full = []
                    list_Gen_angle_full = []
                    freq_Gen = 0.0
                    freq_Gen, list_Gen_ampl_full, list_Gen_angle_full = generator_MTE.get_meas_from_generator()
                    flag = 1
                    '''
                    print("Generator MTE measurements")
                    for a_elem in zip(list_Gen_ampl_full, list_Gen_angle_full):
                        print(str(a_elem[0])+" ")
                        print(str(a_elem[1])+" ")
                    print(str(freq_Gen))
                    #'''
                    measurement.measurement_storage.set_mte_measured_signal(flag,cur_pnt,freq_Gen,list_Gen_ampl_full,list_Gen_angle_full)
                    
                    
                    #cur_mte_time       str(datetime.now().time()) == '14:57:06.416287'
                    #cur_blob_time

                    list_mte_delta_time.append(delta_mte_time)          # interval spent reading data from the MTE meter
                    list_mte_end_time.append(cur_mte_time.time())       # time when reading data from the MTE meter finished
                    list_blob_time.append(cur_blob_time.time())


            cur_pnt += 1

        
    #'''
    except Exception as ex:
        print("Exception occur:", ex)
    finally:

        # generate the PSI report

        make_psi.binom_data.close_svg_channel()

        list_of_times = [list_mte_delta_time, list_mte_end_time, list_blob_time]
        '''
        for idx in range(len(list_mte_delta_time)):
            print("mte_delta  "+str(list_of_times[0][idx]) +\
                    "mte_end    "+str(list_of_times[1][idx]) + \
                    "blob_time  "+str(list_of_times[2][idx]) )
        '''

        #report.generate_report(st_pnt, end_pnt, list_of_times) #cur_pnt
        report.generate_report(st_pnt, cur_pnt - 1, list_of_times) 

        make_psi.deinit()
        counter_MTE.ser_port.close()
        generator_MTE.ser_port.close()
    #'''

    print("Ask finished")
Example #28
def get_report():
    return generate_report()
Example #29
                range_count['r4'] * 100 / total_dbid)
        if range_count['r5']:
            range_percentile['r5p'] = '%0.3f' % float(
                range_count['r5'] * 100 / total_dbid)
        if range_count['r6']:
            range_percentile['r6p'] = '%0.3f' % float(
                range_count['r6'] * 100 / total_dbid)
        if range_count['r7']:
            range_percentile['r7p'] = '%0.3f' % float(
                range_count['r7'] * 100 / total_dbid)
        if range_count['r8']:
            range_percentile['r8p'] = '%0.3f' % float(
                range_count['r8'] * 100 / total_dbid)
        if range_count['r9']:
            range_percentile['r9p'] = '%0.3f' % float(
                range_count['r9'] * 100 / total_dbid)
        if range_count['r10']:
            range_percentile['r10p'] = '%0.3f' % float(
                range_count['r10'] * 100 / total_dbid)
        analyse_results.append(
            Model(table, amount, range_count, range_percentile, sql,
                  total_dbid))
        print datetime.datetime.now(), ' End'

    return analyse_results


if __name__ == '__main__':
    generate_report(analyse_db())
    db.close()
Example #30
 def post(self):
     students_score = safe_json_decode(self.get_argument('students_score'))
     result = report.generate_report(self, students_score)
     self.write(json.dumps(result))
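
A minimal client-side sketch for this handler, assuming it is mounted at /report on a local Tornado server and that the scores payload is arbitrary example data (URL, port and payload are all assumptions):

import json
import requests

# form-encoded POST; the handler reads it via self.get_argument('students_score')
payload = {"students_score": json.dumps({"alice": 92, "bob": 77})}
resp = requests.post("http://localhost:8888/report", data=payload)
print(resp.json())
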
Example #31
def main():
    # Define the description of the program
    file1 = open("src/description.txt", "r")
    description_text = '\n'.join(file1.readlines())
    file1.close()
    parser = argparse.ArgumentParser(description=description_text)

    # optional arguments for initialize the analysis
    parser.add_argument(
        '--ini_stat',
        action='store_true',
        help='Show the initial statistics of the Database (default: False)')
    parser.add_argument(
        '--create_unique_word',
        action='store_true',
        help='Rebuild the analysis by unique word (takes about 20 sec!)')
    parser.add_argument(
        '--query_ybirth',
        action='store_true',
        help=
        'Remake the complete query for year of born in wikipedia API and scraping Poetry foundation (takes about 5 min!)'
    )

    # optional arguments for the analysis
    parser.add_argument('--word',
                        metavar='W',
                        type=str,
                        nargs='+',
                        help='Show the statistics by decade of the word')
    parser.add_argument(
        '--year',
        metavar='Y',
        type=int,
        nargs='+',
        help='Show the statistics of the Year when it is possible')
    parser.add_argument('--general',
                        action='store_true',
                        help='If active a general report is generated')

    #optional arguments for the report
    parser.add_argument(
        '--mailto',
        type=str,
        nargs='+',
        help=
        'Send a report to the email address provided, this command expects a valid email as parameter.'
    )

    args = parser.parse_args()

    print(args)

    # Import data and create new variables
    path = 'input/PoetryFoundationData.csv'
    df = import_data_poetry(path, args.ini_stat, args.create_unique_word,
                            args.query_ybirth)

    tipo_var = []
    # Get the analysis by words
    if args.word:
        analysis_unique_word(df, args.word[0])
        tipo_var.append(["word", args.word[0]])

    if args.year:
        analysis_year(df, args.year[0])
        tipo_var.append(["year", args.year[0]])

    if args.general:
        analysis_general(df)
        tipo_var.append(["general", ''])

    # Generate the report
    for e in tipo_var:
        generate_report(e)

    if args.mailto:
        generate_mail_report(args.mailto[0])
Example #32
def scan():
    pythoncom.CoInitialize()
    DETEXPLOIT_VERSION = 'v1.4-ALPHA-GUI'
    scan_starttime = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    session_id = base64.b64encode(
        scan_starttime.encode("ascii")).decode("ascii")
    utils.config_test(cp)  ### no cp  default

    ExploitDB_VulnData = {}
    ExploitDB_Success = {}
    if os.path.isfile('ExploitDB.pickle') and os.path.isfile(
            'ExploitDB_Success.pickle'):
        with open('ExploitDB.pickle', 'rb') as f:
            ExploitDB_VulnData = pickle.load(f)
        with open('ExploitDB_Success.pickle', 'rb') as f:
            ExploitDB_Success = pickle.load(f)
    else:
        progress_label.config(text=langdata['EXPLOITDB_DOWNLOAD_INTRO'])
        ExploitDB_RetData = ExploitDB.proc_data(cp, langdata)
        ExploitDB_VulnData = ExploitDB_RetData[0]
        ExploitDB_Success = ExploitDB_RetData[1]

        with open('ExploitDB.pickle', 'wb') as f:
            pickle.dump(ExploitDB_VulnData, f, pickle.HIGHEST_PROTOCOL)
        with open('ExploitDB_Success.pickle', 'wb') as f:
            pickle.dump(ExploitDB_Success, f, pickle.HIGHEST_PROTOCOL)

    JVN_VulnData = {}
    JVN_Success = {}
    if os.path.isfile('JVN.pickle') and os.path.isfile('JVN_Success.pickle'):
        with open('JVN.pickle', 'rb') as f:
            JVN_VulnData = pickle.load(f)
        with open('JVN_Success.pickle', 'rb') as f:
            JVN_Success = pickle.load(f)
    else:
        progress_label.config(text=langdata['JVN_DOWNLOAD_INTRO'])
        JVN_RetData = JVN.proc_data(cp, langdata)
        JVN_VulnData = JVN_RetData[0]
        JVN_Success = JVN_RetData[1]

        with open('JVN.pickle', 'wb') as f:
            pickle.dump(JVN_VulnData, f, pickle.HIGHEST_PROTOCOL)
        with open('JVN_Success.pickle', 'wb') as f:
            pickle.dump(JVN_Success, f, pickle.HIGHEST_PROTOCOL)

    NVD_VulnData = {}
    NVD_Success = {}
    if os.path.isfile('NVD.pickle') and os.path.isfile('NVD_Success.pickle'):
        with open('NVD.pickle', 'rb') as f:
            NVD_VulnData = pickle.load(f)
        with open('NVD_Success.pickle', 'rb') as f:
            NVD_Success = pickle.load(f)
    else:
        progress_label.config(text=langdata['NVD_DOWNLOAD_INTRO'])
        NVD_RetData = NVD.proc_data(cp, langdata)
        NVD_VulnData = NVD_RetData[0]
        NVD_Success = NVD_RetData[1]

        with open('NVD.pickle', 'wb') as f:
            pickle.dump(NVD_VulnData, f, pickle.HIGHEST_PROTOCOL)
        with open('NVD_Success.pickle', 'wb') as f:
            pickle.dump(NVD_Success, f, pickle.HIGHEST_PROTOCOL)

    progress_label.config(text=langdata['GET_LOCAL_APP'])
    installed = local_app.get_all()

    ## Scan #############################################################

    count = 0
    i = 0
    print("DICCIONARIOO EXPLLLLLOITDB_VULNDATA")
    #print(len(ExploitDB_VulnData))
    for key in ExploitDB_VulnData:
        i = i + 1
        if i > 10: break
        print("Clave: " + key + " Valor: " + ExploitDB_VulnData[key])

    i = 0
    print("DICCIONARIOO JVN_VulnData")
    #print(len(JVN_VulnData))
    for key in JVN_VulnData:
        i = i + 1
        if i > 10: break
        print("Clave: " + key + " Valor: " + JVN_VulnData[key])

    i = 0
    print("DICCIONARIOO  NVD_VulnData")
    #print(len(NVD_VulnData))
    for key in NVD_VulnData:
        i = i + 1
        if i > 10: break
        print("Clave: " + key + " Valor: " + NVD_VulnData[key])

    scanret_exploitdb = ExploitDB.scan(langdata, ExploitDB_VulnData, installed)
    scanret_jvn = JVN.scan(langdata, JVN_VulnData, installed)
    scanret_nvd = NVD.scan(langdata, NVD_VulnData, installed)
    scanret_winupdate = WindowsUpdate.scan(langdata)

    result = {}

    result.update(scanret_exploitdb[0])
    result.update(scanret_jvn[0])
    result.update(scanret_nvd[0])
    result.update(scanret_winupdate[0])

    count = scanret_exploitdb[1] + scanret_jvn[1] + scanret_nvd[
        1]  # + scanret_winupdate[1]
    #####################################################################
    scan_endtime = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    history = '''
    Session ID: {}
    Scan started at: {}
    Scan ended at: {}
    Found vulnerable application and available update: {}
    DetExploit Version: {}

    #####################################################################

    '''.format(session_id, scan_starttime, scan_endtime, count,
               DETEXPLOIT_VERSION)
    with open('../history.detexploit', 'w') as wh:
        wh.write(history)
    resultdict = {}
    ## Report ###########################################################
    report.generate_report(cp, langdata, DETEXPLOIT_VERSION, session_id, count,
                           scan_starttime, scan_endtime, ExploitDB_Success,
                           JVN_Success, NVD_Success, result)