コード例 #1
0
def iter_thru_config(which, dicti):
    """Merge the config section *which* into *dicti* without overwriting.

    Parameters:
        which: config section name passed to utilities.parse_config
            (callers use "Attack" or "Chaff").
        dicti: destination dictionary, updated in place; keys already
            present in dicti are kept (existing entries win).
    """
    diction = utilities.parse_config(which)
    for key, value in diction.items():
        # `key not in dicti` replaces the non-idiomatic `not key in dicti`;
        # only missing keys are copied so caller-supplied values persist.
        if key not in dicti:
            dicti[key] = value
コード例 #2
0
def main():
    """Drive the network-in analysis pass over a folder of pcaps.

    Reads the 'NetworkIn' config section, splits the captured pcaps into
    per-directory sessions, extracts data into CSVs, loads them into a
    SQLite database ('packets.db' in the starting working directory), and
    prints the database contents plus total elapsed time.

    Returns:
        str: the fixed completion message 'Analyzer is complete'.

    Side effects: changes the process working directory repeatedly
    (restored only as far as pcap_dir at loop exit), creates directories
    under pcap_dir\\processed, renames CSVs, removes emptied directories,
    and may create packets.db.
    NOTE(review): paths are built with '\\' literals, so this is
    Windows-only as written — confirm before porting.
    """
    # Wall-clock timing for the final 'Total time' report.
    s = time.time()
    cwd = os.getcwd()

    config = utilities.parse_config('NetworkIn')
    pcap_dir = config['pcapfolder']
    regex = config['regex']
    length = config['length']

    # The configured regex contains a placeholder filled with 'length'.
    regex = regex.format(length)
    pcaphandler.split(pcap_dir)
    # Pcaps in put pcaps here should be moved or deleted
    # Will start with moved
    # if there is more than one file, move it to processed
    # only moves files
    for directory in os.listdir(pcap_dir):
        # Skip the 'processed' archive folder and plain files.
        if not directory == 'processed' and os.path.isdir(pcap_dir+'\\'+directory):
            # Pre-create the matching archive folder; assumes it does not
            # already exist (os.mkdir raises otherwise).
            os.mkdir(pcap_dir+'\\processed\\'+directory)
            pcaphandler.get_sql_data(regex, directory, pcap_dir)
    os.chdir(cwd)
    # Create and initialize the database only on first run.
    if 'packets.db' not in os.listdir(cwd):
        conn = sqlite3.connect('packets.db')
        generate_db(conn)
    os.chdir(pcap_dir)
    for directory in os.listdir(os.getcwd()):
        if not directory == 'processed' and os.path.isdir(os.getcwd()+'\\'+directory):
            os.chdir(directory)
            # Import this directory's CSV into the DB located in cwd.
            add_packet(directory+'.csv', cwd)
            # Archive the CSV, then remove the now-empty source directory.
            os.rename(pcap_dir+'\\'+directory+'\\'+directory+'.csv', pcap_dir +'\\processed\\'+directory+'\\'+directory+'.csv')
            os.chdir(pcap_dir)
            os.rmdir(directory)
    print_sql_database(cwd)
    e = time.time()
    print('Total time: ', e-s)
    return 'Analyzer is complete'
コード例 #3
0
ファイル: pipeline.py プロジェクト: dan-hook/phoenix_pipeline
def run(run_date, num_days):
    """Parse the PHOX pipeline config and launch the main pipeline run."""
    # TODO: update config file to contain details for elasticsearch
    parsed = utilities.parse_config('PHOX_config.ini')
    server_details, file_details = parsed

    main(file_details,
         server_details,
         file_details.log_file,
         run_date=run_date,
         run_filter=file_details.oneaday_filter,
         version='v0.0.0')
コード例 #4
0
def run():
    """Load the pipeline settings and kick off the main processing run."""
    server_details, file_details = utilities.parse_config('PHOX_config.ini')
    main(file_details, server_details, file_details.log_file,
         run_filter=file_details.oneaday_filter, version='v0.0.0')
コード例 #5
0
    def test_load_model(self):
        """Parse configuration.ini and check that a model can be loaded."""
        config = os.path.realpath('./configuration.ini')
        params = utils.parse_config(config)
        m = utils.load_model(params)

        # TODO: make sure that m is an instance of TestModel

        # Parenthesized print works on both Python 2 and 3; the original
        # `print 'done'` statement is a Python-2-only syntax error on 3.
        print('done')
コード例 #6
0
    def test_load_model(self):
        """Parse configuration.ini and check that a model can be loaded."""
        config = os.path.realpath('./configuration.ini')
        params = utils.parse_config(config)
        m = utils.load_model(params)

        # TODO: make sure that m is an instance of TestModel

        # Parenthesized print works on both Python 2 and 3; the original
        # `print 'done'` statement is a Python-2-only syntax error on 3.
        print('done')
コード例 #7
0
def run():
    """Read the PHOX config and start the pipeline with its settings."""
    cfg = utilities.parse_config('PHOX_config.ini')
    file_list = cfg['file_list']
    main(file_list,
         cfg['geo_list'],
         cfg['server_list'],
         cfg['petrarch_version'],
         cfg['run_date'],
         file_list.log_file,
         run_filter=file_list.oneaday_filter,
         version='v0.0.0')
コード例 #8
0
ファイル: dad.py プロジェクト: dracoranger/ConstVig-public
def main():
    '''
    Summary of behavior: starts, restarts, and manages the child processes
    Arguments: None
    Return values: None (runs for total_rounds rounds, then returns)
    Side effects: child processes launched and monitored for success;
        progress appended to the configured log file via utilities.log_data
    Exceptions raised: none raised explicitly here
    Restrictions on when it can be called: None
    '''
    # Timing/monitoring knobs come from the 'dad' config section.
    config = utilities.parse_config('dad')
    round_length = int(config['round_length'])
    total_rounds = int(config['total_rounds'])
    time_between_check = int(config['time_between_check'])
    log_file = config['log_file']

    utilities.log_data(log_file, 'Began operations at '+str(time.time())+'\n')

    # NOTE(review): 'ChildNO' looks like a config-section name for the
    # child process wrapper — confirm against the CHILD class.
    child = CHILD('ChildNO')

    for i in range(0, total_rounds):
        time_start = time.time()
        # Poll the child every time_between_check seconds until the round
        # window (round_length seconds) elapses.
        while time.time() - time_start < round_length:
            print(str(time.time()-time_start))
            if utilities.check_input(child.process.poll(), 1):
                # Child has exited: poll() == 0 means success, any other
                # non-None exit status is treated as failure.
                if child.process.poll() == 0:
                    response = str(child.process.communicate())
                    utilities.log_data(log_file, '%s success: %s at %s\n' %
                                       (child.get_name(), response, str(time.time())))
                else:
                    response = str(child.process.communicate())
                    utilities.log_data(log_file, '%s failure: %s at %s\n' %
                                       (child.get_name(), response, str(time.time())))
                    print(child.get_name()+response)
            elif child.process.poll() is None:
                # None means the child is still running.
                utilities.log_data(log_file, '%s Ongoing at %s\n' %
                                   (child.get_name(), str(time.time())))
            else:
                print('please look up what happens if subprocess.poll() does not return 1 or None')
            time.sleep(time_between_check)
        if child.process.poll() is None:
            # Round ended with the child still running — warn, then restart
            # it below regardless.
            print(child.get_name()+' on going: possibly too many files being launched')
        child.recreate_subprocess()
        print('round '+str(i)+' complete')
    utilities.log_data(log_file, 'Ended operations at %s\n' %
                       (str(time.time())))
    print('fully complete')
コード例 #9
0
def run():
    """Parse the PHOX config and hand its pieces to the pipeline main()."""
    parsed = utilities.parse_config('PHOX_config.ini')
    server_details, geo_details, file_details, petrarch_version = parsed
    main(file_details,
         geo_details,
         server_details,
         petrarch_version,
         file_details.log_file,
         run_filter=file_details.oneaday_filter,
         version='v0.0.0')
コード例 #10
0
ファイル: page.py プロジェクト: johnb30/atlas
    if website == 'almonitor':
        text = re.sub("^.*?\(photo by REUTERS.*?\)", "", text)
    if website in site_list:
        text = re.sub("^\(.*?MENAFN.*?\)", "", text)
    elif website == 'upi':
        text = text.replace("Since 1907, United Press International (UPI) has been a leading provider of critical information to media outlets, businesses, governments and researchers worldwide. UPI is a global operation with offices in Beirut, Hong Kong, London, Santiago, Seoul and Tokyo. Our headquarters is located in downtown Washington, DC, surrounded by major international policy-making governmental and non-governmental organizations. UPI licenses content directly to print outlets, online media and institutions of all types. In addition, UPI's distribution partners provide our content to thousands of businesses, policy groups and academic institutions worldwide. Our audience consists of millions of decision-makers who depend on UPI's insightful and analytical stories to make better business or policy decisions. In the year of our 107th anniversary, our company strives to continue being a leading and trusted source for news, analysis and insight for readers around the world.", '')

    text = text.replace('\n', ' ')

    return text


if __name__ == '__main__':
    # NOTE(review): fixed 60 s delay before startup — presumably waiting
    # for dependent services (DB/broker) to come up; confirm.
    time.sleep(60)

    # Connection endpoints are overridable from the command line.
    aparse = argparse.ArgumentParser(prog='rss')
    aparse.add_argument('-rb', '--rabbit_conn', default='localhost')
    aparse.add_argument('-db', '--db_conn', default='127.0.0.1')
    args = aparse.parse_args()

    # Build the MongoDB-style collection handle from config credentials.
    config_dict = utilities.parse_config()
    coll = utilities.make_coll(config_dict.get('auth_db'),
                               config_dict.get('auth_user'),
                               config_dict.get('auth_pass'),
                               args.db_conn)
    # NOTE(review): coll/proxies/proxy_* are not passed to main(args) —
    # presumably read as module globals by the worker functions; verify.
    proxies = config_dict.get('proxy_list')
    proxy_pass = config_dict.get('proxy_pass')
    proxy_user = config_dict.get('proxy_user')

    main(args)
コード例 #11
0
# EXCEPTIONS:
# FUNCTIONS:
#     split
#     get_sql_data

import subprocess
import os
import re
import socket
import datetime

import dpkt

import utilities

# Module-level config, parsed once at import time from the 'NetworkIn'
# section. PATH_SPLIT is the filesystem path to the SplitCap executable
# used by split() below.
DictionNI = utilities.parse_config('NetworkIn')
PATH_SPLIT = DictionNI['splitlocl']


def split(pcap_dir):
    """Split each large pcap file in *pcap_dir* into per-session pcaps.

    Runs the external SplitCap tool (PATH_SPLIT) once per regular file in
    pcap_dir with '-s session', then deletes/relocates are handled by the
    tool itself.

    Parameters:
        pcap_dir: directory containing the capture files to split.

    Side effects: changes the process working directory to pcap_dir and
    spawns one SplitCap subprocess per file, waiting for each to finish.
    """
    os.chdir(pcap_dir)
    for fil in os.listdir(pcap_dir):
        if os.path.isfile(fil):
            sname = os.fsdecode(fil)
            # os.path.join handles the separator portably instead of the
            # hard-coded '\\' concatenation.
            fname = os.path.join(pcap_dir, sname)
            # Pass the command as an argument list so file names that
            # contain spaces are not split into separate arguments (the
            # original concatenated one command string).
            cmd = [PATH_SPLIT, '-r', fname, '-s', 'session']
            process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
            process.wait()
コード例 #12
0
ファイル: pipeline.py プロジェクト: thodrek/phoenix_pipeline
        logger.warning("Can't run with the options you've specified. Exiting.")
        sys.exit()

    if run_filter == 'True':
        logger.info("Running oneaday_formatter.py")
        print("Running oneaday_formatter.py")
        formatted_results = oneaday_filter.main(petr_results)
    else:
        logger.info("Running result_formatter.py")
        print("Running result_formatter.py")
        formatted_results = result_formatter.main(petr_results)

    logger.info("Running postprocess.py")
    print("Running postprocess.py")
    postprocess.main(formatted_results, date_string, file_details)

    logger.info("Running phox_uploader.py")
    print("Running phox_uploader.py")
    uploader.main(date_string, server_details, file_details)

    logger.info('PHOX.pipeline end')
    print('PHOX.pipeline end:', datetime.datetime.utcnow())


if __name__ == '__main__':
    # initialize the various utilities globals
    # Parse the config once here; main() receives the parsed server/file
    # detail objects directly rather than re-reading the ini file.
    server_details, file_details = utilities.parse_config('PHOX_config.ini')

    main(file_details, server_details, file_details.log_file,
         run_filter=file_details.oneaday_filter)
コード例 #13
0
ファイル: pipeline.py プロジェクト: thodrek/phoenix_pipeline
    if run_filter == 'True':
        logger.info("Running oneaday_formatter.py")
        print("Running oneaday_formatter.py")
        formatted_results = oneaday_filter.main(petr_results)
    else:
        logger.info("Running result_formatter.py")
        print("Running result_formatter.py")
        formatted_results = result_formatter.main(petr_results)

    logger.info("Running postprocess.py")
    print("Running postprocess.py")
    postprocess.main(formatted_results, date_string, file_details)

    logger.info("Running phox_uploader.py")
    print("Running phox_uploader.py")
    uploader.main(date_string, server_details, file_details)

    logger.info('PHOX.pipeline end')
    print('PHOX.pipeline end:', datetime.datetime.utcnow())


if __name__ == '__main__':
    # initialize the various utilities globals
    # Parse the config once here; main() receives the parsed server/file
    # detail objects directly rather than re-reading the ini file.
    server_details, file_details = utilities.parse_config('PHOX_config.ini')

    main(file_details,
         server_details,
         file_details.log_file,
         run_filter=file_details.oneaday_filter)
コード例 #14
0
 EXCEPTIONS:
 FUNCTIONS:
   main
   iter_thru_config
   run_processes
'''
import os
import time
import ipaddress
import random
import utilities
import ssResponse


# Constants & user set variables (naming exception made for variables)
# All values are parsed once at import time from their config sections.
dictionA = utilities.parse_config('Attacks')
dictionC = utilities.parse_config('Chaff')
dictionNO = utilities.parse_config('NetworkOut')
dictionLaunch = utilities.parse_config('dad')
PATH_ATTACK = dictionNO['path_attack']
PATH_CHAFF = dictionNO['path_chaff']
# Comma-separated list in the config, split into a list of strings here.
PORTS = dictionNO['ports'].split(',')
SUBMIT_FLAG_PORT = dictionNO['submit_flag_port']
SUBMIT_FLAG_IP = dictionNO['submit_flag_ip']
# Integer-valued flags/counters; int() raises if the config value is not
# numeric, surfacing bad configs at import time.
RANDOMIZED_AND_SPACED = int(dictionNO['randomized_and_spaced'])
CHAFF_PER_ATTACK = int(dictionNO['chaff_per_attack'])
ROUND_LENGTH = int(dictionLaunch['round_length'])
SAFETY_BUFFER = int(dictionNO['safety_buffer'])
SUBMIT_AUTOMATICALLY = int(dictionNO['submit_flags_automatically'])
# NOTE(review): IP_RANGE only exists when use_ip_range is truthy — any
# code referencing it must guard on the same flag or risk a NameError.
if int(dictionNO['use_ip_range']):
    IP_RANGE = list(ipaddress.ip_network(dictionNO['ip_range']).hosts())
コード例 #15
0
    while True:
        logging.info('Starting a new scrape. {}'.format(
            datetime.datetime.now()))
        results = [
            pool.apply_async(scrape_func, (website, address, lang, args))
            for website, (address, lang) in scrape_dict.iteritems()
        ]
        timeout = [res.get(9999999) for res in results]
        logging.info('Finished a scrape. {}'.format(datetime.datetime.now()))
        time.sleep(1800)


if __name__ == '__main__':
    #Get the info from the config
    time.sleep(60)
    config_dict = utilities.parse_config()

    aparse = argparse.ArgumentParser(prog='rss')
    aparse.add_argument('-rb', '--rabbit_conn', default='localhost')
    aparse.add_argument('-rd', '--redis_conn', default='localhost')
    args = aparse.parse_args()

    logging.basicConfig(format='%(levelname)s %(asctime)s: %(message)s',
                        level=logging.INFO)

    logging.info('Running. Processing in 45 min intervals.')

    print('Running. See log file for further information.')

    #Convert from CSV of URLs to a dictionary
    try:
コード例 #16
0
    logger = None
    return logger


def usage_help():
    """Print usage instructions for the converter and terminate.

    Exits the interpreter via sys.exit(-1); this function never returns.
    """
    print("This script will perform a data conversion between CSV and JSON.\n")
    print("Usage: python DataConverter.py")
    sys.exit(-1)


if __name__ == "__main__":

    try:
        config_data = Utils.parse_config(CONFIG_FILE)
        logger = setup_log_system(config_data)
    except Exception as ex:
        # The original used `assert False, ...` here, but asserts are
        # stripped under `python -O`, which would silently continue with
        # no config. Report the failure and exit explicitly instead.
        print("Error in obtaining config data! (" + str(ex) + ")")
        sys.exit(-1)

    filename = config_data["CSV_DATA"]["FILENAME"]

    # Only .csv inputs are supported; anything else (or a missing name)
    # prints usage and exits via usage_help().
    if filename:
        if filename.endswith(".csv"):
            print("Valid CSV file: " + filename)
        else:
            print("Unsupported conversion of: " + filename)
            usage_help()
    else:
        usage_help()
コード例 #17
0
def test_geo_config():
    """Check that the geo section of PHOX_config.ini exposes the expected keys."""
    server_details, geo_details, file_details, petrarch_version = utilities.parse_config('PHOX_config.ini')
    # On Python 3, _asdict().keys() is a view object and never compares
    # equal to a list, so the original `geo_keys == [...]` could not pass
    # there; materialize the keys as a list before comparing.
    geo_keys = list(geo_details._asdict().keys())
    assert geo_keys == ['geo_service', 'cliff_host', 'cliff_port', 'mordecai_host', 'mordecai_port']