Code example #1
# Imports reconstructed for context: fps_plot, video_player, screenshot and
# options are project-local modules; the base class is assumed to be a Kivy
# Screen, since the constructor passes a ``name`` keyword.
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import Screen

import fps_plot
import options
import screenshot
import video_player


class GameInspector(Screen):
    def __init__(self):
        super(GameInspector, self).__init__(name='GameInspector')
        self.main_layout = BoxLayout(orientation='vertical')
        self.first_row = BoxLayout(orientation='horizontal')
        self.second_row = BoxLayout(orientation='horizontal')
        self.main_layout.add_widget(self.first_row)
        self.main_layout.add_widget(self.second_row)

        self.fps_plot = fps_plot.get_fps_plot()
        self.video = video_player.get_video_player()
        self.screenshot = screenshot.get_screenshot()
        self.config = options.get_config(self.fps_plot, self.video,
                                         self.screenshot)

        self.first_row.add_widget(self.config)
        self.first_row.add_widget(self.fps_plot)
        self.second_row.add_widget(self.video)
        self.second_row.add_widget(self.screenshot)

        self.add_widget(self.main_layout)
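
The widget above only builds its layout; to show it, it has to be attached to a
running app. A minimal usage sketch, assuming GameInspector is a Kivy Screen as
reconstructed above (the InspectorApp wrapper is illustrative, not from the
source):

from kivy.app import App
from kivy.uix.screenmanager import ScreenManager


class InspectorApp(App):  # hypothetical app wrapper
    def build(self):
        sm = ScreenManager()
        sm.add_widget(GameInspector())  # addressable by its 'GameInspector' name
        sm.current = 'GameInspector'
        return sm


if __name__ == '__main__':
    InspectorApp().run()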
Code example #2
#!/usr/bin/env python

import os
import glob
import shutil
from copy import deepcopy

from conf import bib_dir, template_dir, html_dir, static_dir, pdf_dir
from options import get_config, mkdir_p
from build_template import bib_from_tmpl, html_from_tmpl, from_template

config = get_config()
mkdir_p(bib_dir)
for file in glob.glob(os.path.join(static_dir, '*.css')):
    shutil.copy(file, html_dir)
html_pdfs = os.path.join(html_dir, 'pdfs')
mkdir_p(html_pdfs)
for file in glob.glob(os.path.join(pdf_dir, '*.pdf')):
    shutil.copy(file, html_pdfs)

citation_key = config['proceedings']['citation_key']  # e.g. proc-scipy-2010

bib_from_tmpl('proceedings', config, citation_key)

proc_dict = deepcopy(config)
proc_dict.update({
    'pdf': 'pdfs/proceedings.pdf',
    'bibtex': 'bib/' + citation_key
})

for dest_fn in ['index', 'organization', 'students']:
    html_from_tmpl(dest_fn + '.html', proc_dict, dest_fn)
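
get_config and mkdir_p come from the project-local options module. A plausible
minimal sketch of mkdir_p, assuming it simply mirrors the shell's mkdir -p (the
classic recipe, compatible with both Python 2 and 3):

import errno
import os


def mkdir_p(path):
    # create the directory and any missing parents; an existing
    # directory is not an error, mirroring ``mkdir -p``
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise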
Code example #3
File: allPythonContent.py Project: Mondego/pyreco
__FILENAME__ = build_html
#!/usr/bin/env python

import os
import glob
import shutil

from conf import bib_dir, template_dir, html_dir, static_dir, pdf_dir
from options import get_config, mkdir_p
from build_template import bib_from_tmpl, html_from_tmpl, from_template

config = get_config()
mkdir_p(bib_dir)
for file in glob.glob(os.path.join(static_dir, '*.css')):
    shutil.copy(file, html_dir)
html_pdfs = os.path.join(html_dir, 'pdfs')
mkdir_p(html_pdfs)
for file in glob.glob(os.path.join(pdf_dir, '*.pdf')):
    shutil.copy(file, html_pdfs)

citation_key = config['proceedings']['citation_key'] # e.g. proc-scipy-2010

bib_from_tmpl('proceedings', config, citation_key)

proc_dict = dict(config.items() +
                {'pdf': 'pdfs/proceedings.pdf'}.items() +
                {'bibtex': 'bib/' + citation_key}.items())

for dest_fn in ['index', 'organization', 'students']:
    html_from_tmpl(dest_fn+'.html', proc_dict, dest_fn)
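
Note that dict(config.items() + {...}.items()) only works under Python 2, where
items() returns lists; on Python 3 it raises TypeError because dict views do
not support +. The deepcopy/update variant in example #2 is one port; a few
other Python 3 equivalents (values here are illustrative):

base = {'title': 'SciPy Proceedings'}
extra = {'pdf': 'pdfs/proceedings.pdf'}

merged = {**base, **extra}    # unpacking, Python 3.5+
merged = dict(base, **extra)  # works while extra's keys are strings
merged = base | extra         # union operator, Python 3.9+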
Code example #4
File: main.py Project: mazeyang/GANomaly
        shows the best performance in the manuscript.
        The test set keeps every class; samples of the anomaly class are
        labeled 1 and all others 0. Images are resized to 32*32 to fit
        the model input.
    '''
    x_train = x_train[y_train != anomaly]
    x_train = batch_resize(x_train, (32, 32))
    x_test = batch_resize(x_test, (32, 32))
    y_test = 1 * (y_test == anomaly)
    '''add one new axis to fit the model input'''
    x_train = x_train[:, :, :, None]
    x_test = x_test[:, :, :, None]

    print('train shape:', x_train.shape)
    ''' 1. train model and evaluate on test data by AUC '''
    sess = tf.Session()
    opts = get_config(is_train=True)
    model = Ganormal(sess, opts)
    '''start training'''
    auc_all = []
    for i in range(opts.iteration):
        loss_train_all = []
        loss_test_all = []
        real_losses = []
        fake_losses = []
        enco_losses = []
        ''' shuffle data in each epoch'''
        permutated_indexes = np.random.permutation(x_train.shape[0])
        ''' decay the learning rate. we don't do it the TensorFlow way
        because manual decay makes fine-tuning easier '''
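
The last comment refers to decaying the learning rate by hand each epoch
instead of through a TensorFlow schedule. A minimal sketch of one common manual
schedule; the decay constant, base rate, and the opts.learning_rate attribute
are illustrative, not taken from the GANomaly source:

def decayed_lr(base_lr, epoch, decay_rate=0.95):
    # exponential decay: shrink the rate by decay_rate every epoch
    return base_lr * (decay_rate ** epoch)


# hypothetical use inside the loop above:
#     lr = decayed_lr(opts.learning_rate, i)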
Code example #5
# Imports reconstructed for context; get_config, ZMQQueue, CEFParser,
# CEFSocketReader, CEFSocketWriter and CEFWebServer are project-local.
import logging
import os
import re
import time
from multiprocessing import Manager, Process, log_to_stderr
from threading import Thread


def main():
    # set up logging
    # multiprocessing.log_to_stderr returns a logger whose records carry the
    # process name
    logger = log_to_stderr(logging.DEBUG)
    # patch the handler's formatter in place (relies on the stdlib
    # formatter's private _fmt attribute)
    logger.handlers[0].formatter._fmt = (
        '%(asctime)s [%(levelname)s/%(processName)s] %(message)s')
    logger.debug('Starting Event Enrichment 2.0')

    # raise SystemExit
    def create_shared_dicts(all_options, subparser_options):
        parser_file_names = set()

        # this thread blocks forever
        while True:
            for option in [all_options, subparser_options]:
                for vendor_product, vendor_product_arguments in option.items():
                    # vendor_product = [mcafee_web_gateway]
                    # vendor_product_arguments = everything underneath [mcafee_web_gateway]
                    for subparser_method, subparser_arguments in vendor_product_arguments.items():
                        # subparser_method = calc_entropy
                        # subparser_arguments = request, cfp1, requestURL.entropy

                        # parse methods have to be named 'parse'
                        if subparser_method == 'parse':
                            for item in subparser_arguments:
                                # keep only the first comma-separated field,
                                # which names the parser file:
                                # 'request, cfp1, requestURL.entropy' -> 'request'
                                parser_file_names.add(
                                    item.split(',')[0].strip())

                            # now we have a set of file names in parser_file_names.  Let's pull in the patterns.
                            for file in parser_file_names:
                                # TODO: fall back on last known good config
                                # http://stackoverflow.com/questions/5137497/find-current-directory-and-files-directory
                                # directory containing this script (not the
                                # process's working directory)
                                cwd = os.path.dirname(os.path.realpath(__file__))
                                with open('{0}/parsers/{1}'.format(cwd, file)) as f:
                                    patterns = [line.rstrip('\r\n') for line in f]
                                    try:
                                        # TODO: compile in an explicit loop so
                                        # the offending pattern can be identified
                                        compiled_regex_parsers[file] = [
                                            re.compile(p, re.IGNORECASE)
                                            for p in patterns
                                        ]
                                    except re.error:
                                        logger.debug(
                                            '{} contains an invalid regex parser'
                                            .format(file))
                            logger.debug('regex parsers updated')
            time.sleep(60)

    # address = ('localhost', 6070) # let the kernel give us a port
    # server = ThreadedEchoServer(address, ThreadedEchoRequestHandler)
    # t = threading.Thread(target=server.serve_forever)
    # t.setDaemon(True)
    # t.start()

    # multiprocessing shared dict stuff
    manager = Manager()
    compiled_regex_parsers = manager.dict()
    shared_dicts = manager.dict()
    input_output_stats = manager.dict()
    historical_stats = manager.list()

    # input/output hosts from conf
    input_hosts = []
    output_hosts = []
    webserver_host = []
    procs = 4

    # subparsing stuff from conf
    all_options = {}
    subparser_options = {}

    # parse through the conf, setting a few variables - input/output_hosts, all_options, and subparser_options
    for item in get_config():
        if 'default' in item:
            # print 'default =', item
            default_values = item.get('default')
            for key, value in default_values.items():
                # TODO: probably a better way to do this, not very idiomatic
                if key == 'input':
                    for host in value:
                        position = value.index(host)
                        host = host.split(':')
                        value[position] = (host[0], int(host[1]))
                    input_hosts.extend(value)
                if key == 'output':
                    for host in value:
                        position = value.index(host)
                        host = host.split(':')
                        value[position] = (host[0], int(host[1]))
                    output_hosts.extend(value)
                if key == 'webserver':
                    for host in value:
                        host = host.split(':')
                        webserver_host = (host[0], int(host[1]))
                if key == 'parsers':
                    procs = int(value[0])

        elif 'all' in item:
            all_options = item
        else:
            subparser_options = item

    # start parser refreshing thread
    thread_refresh_parsers = Thread(target=create_shared_dicts,
                                    args=[all_options, subparser_options])
    thread_refresh_parsers.daemon = True
    thread_refresh_parsers.start()

    # start RX queue.  This is a ZMQ Queue Device and will contain raw CEF
    rx_in_port = 5551
    rx_out_port = 5552
    rx_streamer = ZMQQueue()
    Process(name='rx_streamer',
            target=rx_streamer.run,
            args=(rx_in_port, rx_out_port)).start()

    # start TX queue.  This is a ZMQ Queue Device and will contain processed events
    tx_in_port = 5553
    tx_out_port = 5554
    tx_streamer = ZMQQueue()
    Process(name='tx_streamer',
            target=tx_streamer.run,
            args=(tx_in_port, tx_out_port)).start()

    # start writer procs.  Each host defined in conf gets its own process.
    writer = CEFSocketWriter()
    for i, host in enumerate(output_hosts):
        Process(
            name='writer{0}'.format(i + 1),
            target=writer.run,
            args=(host,
                  input_output_stats)).start()  # forks CEFSocketWriter.run()
        logger.debug('started writer-{0}'.format(i + 1))
        time.sleep(0.1)

    # set up parsers and start them in their own processes.  These parsers do the regex grunt work, entropy, etc
    parser = CEFParser()
    for i in range(procs):
        Process(name='parser{0}'.format(i + 1),
                target=parser.run,
                args=(compiled_regex_parsers, shared_dicts, subparser_options,
                      all_options,
                      input_output_stats)).start()  # forks CEFParser.run()
        logger.debug('started parser-{0}'.format(i + 1))
        time.sleep(.1)
        # Process(name='parser{0}'.format(i+1), target=parser.run).start()  # forks CEFParser.run()

    # start reader procs.  Each host defined in conf gets its own process.
    reader = CEFSocketReader()
    for i, host in enumerate(input_hosts):
        Process(
            name='reader{0}'.format(i + 1),
            target=reader.run,
            args=(host,
                  input_output_stats)).start()  # forks CEFSocketReader.run()
        logger.debug('started reader-{0}'.format(i + 1))
        time.sleep(.1)

    webserver = CEFWebServer()
    webserver.run(shared_dicts, input_output_stats, webserver_host,
                  historical_stats)
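
ZMQQueue here is a project-local wrapper around a ZeroMQ queue device. For
orientation only, a hypothetical sketch of what its run method could look like
with pyzmq; the PUSH/PULL socket types and bind addresses are assumptions, not
the project's actual implementation:

import zmq


class ZMQQueue:
    def run(self, in_port, out_port):
        ctx = zmq.Context()
        frontend = ctx.socket(zmq.PULL)  # producers connect and push here
        frontend.bind('tcp://*:{0}'.format(in_port))
        backend = ctx.socket(zmq.PUSH)   # consumers connect and pull here
        backend.bind('tcp://*:{0}'.format(out_port))
        zmq.proxy(frontend, backend)     # blocks, shuttling messages through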