Example #1
def setup_logger(name="Pyhole"):
    """Log handler"""
    # NOTE(jk0): Disable unnecessary requests logging.
    requests.packages.urllib3.disable_warnings()
    requests_log = logging.getLogger("requests")
    requests_log.setLevel(logging.WARNING)

    debug_option = utils.get_option("debug")
    debug_config = utils.get_config().get("debug", type="bool")
    debug = debug_option or debug_config

    log_dir = utils.get_directory("logs")
    log_level = logging.DEBUG if debug else logging.INFO
    log_format = "%(asctime)s [%(name)s] %(message)s"
    log_datefmt = "%H:%M:%S"

    logging.basicConfig(level=log_level,
                        format=log_format,
                        datefmt=log_datefmt)

    log_file = "%s/%s.log"
    log = logging.handlers.TimedRotatingFileHandler(
        log_file % (log_dir, name.lower()), "midnight")
    log.setLevel(log_level)
    formatter = logging.Formatter(log_format, log_datefmt)
    log.setFormatter(formatter)
    logging.getLogger(name).addHandler(log)
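A minimal usage sketch (the call site below is an assumption, not taken from the source): configure logging once at startup, then fetch the named logger wherever messages are emitted.

setup_logger("Pyhole")
log = logging.getLogger("Pyhole")
log.info("Bot started")  # propagates to the console handler and the rotating log file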
Example #2
def add_dash(dash_module, resource):
    if not isinstance(dash_module, Dash):
        dash_app = [
            getattr(dash_module, x) for x in dir(dash_module)
            if isinstance(getattr(dash_module, x), Dash)
        ][0]
    else:
        dash_app = dash_module
    dash_app.config.requests_pathname_prefix = '/dashes/' + resource + '/render/'
    dash_app.config.routes_pathname_prefix = '/dashes/' + resource + '/render/'
    dash_app.css.config.serve_locally = True
    dash_app.scripts.config.serve_locally = True
    dash_app.server.before_request(lambda: os.chdir(
        os.path.join(get_directory(), resource)
        if resource != UPLOAD_RESULT_URL_PART else HOME_DIRECTORY))
    existing_rules = dash_app.server.url_map.iter_rules()
    dash_app.server.url_map = Map()
    for rule in existing_rules:
        dash_app.server.url_map.add(
            Rule('/render' + rule.rule.split('render')[-1],
                 endpoint=rule.endpoint))
    if '/dashes/' + resource in dispatcher.mounts:
        del dispatcher.mounts['/dashes/' + resource]
    dispatcher.mounts.update({'/dashes/' + resource: dash_app.server.wsgi_app})
    return '/dashes/' + resource + '/render/'
Example #3
File: log.py Project: kremlinkev/pyhole
def setup_logger(name="Pyhole"):
    """Log handler"""
    # NOTE(jk0): Disable unnecessary requests logging.
    requests.packages.urllib3.disable_warnings()
    requests_log = logging.getLogger("requests")
    requests_log.setLevel(logging.WARNING)

    debug_option = utils.get_option("debug")
    debug_config = utils.get_config().get("debug", type="bool")
    debug = debug_option or debug_config

    log_dir = utils.get_directory("logs")
    log_level = logging.DEBUG if debug else logging.INFO
    log_format = "%(asctime)s [%(name)s] %(message)s"
    log_datefmt = "%H:%M:%S"

    logging.basicConfig(level=log_level, format=log_format,
                        datefmt=log_datefmt)

    log_file = "%s/%s.log"
    log = logging.handlers.TimedRotatingFileHandler(log_file % (log_dir,
                                                    name.lower()), "midnight")
    log.setLevel(log_level)
    formatter = logging.Formatter(log_format, log_datefmt)
    log.setFormatter(formatter)
    logging.getLogger(name).addHandler(log)
Example #4
def pull_directories(dirs):
    directories_pulled = 0
    for directory in dirs:
        instance_path = "%s:/home/ec2-user/platformer/%s/." % (INSTANCE_URL,
                                                               directory)
        local_path = get_directory(directory)
        status = os.system("scp -r -i %s %s %s\n" %
                           (PEM_FILEPATH, instance_path, local_path))
        directories_pulled += 1 if status == 0 else 0
    print("Directories Pulled: %d" % directories_pulled)
Example #5
File: plugin.py Project: rishair/cs-4284
def load_user_plugin(plugin, *args, **kwargs):
    """Load a user plugin"""
    sys.path.append(utils.get_home_directory() + "plugins")
    user_plugins = os.listdir(utils.get_directory("plugins"))

    for user_plugin in user_plugins:
        if user_plugin.endswith(".py"):
            user_plugin = user_plugin[:-3]
            if plugin == user_plugin:
                try:
                    __import__(plugin, globals(), locals(), [plugin])
                except Exception as exc:
                    LOG.error(exc)
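A hedged usage sketch (the plugin name is purely illustrative): given a file plugins/weather.py under the pyhole home directory, the plugin can be loaded by name.

load_user_plugin("weather")  # hypothetical plugin; imports plugins/weather.py if present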
Example #6
File: plugin.py Project: pschwartz/pyhole
def load_user_plugin(plugin, *args, **kwargs):
    """Load a user plugin"""
    sys.path.append(utils.get_home_directory() + "plugins")
    user_plugins = os.listdir(utils.get_directory("plugins"))

    for user_plugin in user_plugins:
        if user_plugin.endswith(".py"):
            user_plugin = user_plugin[:-3]
            if plugin == user_plugin:
                try:
                    __import__(plugin, globals(), locals(), [plugin])
                except Exception as exc:
                    LOG.error(exc)
Example #7
def render(resource):
    if resource is None:
        return DEFAULT_LAYOUT
    if resource.startswith(DASH_UPLOAD_RESULTS_FLAG):
        loads = json.loads(str(homepage.layout[INVISIBLE_ID].children))
        return render_layout(
            add_dash(get_upload_dash(*loads), UPLOAD_RESULT_URL_PART))
    dir_path = os.path.join(get_directory(), unquote(resource))
    full_path = os.path.join(dir_path, unquote(resource) + ".py")
    with open(full_path) as f:
        if 'Dash' not in f.read():
            return error_layout("Этот файл не содержит объект Dash")
    try:
        dash_module = SourceFileLoader(resource[:-3], full_path).load_module()
    except ImportError as ie:
        # Install the missing dependency and retry; the fallback message reads
        # "Unable to load dependencies".
        return render(resource) if pip_install(str(ie).split(
            "'")[1]) == 0 else error_layout("Невозможно загрузить зависимости")
    except:
        error = traceback.format_exc().split("call_with_frames_removed\n",
                                             1)[1].replace(
                                                 get_directory() + "/", "")
        return error_layout(error)
    else:
        return render_layout(add_dash(dash_module, resource))
Example #8
File: plugin.py Project: msparks/pyhole
def load_user_plugin(plugin, *args, **kwargs):
    """Load a user plugin"""
    sys.path.append(utils.get_home_directory() + "plugins")
    user_plugins = os.listdir(utils.get_directory("plugins"))

    for user_plugin in user_plugins:
        if user_plugin.endswith(".py"):
            user_plugin = user_plugin[:-3]
            if plugin == user_plugin:
                try:
                    __import__(plugin, globals(), locals(), [plugin])
                except Exception as e:
                    # Catch all because this could be many things
                    kwargs.get("irc").log.error(e)
                    pass
Example #9
File: api.py Project: posix4e/pyhole
def get_paste(paste_id, raw=None):
    """Fetch and return a paste."""
    stats = os.stat(utils.get_directory("pastes") + paste_id)
    st_mtime = time.ctime(stats.st_mtime)
    st_size = stats.st_size

    paste = utils.read_file("pastes", paste_id)

    if not paste:
        flask.abort(404)

    if raw:
        return flask.Response(paste, status=200, mimetype="text/plain")

    return flask.render_template_string(PASTE_TEMPLATE,
                                        paste_id=paste_id,
                                        paste=cgi.escape(paste),
                                        st_mtime=st_mtime,
                                        st_size=st_size,
                                        version=version.version_string())
Example #10
File: log.py Project: bdelliott/pyhole
def setup_logger(name="Pyhole"):
    """Log handler"""
    debug_option = utils.get_option("debug")
    debug_config = utils.get_config().get("debug", type="bool")
    debug = debug_option or debug_config

    log_dir = utils.get_directory("logs")
    log_level = logging.DEBUG if debug else logging.INFO
    log_format = "%(asctime)s [%(name)s] %(message)s"
    log_datefmt = "%H:%M:%S"

    logging.basicConfig(level=log_level, format=log_format,
                        datefmt=log_datefmt)

    log_file = "%s/%s.log"
    log = logging.handlers.TimedRotatingFileHandler(log_file % (log_dir,
                                                    name.lower()), "midnight")
    log.setLevel(log_level)
    formatter = logging.Formatter(log_format, log_datefmt)
    log.setFormatter(formatter)
    logging.getLogger(name).addHandler(log)
Example #11
File: api.py Project: posix4e/pyhole
def get_paste(paste_id, raw=None):
    """Fetch and return a paste."""
    stats = os.stat(utils.get_directory("pastes") + paste_id)
    st_mtime = time.ctime(stats.st_mtime)
    st_size = stats.st_size

    paste = utils.read_file("pastes", paste_id)

    if not paste:
        flask.abort(404)

    if raw:
        return flask.Response(paste, status=200, mimetype="text/plain")

    return flask.render_template_string(
        PASTE_TEMPLATE,
        paste_id=paste_id,
        paste=cgi.escape(paste),
        st_mtime=st_mtime,
        st_size=st_size,
        version=version.version_string())
Example #12
    def load_synthetic_data_set(name, target_model):
        """
        Method that loads the local data set stored in the directory called 'name'

        :param name: name of the directory where data set is stored
        :param target_model: decides the format in which to return the data set
        :return: if for patchy_san return all graphs and labels in the data set
                else return all attributes and labels in the data set
        """

        all_graphs = list()
        all_labels = list()

        dataset_directory = get_directory() + '/Data_Sets/Provenance_Graphs/' + name
        number_of_classes, graphs_per_class = DataLoader.__load_data_property_file(dataset_directory + '/property_file')

        for index in range(1, number_of_classes + 1):
            class_directory = dataset_directory + '/Class_' + str(index)
            class_graphs = DataLoader.__load_graphs(class_directory, graphs_per_class[index - 1])
            for graph in class_graphs:
                all_graphs.append(graph)
                all_labels.append(index)

        all_labels = np.array(all_labels)
        all_graphs = np.array(all_graphs)

        if target_model == 'patchy_san':
            return all_graphs, all_labels, number_of_classes

        elif target_model == 'baselines':

            all_values = list()
            for graph in all_graphs:
                all_values.append(merge_splits(graph.values()))

            all_values = add_padding(all_values, 0)
            all_values = np.array(all_values)

            return all_values, all_labels, number_of_classes
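A usage sketch under stated assumptions: load_synthetic_data_set is assumed to be callable as a static method of DataLoader, and the data set name 'Two_Node' is purely illustrative.

graphs, labels, n_classes = DataLoader.load_synthetic_data_set('Two_Node', 'patchy_san')  # per-sample graphs
values, labels, n_classes = DataLoader.load_synthetic_data_set('Two_Node', 'baselines')   # padded attribute vectors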
Example #13
File: log.py Project: roaet/pyhole
def setup_logger(name="Pyhole"):
    """Log handler"""
    debug_option = utils.get_option("debug")
    debug_config = utils.get_config().get("debug", type="bool")
    debug = debug_option or debug_config

    log_dir = utils.get_directory("logs")
    log_level = logging.DEBUG if debug else logging.INFO
    log_format = "%(asctime)s [%(name)s] %(message)s"
    log_datefmt = "%H:%M:%S"

    logging.basicConfig(level=log_level,
                        format=log_format,
                        datefmt=log_datefmt)

    log_file = "%s/%s.log"
    log = logging.handlers.TimedRotatingFileHandler(
        log_file % (log_dir, name.lower()), "midnight")
    log.setLevel(log_level)
    formatter = logging.Formatter(log_format, log_datefmt)
    log.setFormatter(formatter)
    logging.getLogger(name).addHandler(log)
Example #14
File: logger.py Project: jk0/pyhole
#   limitations under the License.

"""Pyhole Logging"""

import bz2
import glob
import logging
import logging.handlers
import os
import requests
import shutil

import utils


LOG_DIR = utils.get_directory("logs")
LOG_ARCHIVE_DIR = utils.get_directory(os.path.join("logs", "archive"))
LOG_FORMAT = "%(asctime)s [%(name)s] %(message)s"
LOG_DATEFMT = "%H:%M:%S"


class PyholeFileHandler(logging.handlers.TimedRotatingFileHandler):
    def doRollover(self):
        result = super(PyholeFileHandler, self).doRollover()
        self.archive_old_logs()
        return result

    def archive_old_logs(self):
        matcher = "*.log.*[!b][!z][!2]"
        files = glob.glob(os.path.join(LOG_DIR, matcher))
        for file_path in files:
Example #15
    Input(TABS_DIV_ID, 'n_clicks_timestamp')
],
                   state=[State(SLIDESHOW_BUTTON_ID, 'children')])
def change_slideshow_btn_text(btn_ts, div_ts, btn_state):
    if btn_ts is not None and div_ts is not None and btn_ts >= div_ts - 200 and btn_state == START_SLIDESHOW:
        return STOP_SLIDESHOW
    return START_SLIDESHOW


@homepage.callback(Output(LOCATION_ID, 'search'),
                   [Input(TABS_LIST_ID, 'value')])
def change_url(value):
    if value is None:
        return ""
    state = urlencode({'tab': value})
    return f'?{state}'


@server.route('/static/<resource>')
def serve_static(resource):
    return send_from_directory(STATIC_PATH, resource)


if __name__ == '__main__':
    print("Using dashes directory:", get_directory())
    run_simple('0.0.0.0',
               5000,
               dispatcher,
               use_reloader=True,
               use_debugger=True,
               threaded=True)
Example #16
File: main.py Project: awoo769/Level_8_Lab
	]


	# Columns that we want to train on (the repeated assignments below are
	# alternative column sets; only the last assignment to cols takes effect)
	cols = [ 'ax_l', 'ay_l', 'az_l', 'ax_r', 'ay_r', 'az_r', 'a_res_l', 'a_res_r']
	cols = [ 'ax_l', 'ay_l', 'az_l', 'ax_r', 'ay_r', 'az_r']
	cols = ['ax_diff', 'ay_diff', 'az_diff']
	cols = ['ax_diff', 'ay_diff', 'az_diff','a_res_diff']
	cols = [ 'ax_l', 'ay_l', 'az_l', 'ax_r', 'ay_r', 'az_r', 'ax_diff', 'ay_diff', 'az_diff']

	for event in events:
			for event_type in event_types:
				x = []
				
				for col in cols:
					directory = get_directory(initial_directory=data_folder, columns=col, est_events=True, event=event, event_type=event_type)

					# Load features (after extract data has been run)
					X_dictionary, y_dictionary, groups = load_features(data_folder, directory, est_events=True)

					x.append(X_dictionary)

				X = {}

				for k in X_dictionary.keys():
					concat_list = []

					for idx in x:
						concat_list.append(idx[k])

					X[k] = pd.concat(concat_list, axis=1)
Example #17
def nested_cross_validation(data_set: np.array,
                            labels: np.array,
                            model_name: str,
                            no_of_classes: int,
                            no_of_outer_folds: int,
                            no_of_inner_folds: int,
                            no_of_samples: int):
    """
    Method that performs nested cross validation for a specified model on a given dataset

    :param data_set: graph or attributes list, depending on the model
    :param labels: true class labels
    :param model_name: name of the model to be evaluated
    :param no_of_classes: number of classes in the dataset
    :param no_of_outer_folds: number of outer folds in the NCV
    :param no_of_inner_folds: number of inner folds in the NCV
    :param no_of_samples: number of samples to be generated in the RandomSearchCV

    :return: a list of predictions on the entire dataset of the NCV
    """

    results_file = open(get_directory() + '/Results/' + model_name, 'a')
    results_file.truncate(0)

    data_set, labels, permutation = randomise_order(data_set, labels)

    splitted_data_set = split_in_folds(data_set, no_of_outer_folds)
    splitted_labels = split_in_folds(labels, no_of_outer_folds)
    all_predictions = list()

    for outer_iterator in range(0, no_of_outer_folds):
        print('Outer Fold #' + str(outer_iterator + 1))
        print('Outer Fold #' + str(outer_iterator + 1), file=results_file)
        print(file=results_file)
        results_file.flush()

        test_set = splitted_data_set[outer_iterator]
        test_labels = splitted_labels[outer_iterator]

        training_set = list()
        training_labels = list()
        for iterator in range(0, no_of_outer_folds):
            if iterator != outer_iterator:
                training_set.append(splitted_data_set[iterator])
                training_labels.append(splitted_labels[iterator])
        training_set = merge_splits(training_set)
        training_labels = merge_splits(training_labels)

        best_model, best_parameters = hyperparameter_tuning(data_set=training_set,
                                                            labels=training_labels,
                                                            no_of_classes=no_of_classes,
                                                            no_of_folds=no_of_inner_folds,
                                                            no_of_samples=no_of_samples,
                                                            model_name=model_name)
        results_file.flush()

        print('Best model on Outer Fold #' + str(outer_iterator + 1))
        print('Best model on Outer Fold #' + str(outer_iterator + 1), file=results_file)
        print(best_parameters, file=results_file)

        best_model.train(training_set, training_labels)
        predictions = best_model.predict_class(test_set)
        for prediction in predictions:
            all_predictions.append(prediction)
        metrics = compute_metrics(predictions, test_labels, no_of_classes)

        for element in metrics:
            print(element, file=results_file)

        print(file=results_file)
        print(file=results_file)
        results_file.flush()
        print()
        print()

    print(confusion_matrix(labels, all_predictions), file=results_file)

    permutation, all_predictions = (list(t) for t in zip(*sorted(zip(permutation, all_predictions))))
    return all_predictions
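An illustrative invocation (the fold and sample counts are arbitrary example values, and the inputs are assumed to come from a loader like the one in Example #12):

predictions = nested_cross_validation(data_set=graphs,
                                      labels=labels,
                                      model_name='CNN',
                                      no_of_classes=n_classes,
                                      no_of_outer_folds=5,
                                      no_of_inner_folds=3,
                                      no_of_samples=20)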
Example #18
    def create_dataset(name: str, depth: int, history_len: int,
                       degree_dist: np.array, no_of_classes: int,
                       no_of_graphs_per_class: np.array,
                       cmd_line_dist: np.array, login_name_dist: np.array,
                       euid_dist: np.array, binary_file_dist: np.array,
                       node_type_dist: np.array):
        """
        Method that creates a basic 2-Node synthetic dataset

        :param name: name of the dataset
        :param depth: how deep to look in the ancestor graph of a file
        :param history_len: how long to look in the past for interactions with a file
        :param degree_dist: degree distribution of nodes in the graphs
        :param no_of_classes: number of classes of graphs
        :param no_of_graphs_per_class: list containing number of graphs in each class
        :param cmd_line_dist: prob dist of cmd line type (for processes)
        :param login_name_dist: prob dist of login names (for processes)
        :param euid_dist: probability distribution of euids (for processes)
        :param binary_file_dist: probability distribution of provenance binary file (for files)
        :param node_type_dist: [0] - prob of creating a process, [1] - prob of creating a process
        :return: a synthetic dataset that respects the given metrics
        """

        main_dir_path = create_directory(
            get_directory() + '/Data_Sets/Provenance_Graphs', name)

        for class_number in range(1, no_of_classes + 1):  # for each class
            class_dir_path = create_directory(main_dir_path,
                                              'Class_' + str(class_number))
            clear_directory(class_dir_path)

            property_file = open(main_dir_path + '/property_file', 'a')
            property_file.truncate(0)
            print(no_of_classes, file=property_file)

            for dir_size in no_of_graphs_per_class:
                print(dir_size, file=property_file, end=' ')

            for iterator in range(1, no_of_graphs_per_class[class_number - 1] +
                                  1):  # for each graph in the class
                graph_file = open(
                    class_dir_path + '/provenance_graph_' + str(iterator), 'a')
                graph_file.truncate(0)

                # keep track of how many nodes are generated
                node_counter = 0

                edges = list()
                files = list()

                for file in range(
                        0, history_len
                ):  # file nodes, i.e. different versions of the same file in time
                    node_counter += 1

                    files.append(node_counter)

                    nodes_at_depth = dict()
                    nodes_at_depth['0'] = [node_counter]

                    for level in range(
                            0, depth
                    ):  # create ancestors at increasing depth levels
                        nodes = nodes_at_depth[str(level)]
                        nodes_at_depth[str(level + 1)] = list()
                        for node in nodes:
                            no_of_neighbours = np.random.choice(
                                degree_dist['values'], p=degree_dist['probs'])

                            for neigh in range(0, no_of_neighbours):
                                node_counter += 1
                                edges.append((node, node_counter))
                                nodes_at_depth[str(level +
                                                   1)].append(node_counter)

                no_of_nodes = node_counter
                no_of_edges = node_counter - 1

                print(no_of_nodes, no_of_edges, file=graph_file)

                # add edges between file versions
                for index in range(1, len(files)):
                    edges.append((files[index - 1], files[index]))

                for node in range(1, no_of_nodes + 1):
                    if node in files:
                        attribute = SyntheticDataGenerator.__create_file_vector(
                            binary_file_dist[class_number - 1])
                        for value in attribute:
                            print(value, file=graph_file, end=' ')
                        print(file=graph_file)
                    else:
                        # choose to create either socket or file with given probabilities
                        choice = np.random.choice([0, 1], p=node_type_dist)
                        if choice:
                            attribute = SyntheticDataGenerator.__create_socket_vector(
                            )
                        else:
                            attribute = SyntheticDataGenerator.__create_process_vector(
                                cmd_line_dist[class_number - 1],
                                login_name_dist[class_number - 1],
                                euid_dist[class_number - 1])
                        for value in attribute:
                            print(value, file=graph_file, end=' ')
                        print(file=graph_file)

                for edge in edges:
                    print(edge[0], edge[1], file=graph_file)
Example #19
def main(game, levels, process, solve, trial, max_sol, threads):

    if process:
        print("----- PROCESSING -----")
        process_dir = utils.get_directory("process_console_output")

        for level in levels:
            process_file = utils.get_filepath(process_dir, "%s.txt" % level)
            os.system(
                "(time pypy3 main.py platformer %s %s --process) > %s 2>&1" %
                (game, level, process_file))
            os.system(
                "(time python main.py platformer %s %s --gen_prolog) >> %s 2>&1"
                % (game, level, process_file))
            print("Saved to: %s" % process_file)

    if solve:
        print("----- SOLVING -----")
        config_formats = TRIAL_CONFIG_FORMATS.get(trial)
        if config_formats is None:
            utils.error_exit("--trial must be one of %s" %
                             str(list(TRIAL_CONFIG_FORMATS.keys())))

        prolog_file_format = "level_saved_files_block/prolog_files/%s.pl"
        level_structural_txt_file_format = "level_structural_layers/generated/%s.txt"
        level_model_str_file_format = "level_saved_files_block/generated_level_model_strs/%s.txt"
        level_assignments_dict_file_format = "level_saved_files_block/generated_level_assignments_dicts/%s.pickle"
        level_valid_path_file_format = "level_saved_files_block/generated_level_paths/%s.pickle"
        level_state_graph_file_format = "level_saved_files_block/enumerated_state_graphs/generated/%s.gpickle"

        solve_dir = utils.get_directory("solver_console_output")
        sol_order = list(range(max_sol))
        sol_order.reverse()

        for sol in sol_order:
            for config_file_format in config_formats:
                for level in levels:
                    prolog_file = prolog_file_format % level
                    prolog_filename = utils.get_basepath_filename(
                        prolog_file, 'pl')
                    config_file = config_file_format % (game, level)
                    config_filename = utils.get_basepath_filename(
                        config_file, 'json')

                    answer_set_filename_format = '_'.join(
                        [prolog_filename, config_filename, 'a%d'])
                    cur_answer_set_filename = answer_set_filename_format % sol
                    default_answer_set_filename = answer_set_filename_format % 0

                    solve_file = utils.get_filepath(
                        "%s/%s/" % (solve_dir, level),
                        "%s.txt" % cur_answer_set_filename)

                    os.system(
                        "(time python run_solver.py %s %s --max_sol 1 --threads %d --save --validate) > %s 2>&1"
                        % (prolog_file, config_file, threads, solve_file))
                    print("Saved to: %s" % solve_file)

                    if sol != 0 and os.path.exists(
                            level_structural_txt_file_format %
                            default_answer_set_filename):
                        os.system("mv %s %s" %
                                  (level_structural_txt_file_format %
                                   default_answer_set_filename,
                                   level_structural_txt_file_format %
                                   cur_answer_set_filename))

                    if sol != 0 and os.path.exists(
                            level_assignments_dict_file_format %
                            default_answer_set_filename):
                        os.system("mv %s %s" %
                                  (level_assignments_dict_file_format %
                                   default_answer_set_filename,
                                   level_assignments_dict_file_format %
                                   cur_answer_set_filename))

                    if sol != 0 and os.path.exists(
                            level_model_str_file_format %
                            default_answer_set_filename):
                        os.system("mv %s %s" % (level_model_str_file_format %
                                                default_answer_set_filename,
                                                level_model_str_file_format %
                                                cur_answer_set_filename))

                    if sol != 0 and os.path.exists(
                            level_valid_path_file_format %
                            default_answer_set_filename):
                        os.system("mv %s %s" % (level_valid_path_file_format %
                                                default_answer_set_filename,
                                                level_valid_path_file_format %
                                                cur_answer_set_filename))

                    if sol != 0 and os.path.exists(
                            level_state_graph_file_format %
                            default_answer_set_filename):
                        os.system("mv %s %s" % (level_state_graph_file_format %
                                                default_answer_set_filename,
                                                level_state_graph_file_format %
                                                cur_answer_set_filename))

                    if os.path.exists(level_structural_txt_file_format %
                                      cur_answer_set_filename):
                        print("Level txt path: %s" %
                              level_structural_txt_file_format %
                              cur_answer_set_filename)
Example #20
    "learning_rate": [0.001, 0.005, 0.01, 0.05, 0.1, 0.5],
    "dropout_rate": [0.1, 0.3, 0.5, 0.7, 0.9]
}

CNN_GRID = {
    "width": [13, 14, 15, 16, 17],
    "stride": [1],
    "rf_size": [4, 5, 6],
    # "hidden_size": [32, 64, 128, 256, 512],
    "batch_size": [32, 64, 128],
    "epochs": [50, 100, 200],
    "learning_rate": [0.001, 0.005],
    "dropout_rate": [0.1, 0.3, 0.5]
}

GRIDS = {
    "CNN": CNN_GRID,
    "MLP": MLP_GRID,
    "KNN": KNN_GRID,
    "RF": RF_GRID
}

LOG_DIRS = [
    get_directory() + '/Data_Sets/Logs/Android',
    get_directory() + '/Data_Sets/Logs/Apache',
    get_directory() + '/Data_Sets/Logs/Hadoop',
    get_directory() + '/Data_Sets/Logs/OpenStack',
    get_directory() + '/Data_Sets/Logs/Spark',
    get_directory() + '/Data_Sets/Logs/SSH'
]
Example #21
import logging

import requests
import telegram
from dotenv import load_dotenv

import vk
import fb
import tgram
import utils

logging.basicConfig(
    filename="post.log",
    filemode="w",
    format="%(asctime)s:%(message)s",
    level=logging.ERROR,
)

directory = utils.get_directory()

try:
    post = utils.get_post(directory)
except FileNotFoundError as error:
    exit(error)
except KeyError as error:
    exit(f"{error} is unsupported file format")

load_dotenv()
try:
    vk.post_to_group(post)
except requests.HTTPError as error:
    logging.error(error)

try:
Example #22
    return


if __name__ == "__main__":

    data_folder = "C:\\Users\\alexw\\Desktop\\Harvard_data\\"
    event = 'HS'

    # columns in X = ['id', 'time', 'ax_l', 'ay_l', 'az_l', 'ax_r', 'ay_r', 'az_r',
    # 				'ax_diff', 'ay_diff', 'az_diff', 'a_res_l', 'a_res_r', 'a_res_diff']
    columns = ['id', 'time', 'a_res_l', 'a_res_r']
    #columns = ['id', 'time', 'ax_diff', 'ay_diff', 'az_diff', 'a_res_diff'] # Columns that we want to use

    directory = get_directory(initial_directory=data_folder,
                              columns=columns,
                              est_events=False)  #, event=event)

    # Load features (after extract data has been run)
    X_dictionary, y_dictionary, groups = load_features(data_folder,
                                                       directory,
                                                       est_events=False)

    i = 0
    X = X_dictionary[groups[i]]
    y = y_dictionary[groups[i]]

    test_split = 0.33
    #learn(X, y, directory, test_split=test_split)
    learn(X_dictionary, y_dictionary, directory, groups)