Example #1
import networkx as nx

import util  # project-local helpers (get_file_list, get_nw, save_nw, create_directory)


def create_net(pkl_dir, mnt):
    pkl_file_list = util.get_file_list(pkl_dir, '.pkl')
    m_dict = {}

    # Group all files by year and month (the first 7 characters of the name)
    for file_name in pkl_file_list:
        fm = file_name[0:7]
        # dict.get with a [] default never returns None, so just append
        mf_list = m_dict.get(fm, [])
        mf_list.append(file_name)
        m_dict[fm] = mf_list

    # Create the directory that holds the per-month pkl files
    util.create_directory(mnt)
    # Merge all networks within each month
    for (k, vl) in m_dict.items():
        mg = nx.Graph()
        print(k)
        for file in vl:
            g1 = util.get_nw(pkl_dir + file)
            mg = nx.compose(mg, g1)
        util.save_nw(mg, mnt + k)
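
Across these examples, util.create_directory is expected to create the target directory if it is missing and to return the path, since several callers append file names to its return value. A minimal sketch consistent with that usage (the real helper may differ per repository):

import os

def create_directory(path):
    # Create path (and any missing parents) if needed, then return it so
    # callers can write create_directory(d) + "/run.log" as seen below.
    if not os.path.exists(path):
        os.makedirs(path)
    return path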
Example #2
    def __init__(self, interactor_queue, lock, config, env_config, learner_config, **bonus_kwargs):
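        # learner_name() is a subclass hook; calling it once and storing the
        # result shadows the method with its value for the rest of the run.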
        self.learner_name = self.learner_name()
        self.interactor_queue = interactor_queue
        self.learner_lock = lock
        self.config = config
        self.env_config = env_config
        self.learner_config = learner_config
        self.bonus_kwargs = bonus_kwargs
        self.kill_threads = False
        self.permit_desync = False
        self.need_frames_notification = threading.Condition()
        self._reset_inspections()
        self.total_frames = 0

        self.save_path = util.create_directory(
            "%s/%s/%s/%s" %
            (self.config["output_root"], self.config["env"]["name"],
             self.config["name"], self.config["save_model_path"]))
        self.log_path = util.create_directory(
            "%s/%s/%s/%s" %
            (self.config["output_root"], self.config["env"]["name"],
             self.config["name"],
             self.config["log_path"])) + "/%s.log" % self.learner_name

        # replay buffer to store data
        self.replay_buffer_lock = threading.RLock()
        self.replay_buffer = ReplayBuffer(self.learner_config["replay_size"],
                                          np.prod(self.env_config["obs_dims"]),
                                          self.env_config["action_dim"])

        # data loaders pull data from the replay buffer and put it into the tfqueue for model usage
        self.data_loaders = self.make_loader_placeholders()
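        # With synchronized updates (frames_per_update > 0) the queue needs
        # only ceil(1 / frames_per_update) slots; otherwise fall back to a
        # generous default capacity of 100.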
        queue_capacity = int(np.ceil(1./self.learner_config["frames_per_update"])) if self.learner_config["frames_per_update"] else 100
        self.tf_queue = tf.FIFOQueue(capacity=queue_capacity, dtypes=[dl.dtype for dl in self.data_loaders])
        self.enqueue_op = self.tf_queue.enqueue(self.data_loaders)
        self.current_batch = self.tf_queue.dequeue()

        # build the TF graph for the actual model to train
        self.core, self.train_losses, self.train_ops, self.inspect_losses = self.make_core_model()
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
Example #3
 def __write_service_file(path, data):
     create_directory(
         os.path.join(GitClient.CHECKOUT_DIR, os.path.dirname(path)))
     with open(os.path.join(GitClient.CHECKOUT_DIR, path),
               'w') as service_file:
         yaml.dump(data,
                   service_file,
                   default_flow_style=False,
                   allow_unicode=False)
Example #4
def log_config():
    HPS_PATH = util.create_directory("output/" + config["env"]["name"] + "/" +
                                     config["name"] + "/" +
                                     config["log_path"]) + "/hps.json"
    print("ROOT GPU: " + str(args.root_gpu) + "\n" + str(cstr))
    with open(HPS_PATH, "w") as f:
        f.write("ROOT GPU: " + str(args.root_gpu) + "\n" + str(cstr))
Example #5
    def __init__(self, options, exefile='mainFeatures'):
        """
            Initialize the parameters for calling the C++ program and provide
            the main interface for computing features.
            ftype    = feature type, one of [LQP, LBP, LTP, LBP+LTP]
            featdir  = directory path for storing feature files
            listfile = file listing absolute paths of images to compute features for
        """
        self.display(options)
        ftype = options.ftype.upper()
        
        if ftype not in ["LQP", "LBP", "LTP", "LBP+LTP"]:
            raise ValueError('Feature type must be one of "LQP", "LBP", '
                             '"LTP" or "LBP+LTP", not %s' % ftype)

        self.ftype = ftype  # store the upper-cased type used in comparisons below
        if not os.path.exists(options.featdir):
            print('Making Directory =', options.featdir)
            u.create_directory(options.featdir)
        self.featdir = options.featdir
        
        if not os.path.exists(options.listfile):
            raise ValueError("Given file (%s) for list of images does not exist"
                             % options.listfile)
        self.listfile = options.listfile
        self.rdir = os.getcwd()  # root directory
        self.exe = os.path.normpath(os.path.join(self.rdir, os.path.join(os.pardir, os.pardir), 'build', exefile))
        
        values = {'exefile':self.exe, 'color_channel':4,
                   'add_rgb':1, 'norm_sep':0, 'cell_size':options.cellsize,
                   'lbp_stride':options.cellsize, 'width':options.width, 'height':options.height,
                   'listfile':options.listfile, 'cbfile':options.cbfile }
        cmd = string.Template("${exefile} --Color-Channel=${color_channel} "
                              " --Add-RGBLBP=${add_rgb}  --Norm-Sep=${norm_sep} "
                              " --Cell-Size=${cell_size} --LBP-Stride=${lbp_stride} "
                              " --Win-Width=${width} --Win-Height=${height} --Validation-File=${cbfile}"
                              " --TrainingFile=${listfile}")
        cmd = cmd.safe_substitute(values)
       
        self.dirs = []  # contain the name of feature directories...
        cwd = os.getcwd()
        if self.ftype == "LQP":
            self.compute_lqp(cmd, options)
        else:
            self.compute_localpattern(cmd, options)
        os.chdir(cwd)
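
Note: the command strings here are assembled with string.Template.safe_substitute, which, unlike substitute, leaves unresolved ${...} placeholders in place instead of raising KeyError; that is what allows the base command built in __init__ to be completed later in compute_lqp / compute_localpattern. A standalone illustration:

from string import Template

t = Template("--Cell-Size=${cell_size} --LTP-Tolerance=${tol}")
print(t.safe_substitute({'cell_size': 8}))
# prints: --Cell-Size=8 --LTP-Tolerance=${tol}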
Example #6
 def compute_lqp(self, addcmd, options):
     """
         Compute LQP Features, 
         can be configured to run with different set of LQP types, coding types, tolerance, codebook size etc.
     """  
     values = { 'feature_type':14, 'cb_distance':0,
              'ncluster_rounds':10,
              'code_prune_count':10,
              'lqp_stride':1, 'norm':1, 'patch_type':options.lqptype, 'code_type':options.coding}
     cmd = string.Template(" --FeatureType=${feature_type}  --PatchType=${patch_type} "\
                         " --CodeBook-DMetric=${cb_distance} --ClusteringRounds=${ncluster_rounds} "\
                         "--Patch-PruneCount=${code_prune_count} --Patch-PatchStride=${lqp_stride} "\
                         " --Patch-CodingType=${code_type} --Normalization=${norm} ")
     cmd = addcmd + cmd.safe_substitute(values)
      
     lqpsize = u.check_islist(options.lqpsize)
     cbsize = u.check_islist(options.cbsize)
     tolerance = u.check_islist(options.tol)
     featdir = options.featdir
             
     count = 1
     for tol in tolerance:
         for lqps in lqpsize:
             for cbs in cbsize:
                 os.chdir(featdir)
                 dirname = 'lqp-size=' + str(lqps) + '-codebooksize=' + str(cbs) + '-tolerance=' + str(tol)
                 dirname = os.path.join(featdir, dirname)
                 if not os.path.exists(dirname):
                     print('Making Directory =', dirname)
                     u.create_directory(dirname)
                 self.dirs.append(dirname)
                 os.chdir(dirname)              
                 values = {'tol':tol, 'cbsize':cbs, 'patch_size':lqps}
                 addcmd = string.Template(" --LTP-Tolerance=${tol} --CodeBookSize=${cbsize}  --Patch-PatchSize=${patch_size} ")
                 addcmd = cmd + addcmd.safe_substitute(values)
                 print  "Running Process Number = ", count
                 count = count + 1
                 self.compute_features(addcmd)
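
The sweep above iterates over u.check_islist(...) for each option; a plausible sketch, assuming the helper merely normalizes a scalar option into a list (its actual implementation is not shown on this page):

def check_islist(value):
    # Hypothetical helper: wrap a scalar in a list so the parameter-sweep
    # loops can always iterate, whether one value or several was configured.
    return value if isinstance(value, list) else [value]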
Example #7
 def compute_localpattern(self, addcmd, options):
     """
         Function is used to compute Local Pattern (LBP, LTP, Features:
         
     """
     features = {"LBP":1, "LTP":2, "LBP+LTP":3}
     values = {'feature_type': features[self.ftype], 'norm':1}
     cmd = string.Template(" --FeatureType=${feature_type}  --Normalization=${norm} ")
     cmd = addcmd + cmd.safe_substitute(values)
     tolerance = u.check_islist(options.tol)  # used only for LTP and LBP+LTP; LBP ignores this option
     count = 1
     if self.ftype.upper() != "LBP":
         for tol in tolerance:
             os.chdir(self.featdir)
             dirname = self.ftype + '-tolerance=' + str(tol)
             dirname = os.path.join(self.featdir, dirname)
             if not os.path.exists(dirname):
                  print('Making Directory =', dirname)
                 u.create_directory(dirname)
             self.dirs.append(dirname)
             os.chdir(dirname)              
             values = {'tol':tol}
             addcmd = string.Template(" --LTP-Tolerance=${tol} ")
             addcmd = cmd + addcmd.safe_substitute(values)
             print  "Running Process Number = ", count
             count = count + 1
             self.compute_features(addcmd)
      else:
          os.chdir(self.featdir)
          dirname = self.ftype
          dirname = os.path.join(self.featdir, dirname)
          if not os.path.exists(dirname):
              print('Making Directory =', dirname)
              u.create_directory(dirname)
          self.dirs.append(dirname)
          os.chdir(dirname)
          self.compute_features(cmd)
Example #8
    def __init__(self,
                 thread_index=0,
                 game=None,
                 model_savefile=None,
                 network_class="ACLstmNet",
                 global_steps_counter=None,
                 scenario_tag=None,
                 run_id_string=None,
                 session=None,
                 tf_logdir=None,
                 global_network=None,
                 optimizer=None,
                 learning_rate=None,
                 test_only=False,
                 test_interval=1,
                 write_summaries=True,
                 enable_progress_bar=True,
                 deterministic_testing=True,
                 save_interval=1,
                 writer_max_queue=10,
                 writer_flush_secs=120,
                 gamma_compensation=False,
                 figar_gamma=False,
                 gamma=0.99,
                 show_heatmaps=True,
                 **settings):
        super(A3CLearner, self).__init__()

        log("Creating actor-learner #{}.".format(thread_index))
        self.thread_index = thread_index

        self._global_steps_counter = global_steps_counter
        self.write_summaries = write_summaries
        self.save_interval = save_interval
        self.enable_progress_bar = enable_progress_bar
        self._model_savefile = None
        self._train_writer = None
        self._test_writer = None
        self._summaries = None
        self._session = session
        self.deterministic_testing = deterministic_testing
        self.local_steps = 0
        # TODO epoch as tf variable?
        self._epoch = 1
        self.train_scores = []
        self.train_actions = []
        self.train_frameskips = []
        self.show_heatmaps = show_heatmaps
        self.test_interval = test_interval

        self.local_steps_per_epoch = settings["local_steps_per_epoch"]
        self._run_tests = settings["test_episodes_per_epoch"] > 0 and settings["run_tests"]
        self.test_episodes_per_epoch = settings["test_episodes_per_epoch"]
        self._epochs = np.float32(settings["epochs"])
        self.max_remembered_steps = settings["max_remembered_steps"]

        assert not (gamma_compensation and figar_gamma)

        gamma = np.float32(gamma)

        if gamma_compensation:
            self.scale_gamma = lambda fskip: ((1 - gamma ** fskip) / (1 - gamma), gamma ** fskip)
        elif figar_gamma:
            self.scale_gamma = lambda fskip: (1.0, gamma ** fskip)
        else:
            self.scale_gamma = lambda _: (1.0, gamma)
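        # Above: with frameskip f, gamma_compensation scales the reward by
        # (1 - gamma**f) / (1 - gamma) == 1 + gamma + ... + gamma**(f-1) and
        # discounts the bootstrap value by gamma**f, treating one skipped
        # step as f single-frame steps; figar_gamma only adjusts the discount.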

        if self.write_summaries and thread_index == 0 and not test_only:
            assert tf_logdir is not None
            self.run_id_string = run_id_string
            self.tf_models_path = settings["models_path"]
            create_directory(tf_logdir)

            if self.tf_models_path is not None:
                create_directory(self.tf_models_path)

        if game is None:
            self.doom_wrapper = VizdoomWrapper(**settings)
        else:
            self.doom_wrapper = game
        misc_len = self.doom_wrapper.misc_len
        img_shape = self.doom_wrapper.img_shape
        self.use_misc = self.doom_wrapper.use_misc

        self.actions_num = self.doom_wrapper.actions_num
        self.local_network = getattr(networks, network_class)(actions_num=self.actions_num, img_shape=img_shape,
                                                              misc_len=misc_len,
                                                              thread=thread_index, **settings)

        if not test_only:
            self.learning_rate = learning_rate
            # TODO check gate_gradients != Optimizer.GATE_OP
            grads_and_vars = optimizer.compute_gradients(self.local_network.ops.loss,
                                                         var_list=self.local_network.get_params())
            grads, local_vars = zip(*grads_and_vars)

            grads_and_global_vars = zip(grads, global_network.get_params())
            self.train_op = optimizer.apply_gradients(grads_and_global_vars, global_step=tf.train.get_global_step())

            self.global_network = global_network
            self.local_network.prepare_sync_op(global_network)

        if self.thread_index == 0 and not test_only:
            self._model_savefile = model_savefile
            if self.write_summaries:
                self.actions_placeholder = tf.placeholder(tf.int32, None)
                self.frameskips_placeholder = tf.placeholder(tf.int32, None)
                self.scores_placeholder, summaries = setup_vector_summaries(scenario_tag + "/scores")

                # TODO remove scenario_tag from histograms
                a_histogram = tf.summary.histogram(scenario_tag + "/actions", self.actions_placeholder)
                fs_histogram = tf.summary.histogram(scenario_tag + "/frameskips", self.frameskips_placeholder)
                score_histogram = tf.summary.histogram(scenario_tag + "/scores", self.scores_placeholder)
                lr_summary = tf.summary.scalar(scenario_tag + "/learning_rate", self.learning_rate)
                summaries.append(lr_summary)
                summaries.append(a_histogram)
                summaries.append(fs_histogram)
                summaries.append(score_histogram)
                self._summaries = tf.summary.merge(summaries)
                self._train_writer = tf.summary.FileWriter("{}/{}/{}".format(tf_logdir, self.run_id_string, "train"),
                                                           flush_secs=writer_flush_secs, max_queue=writer_max_queue)
                self._test_writer = tf.summary.FileWriter("{}/{}/{}".format(tf_logdir, self.run_id_string, "test"),
                                                          flush_secs=writer_flush_secs, max_queue=writer_max_queue)
Example #9
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import numpy as np
import tensorflow as tf
# import moviepy.editor as mpy
import time, os, traceback, multiprocessing, portalocker, sys

import envwrap
import util
import valuerl, worldmodel
from config import config

MODEL_NAME = config["name"]
LOG_PATH = util.create_directory("output/" + config["env"] + "/" + MODEL_NAME +
                                 "/" + config["log_path"]) + "/" + MODEL_NAME
LOAD_PATH = util.create_directory("output/" + config["env"] + "/" +
                                  MODEL_NAME + "/" + config["save_model_path"])
OBS_DIM = np.prod(config["obs_dims"])
HIDDEN_DIM = config["hidden_dim"]
ACTION_DIM = config["action_dim"]
MAX_FRAMES = config["max_frames"]
REWARD_SCALE = config["reward_scale"]
DISCOUNT = config["discount"]
ALGO = config["policy_config"]["algo"]
AGENT_BATCH_SIZE = config["agent_config"]["batch_size"]
EVALUATOR_BATCH_SIZE = config["evaluator_config"]["batch_size"]
RELOAD_EVERY_N = config["agent_config"]["reload_every_n"]
FRAMES_BEFORE_LEARNING = config["policy_config"]["frames_before_learning"]
FRAMES_PER_UPDATE = config["policy_config"]["frames_per_update"]
LEARNER_EPOCH_N = config["policy_config"]["epoch_n"]
Example #10
    def predict(self):
        try:
            if not util.check_valid_path(self.en_input_file.get()):
                messagebox.showwarning("Warning",
                                       "Your input image is invalid.")
            elif not util.check_valid_path(self.en_model_file.get()):
                messagebox.showwarning("Warning",
                                       "Your input model is invalid.")
            elif util.check_valid_path(
                    self.en_input_file.get()) and util.check_valid_path(
                        self.en_model_file.get()):
                # load label
                print("[INFO]: LOADING LABEL...")
                self.__label_data = pd.read_csv(cons.SIGNNAMES)
                self.__label_values = self.__label_data['SignName'].values
                print("[INFO]: FINISH LOADING LABEL.")

                # load Model
                print("[INFO]: LOADING MODEL...")
                self.__model = load_model(self.en_model_file.get())
                print("[INFO]: FINISH LOADING MODEL.")

                # PREDICT PROCESS
                print("[INFO]: PREDICTING IMAGE...")
                img = cv2.imread(self.en_input_file.get())

                # get ROI values in Test.csv
                y_test = pd.read_csv("./input/Test.csv")
                x1_val = y_test['Roi.X1'].values
                y1_val = y_test['Roi.Y1'].values
                x2_val = y_test['Roi.X2'].values
                y2_val = y_test['Roi.Y2'].values

                if self.__is_test_set_img:
                    try:
                        img_bbx = img.copy()
                        x1, y1, x2, y2 = util.get_roi(self.en_input_file.get(),
                                                      x1_val, y1_val, x2_val,
                                                      y2_val)
                        proposal = img[y1:y2, x1:x2]
                        result = util.recognize_sign([proposal],
                                                     self.__model)[0]
                        sign_name = util.load_name(result, self.__label_values)
                        if len(sign_name) > 0:
                            # wm = plt.get_current_fig_manager()
                            # wm.window.showMaximized()
                            plt.imshow(cv2.cvtColor(img_bbx,
                                                    cv2.COLOR_BGR2RGB))
                            plt.axis("off")
                            plt.title("Result of prediction: " + sign_name)
                            plt.show()
                        else:
                            messagebox.showinfo(
                                "Information",
                                "Sorry, cannot recognize any traffic sign in this image."
                            )
                    except Exception as ex:
                        print(ex)
                        messagebox.showinfo(
                            "Information",
                            "Sorry, cannot recognize any traffic sign in this image."
                        )
                else:
                    # convert Image to Binary Image
                    img_bbx = img.copy()
                    rows, cols, _ = img.shape
                    img_bin = util.preprocess_img(img, False)

                    # localize Traffic Sign (find Contours and draw to Image)
                    min_area = img_bin.shape[0] * img.shape[1] / (25 * 25)
                    rects = util.detect_contour(img_bin, min_area=min_area)
                    img_rects = util.draw_rects_on_img(img, rects)

                    sign_names = []
                    sep = ', '

                    # recognize Traffic sign
                    for rect in rects:
                        xc = int(rect[0] + rect[2] / 2)
                        yc = int(rect[1] + rect[3] / 2)
                        size = max(rect[2], rect[3])
                        x1 = max(0, int(xc - size / 2))
                        y1 = max(0, int(yc - size / 2))
                        x2 = min(cols, int(xc + size / 2))
                        y2 = min(rows, int(yc + size / 2))
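                        # Crop a square patch centered on the detected rect,
                        # clamped to the image bounds, so the classifier sees
                        # the whole sign regardless of the rect's aspect ratio.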
                        proposal = img[y1:y2, x1:x2]
                        result = util.recognize_sign([proposal],
                                                     self.__model)[0]
                        cv2.rectangle(img_bbx, (rect[0], rect[1]),
                                      (rect[0] + rect[2], rect[1] + rect[3]),
                                      (0, 0, 255), 2)
                        cv2.putText(img_bbx, str(result), (rect[0], rect[1]),
                                    1, 1.5, (0, 0, 255), 2)
                        cv2.putText(
                            img_bbx, util.load_name(result,
                                                    self.__label_values),
                            (rect[0], rect[1] + rect[3] + 20), 1, 1.5,
                            (0, 0, 255), 2)
                        sign_names.append(
                            util.load_name(result, self.__label_values))

                    if len(sign_names) > 0:
                        sep_res = sep.join(sign_names).replace('/', '-')
                        time = datetime.now().strftime("%Y-%m-%d %Hh-%Mm-%Ss")
                        dir_path = cons.RESULT_PATH + time
                        util.create_directory(dir_path)
                        res_path = dir_path + '\\{}_' + sep_res.replace(
                            ',', '-') + '.jpg'
                        cv2.imwrite(res_path.format("BIN_IMAGE"), img_bin)
                        print("[INFO]: Saved binary image to " +
                              res_path.format("BIN_IMAGE"))
                        cv2.imwrite(res_path.format("RECTS_IMAGE"), img_rects)
                        print("[INFO]: Saved rects image to " +
                              res_path.format("RECTS_IMAGE"))
                        cv2.imwrite(res_path.format("PREDICTED_IMAGE"),
                                    img_bbx)
                        print("[INFO]: Saved predicted image to " +
                              res_path.format("PREDICTED_IMAGE"))
                        # wm = plt.get_current_fig_manager()
                        # wm.window.showMaximized()
                        plt.imshow(cv2.cvtColor(img_bbx, cv2.COLOR_BGR2RGB))
                        plt.axis("off")
                        plt.title("Result of prediction: " + sep_res)
                        plt.show()
                        messagebox.showinfo(
                            "Information",
                            "Finished. Your result is saved in {}\\".format(
                                dir_path))
                    else:
                        messagebox.showinfo(
                            "Information",
                            "Sorry, cannot recognize any traffic sign in this image."
                        )

                print("[INFO]: FINISH PREDICTING IMAGE.")
        except Exception as ex:
            print(ex)
            messagebox.showerror("Error",
                                 "Error occurred while predicting image!")
Example #11
def print_sep(sep="-", width=100):
    print(sep * width)
    return


if __name__ == "__main__":

    try:
        terminal_columns, terminal_rows = os.get_terminal_size(0)
    except OSError:
        terminal_columns = 100

    params = vars(args)
    print("PARAMETERS:", params)
    create_directory("../results/")
    output_sufix_formatter = "_{}_sent_{}_ep_{}"
    print()

    if params["model"] not in ['BOTH', 'SP', 'RNN']:
        print("Invalid Model!")

    if params["model"] in ['BOTH', 'SP']:
        print_sep(width=terminal_columns)
        print("Starting SP:")
        print_sep(width=terminal_columns)
        params["output_sufix"] = output_sufix_formatter.format(
            "SP", params["number_sentences"], params["number_epochs"])
        import sp
        sp.main(params)
        print_sep(width=terminal_columns)
Example #12
def do_simulations():
    delete_directory("./results")
    create_directory("./results")

    for index in file_indexes:
        print("#################### PROCESSING SCENARIO " + str(index) +
              " #########################")
        path_files = "./results/scenario-" + str(index)
        create_directory(path_files)

        print("#### Reading labbeled file: " + list_labelled_files[index])
        df = read_labelled_file(list_labelled_files[index])
        print("#### Generating baseline labbeled file: " +
              list_labelled_files[index])
        baseline_bot = generate_botnet_baseline(df)
        num_bot_5tflows = len(baseline_bot["bot-5-tuples"])

        print("#### Reading pcap's csv file: " + list_frompcap_files[index])
        df = read_pcap_file(list_frompcap_files[index])

        #baseline_hh = {}
        print("#### Generating baseline pcap's csv file: " +
              list_labelled_files[index])
        for time_window in array_time_window:
            out_file_hh = csv.writer(open(
                path_files + "/hh_stats_f" + str(index) + "_w" +
                str(time_window) + ".csv", "w", newline=''),
                                     delimiter=';',
                                     quoting=csv.QUOTE_ALL)
            columns = [
                "index_chunk", "bound", "total_num_flows",
                "total_num_hh_flows", "total_num_5tflows",
                "total_num_hh_5tflows", "pkts_to_start_hhtests", "TP", "TN",
                "FP", "FN", "TPR (Recall)", "TNR", "FPR", "FNR", "Precision",
                "Accuracy", "F1_Score"
            ]
            out_file_hh.writerow(columns)

            out_file_bot = csv.writer(open(
                path_files + "/bot_stats_f" + str(index) + "_w" +
                str(time_window) + ".csv", "w", newline=''),
                                      delimiter=';',
                                      quoting=csv.QUOTE_ALL)
            columns = [
                "bound", "total_num_bot_5tflows", "pkts_to_start_hhtests",
                "TP", "FN", "TPR (Recall)"
            ]
            out_file_bot.writerow(columns)

            time = str(time_window) + 's'
            chunks = df.groupby(pd.Grouper(freq=time))
            index_chunk = 1
            results_bot_5tflows = {}
            for name, c in chunks:
                for bound in array_bound:
                    print "### Time Window: " + str(
                        time_window) + " - Time Slot: " + str(
                            index_chunk) + " - Bound: " + str(bound)

                    if str(bound) not in results_bot_5tflows:
                        results_bot_5tflows[str(bound)] = {}

                    baseline_hh = generate_baseline(c, bound)
                    data = [
                        index_chunk, bound,
                        len(baseline_hh["packets"]) +
                        len(baseline_hh["tn_packets"]),
                        len(baseline_hh["packets"]),
                        len(baseline_hh["5t_packets"]) +
                        len(baseline_hh["5t_tn_packets"]),
                        len(baseline_hh["5t_packets"])
                    ]

                    data_pkts_to_start = [[]]
                    data_hh_stats = [[], [], [], [], [], [], [], [], [], [],
                                     []]
                    for pkts_to_start in array_pkts_to_start:
                        print "## Packets to Start HH Tests: " + str(
                            pkts_to_start)

                        if str(pkts_to_start) not in results_bot_5tflows[str(
                                bound)]:
                            results_bot_5tflows[str(bound)][str(
                                pkts_to_start)] = set()

                        data_pkts_to_start[0].append(pkts_to_start)
                        results = simulate_rtp4mon(c, bound, pkts_to_start)
                        results_bot_5tflows[str(bound)][str(
                            pkts_to_start)] = results_bot_5tflows[str(bound)][
                                str(pkts_to_start)] | results["5t_packets"]
                        hh_stats = generate_hh_stats(baseline_hh, results)
                        for i in range(0, len(data_hh_stats)):
                            data_hh_stats[i].append(hh_stats[i])

                    data = data + data_pkts_to_start + data_hh_stats
                    out_file_hh.writerow(data)
                index_chunk += 1

            for bound in array_bound:
                data_pkts_to_start = [[]]
                data_bot_stats = [[], [], []]
                data = [bound, num_bot_5tflows]
                for pkts_to_start in array_pkts_to_start:
                    data_pkts_to_start[0].append(pkts_to_start)
                    bot_stats = generate_bot_stats(
                        baseline_bot,
                        results_bot_5tflows[str(bound)][str(pkts_to_start)])
                    for i in range(0, len(data_bot_stats)):
                        data_bot_stats[i].append(bot_stats[i])
                data = data + data_pkts_to_start + data_bot_stats
                out_file_bot.writerow(data)
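
The hh-stats columns above (TP, TN, FP, FN, TPR, TNR, FPR, FNR, Precision, Accuracy, F1_Score) suggest generate_hh_stats reports standard confusion-matrix metrics; for reference, a minimal sketch of those formulas (the actual helper is not shown here):

def confusion_metrics(tp, tn, fp, fn):
    # Standard definitions, guarding against empty denominators.
    tpr = tp / (tp + fn) if tp + fn else 0.0  # recall / sensitivity
    tnr = tn / (tn + fp) if tn + fp else 0.0  # specificity
    fpr = fp / (fp + tn) if fp + tn else 0.0
    fnr = fn / (fn + tp) if fn + tp else 0.0
    precision = tp / (tp + fp) if tp + fp else 0.0
    total = tp + tn + fp + fn
    accuracy = (tp + tn) / total if total else 0.0
    f1 = 2 * precision * tpr / (precision + tpr) if precision + tpr else 0.0
    return tpr, tnr, fpr, fnr, precision, accuracy, f1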
Example #13
    def __init__(self,
                 scenario_tag=None,
                 model_savefile=None,
                 run_id_string=None,
                 network_class="DQNNet",
                 write_summaries=True,
                 tf_logdir="tensorboard_logs",
                 epochs=100,
                 train_steps_per_epoch=1000000,
                 test_episodes_per_epoch=100,
                 run_tests=True,
                 initial_epsilon=1.0,
                 final_epsilon=0.0000,
                 epsilon_decay_steps=10e07,
                 epsilon_decay_start_step=2e05,
                 frozen_steps=5000,
                 batchsize=32,
                 memory_capacity=10000,
                 update_pattern=(4, 4),
                 prioritized_memory=False,
                 enable_progress_bar=True,
                 save_interval=1,
                 writer_max_queue=10,
                 writer_flush_secs=120,
                 dynamic_frameskips=None,
                 **settings):

        if prioritized_memory:
            # TODO maybe some day ...
            raise NotImplementedError(
                "Prioritized memory not implemented. Maybe some day.")

        if dynamic_frameskips:
            if isinstance(dynamic_frameskips, (list, tuple)):
                self.frameskips = list(dynamic_frameskips)
            elif isinstance(dynamic_frameskips, int):
                self.frameskips = list(range(1, dynamic_frameskips + 1))
        else:
            self.frameskips = [None]

        self.update_pattern = update_pattern
        self.write_summaries = write_summaries
        self._settings = settings
        self.run_id_string = run_id_string
        self.train_steps_per_epoch = train_steps_per_epoch
        self._run_tests = test_episodes_per_epoch > 0 and run_tests
        self.test_episodes_per_epoch = test_episodes_per_epoch
        self._epochs = np.float32(epochs)

        self.doom_wrapper = VizdoomWrapper(**settings)
        misc_len = self.doom_wrapper.misc_len
        img_shape = self.doom_wrapper.img_shape
        self.use_misc = self.doom_wrapper.use_misc
        self.actions_num = self.doom_wrapper.actions_num
        self.replay_memory = ReplayMemory(img_shape,
                                          misc_len,
                                          batch_size=batchsize,
                                          capacity=memory_capacity)
        self.network = getattr(networks, network_class)(
            actions_num=self.actions_num * len(self.frameskips),
            img_shape=img_shape,
            misc_len=misc_len,
            **settings)

        self.batchsize = batchsize
        self.frozen_steps = frozen_steps

        self.save_interval = save_interval

        self._model_savefile = model_savefile
        ## TODO move summaries somewhere so they are consistent between dqn and asyncs
        if self.write_summaries:
            assert tf_logdir is not None
            create_directory(tf_logdir)

            self.scores_placeholder, summaries = setup_vector_summaries(
                scenario_tag + "/scores")
            self._summaries = tf.summary.merge(summaries)
            self._train_writer = tf.summary.FileWriter(
                "{}/{}/{}".format(tf_logdir, self.run_id_string, "train"),
                flush_secs=writer_flush_secs,
                max_queue=writer_max_queue)
            self._test_writer = tf.summary.FileWriter(
                "{}/{}/{}".format(tf_logdir, self.run_id_string, "test"),
                flush_secs=writer_flush_secs,
                max_queue=writer_max_queue)
        else:
            self._train_writer = None
            self._test_writer = None
            self._summaries = None
        self.steps = 0
        # TODO epoch as tf variable?
        self._epoch = 1

        # Epsilon
        self.epsilon_decay_rate = (initial_epsilon -
                                   final_epsilon) / epsilon_decay_steps
        self.epsilon_decay_start_step = epsilon_decay_start_step
        self.initial_epsilon = initial_epsilon
        self.final_epsilon = final_epsilon

        self.enable_progress_bar = enable_progress_bar
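
The four epsilon fields define a linear decay schedule; the getter is not part of this snippet, but under the usual reading it would look like this hypothetical sketch:

    def get_current_epsilon(self):
        # Hypothetical: hold initial_epsilon until decay starts, then decay
        # linearly by epsilon_decay_rate per step, floored at final_epsilon.
        steps_past_start = max(0, self.steps - self.epsilon_decay_start_step)
        return max(self.final_epsilon,
                   self.initial_epsilon -
                   steps_past_start * self.epsilon_decay_rate)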
Example #14
    def run(self, src, dest):
        print(src, dest)
        if not os.path.exists(src):
            raise Exception("Source path not valid: %s" % src)
        elif not os.path.exists(dest):
            raise Exception("Destination path not valid: %s" % dest)

        src = os.path.normpath(src)
        dest = os.path.normpath(dest)

        if dest.find(src) == 0:
            raise Exception("Destination must not be inside Source path.")

        history = {}

        w = os.walk(src)

        for root, dirs, files in w:
            print(root)

            for f in files:
                fullpath = os.path.join(root, f)
                if self.time_type == MTIME:
                    t = os.path.getmtime(fullpath)
                elif self.time_type == CTIME:
                    t = os.path.getctime(fullpath)
                elif self.time_type == ATIME:
                    t = os.path.getatime(fullpath)
                else:
                    raise Exception("time_type is not one of MTIME, CTIME, ATIME.")

                ts = time.localtime(t)

                if self.keep_directory:
                    subfolder = root[len(src)+1:]
                    outdir = os.path.join(dest, subfolder, self.get_outdir(ts))
                else:
                    outdir = os.path.join(dest, self.get_outdir(ts))

                outfile = os.path.join(outdir, f)

                if outfile not in history and not os.path.exists(outfile):
                    version = 0
                else:
                    name = f
                    ext = ''
                    if name.rfind('.') > 0:  # Has ext and not .something.
                        (name, ext) = name.rsplit('.', 1)
                        ext = '.' + ext
                    if outfile not in history:
                        # Attempt to see if the file already ends in a number
                        # in _### format.
                        if name.rfind('_') != -1:
                            (tname, tver) = name.rsplit('_', 1)
                            try:
                                version = int(tver)
                                name = tname
                            except ValueError:
                                version = 1
                        else:
                            version = 1
                    else:
                        version = history[outfile]

                    # Put the versioned filename back together.
                    fn = name + '_' + str(version) + ext
                    outfile = os.path.join(outdir, fn)

                    # Unlikely, but just in case of bizarre conflicts.
                    while os.path.exists(outfile):
                        version += 1
                        fn = name + '_' + str(version) + ext
                        outfile = os.path.join(outdir, fn)

                history[outfile] = version + 1

                print "in: %s, out: %s" % (fullpath, outfile)

                # Actually copy/move the file.
                if not os.path.exists(outdir):
                    util.create_directory(outdir)
                if self.op_type == MOVE:
                    shutil.move(fullpath, outfile)
                else:
                    shutil.copy2(fullpath, outfile)

            if not self.recurse:
                break
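
get_outdir and the time_type / keep_directory knobs are defined elsewhere in this class; a plausible sketch of get_outdir, assuming it buckets files into year-month folders (the real layout may differ):

    def get_outdir(self, ts):
        # Hypothetical: ts is a time.struct_time; map it to a "YYYY-MM"
        # folder name so files are grouped by month.
        return time.strftime("%Y-%m", ts)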
Example #15
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import numpy as np
import tensorflow as tf
# import moviepy.editor as mpy
import time, os, traceback, multiprocessing, portalocker, sys

import envwrap
import util
import valuerl, worldmodel
from config import config

MODEL_NAME = config["name"]
LOG_PATH = util.create_directory("output/" + config["env"] + "/" + MODEL_NAME + "/" + config["log_path"]) + "/" + MODEL_NAME
LOAD_PATH = util.create_directory("output/" + config["env"] + "/" + MODEL_NAME + "/" + config["save_model_path"])
OBS_DIM = np.prod(config["obs_dims"])
HIDDEN_DIM = config["hidden_dim"]
ACTION_DIM = config["action_dim"]
MAX_FRAMES = config["max_frames"]
REWARD_SCALE = config["reward_scale"]
DISCOUNT = config["discount"]
ALGO = config["policy_config"]["algo"]
AGENT_BATCH_SIZE = config["agent_config"]["batch_size"]
EVALUATOR_BATCH_SIZE = config["evaluator_config"]["batch_size"]
RELOAD_EVERY_N = config["agent_config"]["reload_every_n"]
FRAMES_BEFORE_LEARNING = config["policy_config"]["frames_before_learning"]
FRAMES_PER_UPDATE = config["policy_config"]["frames_per_update"]
LEARNER_EPOCH_N = config["policy_config"]["epoch_n"]
SYNC_UPDATES = config["policy_config"]["frames_per_update"] >= 0