Example #1
    def visualize_grid(self):

        # get list of houses and batteries, and visualize the grid
        list_houses = optimalorder
        list_batteries = self.batteries
        visualize_grid = Visualize(list_houses, list_batteries)
        visualize_grid.visualize_all(list_houses, list_batteries,
                                     optimallength)

        # print the best and worst score, standard deviation and mean
        print("best: ", min(lengths))
        print("worst: ", max(lengths))
        print("sd: ", np.std(lengths))
        print("mean: ", np.mean(lengths))

        # plot a histogram with the score (x-axis) and count (y-axis)
        unique_lengths = set(lengths)
        count_unique = len(unique_lengths)
        bins = np.linspace(math.ceil(min(lengths)), math.floor(max(lengths)),
                           count_unique)
        plt.xlim([min(lengths), max(lengths)])
        plt.hist(lengths, bins=bins, alpha=1)
        plt.title('Shuffle algorithm (iterations: 500 000)')
        plt.xlabel('Score')
        plt.ylabel('Count per score')
        plt.show()
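The method body reads the module-level names optimalorder, optimallength, and lengths, which this excerpt does not define. A minimal sketch of a shuffle driver that could populate them; houses, random_order, and total_length are hypothetical stand-ins for the project's own data and helpers:

lengths = []
optimallength = float('inf')
optimalorder = None
for _ in range(500000):
    order = random_order(houses)    # hypothetical: a random house ordering
    length = total_length(order)    # hypothetical: total cable length of that ordering
    lengths.append(length)
    if length < optimallength:
        optimallength, optimalorder = length, order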
Example #2
class Experiment:
    def __init__(self, world_dim, nagents, agent_xy, goal_xy):
        self.world = world(world_dim[1], world_dim[0])
        self.vis = Visualize(self.world)
        self.list_agents = []
        for index in range(nagents):
            self.list_agents.append(
                self.world.new_agent(agent_xy[index][1], agent_xy[index][0],
                                     goal_xy[index][1], goal_xy[index][0]))
        self.init_vis()

    def init_vis(self):
        self.vis.draw_world()
        self.vis.draw_agents()
        self.vis.canvas.pack()
        self.vis.canvas.update()

    def run_random(self, ts, T):
        nsteps = int(T / ts)
        nagents = len(self.list_agents)
        for step in range(nsteps):
            random.shuffle(self.list_agents)
            for agent in self.list_agents:
                agent.move(random.choice(agent.get_move_actions()))
                self.vis.canvas.update()
                self.vis.canvas.after(int((ts * 900) / nagents))
            print('\n')
            for agent in self.list_agents:
                print(agent)
            print('\n\n')
            self.vis.canvas.after(int(ts * 100))
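A hypothetical driver for this class; the index swaps in __init__ above suggest world_dim and the coordinate pairs are given as (y, x):

# Two agents on a 10x10 grid, each heading for the other's start cell.
exp = Experiment(world_dim=(10, 10), nagents=2,
                 agent_xy=[(0, 0), (9, 9)],
                 goal_xy=[(9, 9), (0, 0)])
exp.run_random(ts=1.0, T=30.0)  # 30 steps of one random move per agent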
Example #3
    def visualize_grid(self):

        # get list of houses and batteries, and visualize the grid
        list_houses = self.houses
        list_batteries = self.batteries
        visualize_grid = Visualize(list_houses, list_batteries)
        visualize_grid.visualize_all(list_houses, list_batteries)
Example #4
    def run_one_game(player_1: ANET, player_2: ANET, visualize: bool) -> int:
        """
        Runs exactly one game with the provided players.
        """
        world = SimulatedWorldFactory.get_simulated_world()
        current_state = world.reset()

        if visualize and parameters.GAME_TYPE == Game.Hex:
            Visualize.initialize_board(current_state)

        players = (player_1, player_2)
        i = 0
        winner = 0
        while not world.is_final_state():
            legal_actions = world.get_legal_actions()

            action = players[i].choose_greedy(current_state, legal_actions)
            current_state, winner = world.step(action)

            # Alternating players
            i = (i + 1) % 2

            if visualize and parameters.GAME_TYPE == Game.Hex:
                Visualize.draw_board(current_state, winner, str(player_1), str(player_2))

        print(f'Player {winner} won the game.')
        return winner
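A hedged sketch of head-to-head evaluation with run_one_game; the ANET constructor argument is an assumption, not the project's actual signature:

agent_a = ANET('0.h5')     # hypothetical: agent built from the untrained checkpoint
agent_b = ANET('100.h5')   # hypothetical: agent built from a later checkpoint
wins = {0: 0, 1: 0, 2: 0}  # 0 kept in case world.step reports no winner
for _ in range(50):
    wins[run_one_game(agent_a, agent_b, visualize=False)] += 1
print(wins)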
Example #5
File: main.py Project: flpolyproject/ATNE
def start_simulation(Env, sim_number, gui=False, _seed=None):

    if gui:
        traci.start(["sumo-gui", "-c", Settings.sumo_config])
    else:
        traci.start(["sumo", "-c", Settings.sumo_config])

    env = Env(sim_number=sim_number, _seed=_seed)
    my_vis = Visualize(env)

    while True:

        traci.simulationStep()
        traci.addStepListener(env)
        if gui:
            my_vis.show()  # this is for visualization of the path
        if env.break_condition:
            break

    print("veh succesffuly arrived ", env.sim_env.success_veh)
    traci.close()

    env.post_process.to_csv()

    return env.sim_env
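A minimal sketch of calling this entry point, assuming Env is an environment class with the constructor used above (MyEnv is hypothetical):

results = []
for n in range(10):
    # ten headless runs with distinct but reproducible seeds
    results.append(start_simulation(MyEnv, sim_number=n, gui=False, _seed=n))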
Example #6
def start_simulation(Env, sim_number, gui=False, _seed=None, setting_obj=None, dir_name=None, main_env=None, new_players=False):

	#testpath = "./../map/london-seg4/data/london-seg4.sumocfg"

	if gui:
		traci.start(["sumo-gui", "-c", Settings.sumo_config])
	else:
		traci.start(["sumo", "-c", Settings.sumo_config])


	
	env = Env(sim_number=sim_number, _seed=_seed, setting_obj=setting_obj, main_env=main_env, new_players=new_players)
	my_vis = Visualize(env)

	while True:

		traci.simulationStep()
		traci.addStepListener(env)
		if gui:
			my_vis.show()  # this is for visualization of the path
		if env.break_condition:
			break

	#env.reward_to_json(os.path.join(dir_name, f"{sim_number}"))

	print("veh succesffuly arrived ", env.sim_env.success_veh)
	traci.close()



	#env.post_process.to_csv()
	return env.post_process, env
Example #7
File: lmnn.py Project: hkrsnd/ml-pqgram
 def __init__(self, problem, k=3, target_k=3, lr=1e-2, wd=1e-3, epoch=500, b=50, margin=5.0, push_margin=5.0):
     """
     problem: the Problem class
     k: the number of neighbors of k-nearest neighbor classification
     target_k: the number of neighbors for the Large Margin Nearest Neighbor learning
     lr: learning rate for gradient descent using Adam optimizer
     wd: the weight decay parameter for gradient descent
     epoch: the number of epochs
     b: interval for executing test
     margin: pull margin for positive pairs
     push_margin: push margin for negative pairs
     """
     self.problem = problem
     self.X_train = problem.X_train
     self.y_train = problem.y_train
     self.X_test = problem.X_test
     self.y_test = problem.y_test
     self.k = k
     self.target_k = target_k
     self.lr = lr
     self.wd = wd
     self.epoch = epoch
     self.b = b
     self.margin = torch.tensor([margin], dtype=torch.float64)
     self.push_margin = torch.tensor([push_margin], dtype=torch.float64)
     self.target_dic = {}
     self.imposter_dic = {}
     self.set_optimizer(self.problem.get_params())
     self.create_pairs()
     self.vis = Visualize(problem)
Example #8
    def __init__(self):
        self.logfile = None
        self.gettrace = getattr(sys, 'gettrace', None)
        self.original_stdout = sys.stdout
        self.timestr = time.strftime("%Y%m%d-%H%M%S")
        self.log_file()

        print(__doc__)

        self.filehandler = Filehandler()
        self.ds = KDDCup1999()
        self.visualize = Visualize()
        self.random_state = 20
        self.X = None
        self.y = None
        self.full = None
        self.ac_count = {}
        self.scores = OrderedDict()
        self.scale_cols = ['duration', 'src_bytes', 'dst_bytes', 'land', 'wrong_fragment', 'urgent', 'hot',
                           'num_failed_logins', 'logged_in', 'num_compromised', 'root_shell', 'su_attempted',
                           'num_root', 'num_file_creations', 'num_shells', 'num_access_files', 'is_guest_login',
                           'count', 'srv_count', 'serror_rate', 'rerror_rate', 'diff_srv_rate', 'srv_diff_host_rate',
                           'dst_host_count', 'dst_host_srv_count', 'dst_host_diff_srv_rate',
                           'dst_host_same_src_port_rate', 'dst_host_srv_diff_host_rate']

        with timer('\nLoading dataset'):
            self.load_data()
            self.set_attack_category_count()
        with timer('\nEncoding categoricals'):
            le = preprocessing.LabelEncoder()
            self.full['protocol_type'] = le.fit_transform(self.full['protocol_type'])
            self.full['service'] = le.fit_transform(self.full['service'])
            self.full['flag'] = le.fit_transform(self.full['flag'])
        with timer('\nSetting X'):
            self.set_X()
            self.ds.shape()
        with timer('\nDistribution Before Scaling'):
            self.dist_before_scaling()
        with timer('\nScaling'):
            for scaler in (StandardScaler(),
                           Normalizer(),
                           MinMaxScaler(feature_range=(0, 1)),
                           Binarizer(threshold=0.0),
                           RobustScaler(quantile_range=(25, 75)),
                           PowerTransformer(method='yeo-johnson'),
                           QuantileTransformer(output_distribution='normal')):
                title, res_x = self.scale(scaler)

                label = 'attack_category'
                self.set_y(label)
                self.model_and_score(scaler, res_x, title, label)

                label = 'target'
                self.set_y(label)
                self.model_and_score(scaler, res_x, title, label)

        self.log_file()
        print('Finished')
Example #9
    def run(self):

        trip = Trip(3, 3, 3, True)
        environment = SimpleEnvironment(trip)
        visualize = Visualize(environment.NumberOfStops,
                              environment.MaxTripTime, environment.MaxBattery,
                              environment.ExpectedTripTime)
        drivingRewards, chargingRewards = environment.GetRewards()
        visualize.VisualizeRewards(drivingRewards, chargingRewards)
Example #10
 def __init__(self, world_dim, nagents, agent_xy, goal_xy):
     self.world = world(world_dim[1], world_dim[0])
     self.vis = Visualize(self.world)
     self.list_agents = []
     for index in range(nagents):
         self.list_agents.append(
             self.world.new_agent(agent_xy[index][1], agent_xy[index][0],
                                  goal_xy[index][1], goal_xy[index][0]))
     self.init_vis()
Example #11
 def run(self):
     trip = Trip(10, 10, 5, True)
     environment = SimpleEnvironment(trip)
     visualize = Visualize(environment.NumberOfStops, environment.MaxTripTime, environment.MaxBattery, environment.ExpectedTripTime)
     optimizer = Optimizer()
     values = optimizer.ComputeExpectedValue(environment)
     policy = optimizer.GetOptimalPolicy(values, environment)
     visualize.VisualizeValueTable(values)
     visualize.VisualizePolicy(policy)
Example #12
    def __init__(self):
        self.logfile = False
        self.gettrace = getattr(sys, 'gettrace', None)
        self.original_stdout = sys.stdout
        self.timestr = time.strftime("%Y%m%d-%H%M%S")
        self.log_file()

        print(__doc__)

        self.filehandler = Filehandler()
        self.visualize = Visualize()
        self.ds = KDDCup1999()

        with timer('\nLoading dataset'):
            self.ds.dataset = self.filehandler.read_csv(
                self.ds.config['path'], self.ds.config['file'])
            self.ds.set_columns()
        with timer('\nTransforming dataset'):
            self.ds.transform()
        with timer('\nInitial dataset discovery'):
            self.ds.shape()
            self.ds.show_duplicates(self.ds.config['level_01'])
            self.ds.drop_duplicates()
            self.show_zeros()
            self.ds.drop_outliers()
            self.ds.shape()
            self.ds.discovery()
        with timer('\nSetting target'):
            self.ds.set_target()
        with timer('\nEvaluating sparse features'):
            self.ds.evaluate_sparse_features(engineer=False)
        with timer('\nVisualising pairplot for selected columns'):
            self.visualize.pairplot(self.ds.dataset,
                                    self.ds.config['pairplot_cols'],
                                    self.ds.config['pairplot_target'])
        with timer('\nDropping columns'):
            self.ds.drop_cols(self.ds.config['drop_cols_01'])
        with timer('\nEvaluating correlation'):
            self.visualize.correlation_heatmap(
                self.ds.dataset,
                title='Correlation Heatmap Before Column Drop')
            self.ds.drop_highly_correlated()
            self.visualize.correlation_heatmap(
                self.ds.dataset, title='Correlation Heatmap After Column Drop')
        with timer('\nPersisting transformed dataset and target'):
            self.filehandler.write_csv(self.ds.config['path'],
                                       self.ds.config['file'] + '_processed',
                                       self.ds.dataset)
            self.filehandler.write_csv(self.ds.config['path'],
                                       self.ds.config['file'] + '_target',
                                       self.ds.target)
            self.ds.shape()

        self.log_file()
        print('Finished')
Example #13
def main():

    # establish the path to the file (raw string avoids invalid "\U" escapes in the Windows path)
    essay = os.path.join(r"C:\Users\Jim\Downloads\knighthacks\data\data", "profile_data.txt")
    if not os.path.exists(essay):
        sys.exit("Could not find the path")
    # instantiate the object
    analyzer = Analyzer(essay)
    visuals = Visualize(analyzer.data_structures())
    
    visuals.jobVSposts()
Example #14
def convert_world_to_cam(data_3d, center, focus):

    data_2d = np.zeros((data_3d.shape[0], data_3d.shape[1], 2))

    for i in tqdm(range(data_3d.shape[0])):
        viz = Visualize(1)
        viz.place_camera_circular(0, 2000, data_3d[0, 0, :])

        data_2d[i, :, :] = viz.get_projection(data_3d[i, :, :], 0, focus,
                                              center)

    return data_2d
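For reference, the projection hidden inside viz.get_projection is, assuming a standard pinhole camera model, roughly the following sketch (not the project's actual code):

def pinhole_project(x, y, z, focus, center):
    # scale by focal length over depth, then shift by the principal point
    return focus * x / z + center[0], focus * y / z + center[1]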
Example #15
 def __init__(self):
     self.__board_type = parameters.BOARD_TYPE
     # Both board shapes are constructed and visualized the same way.
     board_cls = Diamond if parameters.BOARD_TYPE == Shape.Diamond else Triangle
     self.__game_board = board_cls(parameters.BOARD_TYPE, parameters.SIZE, parameters.HOLES)
     Visualize.initialize_board(self.__game_board.get_board(), self.__game_board._edges, self.__board_type)
     self.__peg_history = []
     self.__memoized_legal_actions = {}
     print('Initial board:')
     print(self.__game_board)
Example #16
def bubble_sort(d):
    vis = Visualize(d)
    vis.bubble_sort()  # Starting frame

    length = len(d)
    for p in range(length):
        for i in range(length - p - 1):
            vis.bubble_sort(highlight_0=i, highlight_1=i + 1)
            if d[i] > d[i + 1]:
                d[i], d[i + 1] = d[i + 1], d[i]

    vis.bubble_sort()  # Finished frame
    vis.create_gif('bubble_sort')
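Usage is straightforward; this sketch assumes Visualize(d) snapshots the list on every bubble_sort() call, as the frame comments above suggest:

import random

data = random.sample(range(100), 15)  # unsorted input
bubble_sort(data)                     # renders frames and writes bubble_sort.gif
print(data)                           # the list is sorted in place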
Example #17
    def __init__(self):
        self.pivot_distance = 8
        self.n_nearest_neighbors = 50
        self.p_norm = 2
        self.reg_box = RegBox()

        #submap_topic = rospy.get_param("~submap_constraint_topic")
        map_topic = '/s2loc/map'
        self.map_pub = rospy.Publisher(map_topic, PointCloud2, queue_size=10)
        self.visualizer = Visualize()
        self.submap_seq = 0
        self.compute_poses_in_LiDAR = False
        self.refine_with_ICP = False
Example #18
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--player", help="player 1-6", type=int, default=1)
    args = parser.parse_args()

    config = Config(CONFIG_FILE, args.player)

    comms_config = {
        'rx_ip': config.client_ip,
        'rx_port': config.client_port,
        'tx_ip': config.sim_ip,
        'tx_port': config.sim_port
    }
    print("Rx at {}:{}".format(comms_config["rx_ip"], comms_config["rx_port"]))
    print("Tx to {}:{}".format(comms_config["tx_ip"], comms_config["tx_port"]))

    commands_mutex = Lock()

    # Launch comms in background thread
    comms = CommsThread(comms_config, False, commands_mutex)
    comms.daemon = True
    comms.start()

    # Launch perception, motion planning, and controls in main thread
    sweep_builder = SweepBuilder()
    perception = Perception(config)
    planning = Planning(config)
    controls = Controls(config)
    visualize = Visualize(config)

    try:
        while True:
            vehicle_state = sweep_builder.run(comms.get_vehicle_states())
            if vehicle_state is not None:
                t1 = time.time()

                world_state = perception.run(vehicle_state)
                plan = planning.run(world_state)
                vehicle_commands = controls.run(plan)

                t2 = time.time()
                freq = 1 / (t2 - t1)
                print(f"Running at {freq} Hz")

                vehicle_commands['draw'] = visualize.run(world_state, plan)
                with commands_mutex:
                    # hold the lock to prevent the Comms thread from
                    # sending the commands dict while we're modifying it
                    comms.vehicle_commands.update(vehicle_commands)
    except KeyboardInterrupt:
        pass
Example #19
    def __init__(self):
        self.logfile = False
        self.gettrace = getattr(sys, 'gettrace', None)
        self.original_stdout = sys.stdout
        self.timestr = time.strftime("%Y%m%d-%H%M%S")
        self.log_file()

        print(__doc__)

        self.filehandler = Filehandler()
        self.visualize = Visualize()
        self.ds = KDDCup1999()
        self.X = None
        self.y = None
        self.full = None
        self.random_state = 20
        self.num_features = 15
        self.scale_cols = ['duration', 'src_bytes', 'dst_bytes', 'land', 'wrong_fragment', 'urgent', 'hot',
                           'num_failed_logins', 'logged_in', 'num_compromised', 'root_shell', 'su_attempted',
                           'num_root', 'num_file_creations', 'num_shells', 'num_access_files', 'is_guest_login',
                           'count', 'srv_count', 'serror_rate', 'rerror_rate', 'diff_srv_rate', 'srv_diff_host_rate',
                           'dst_host_count', 'dst_host_srv_count', 'dst_host_diff_srv_rate',
                           'dst_host_same_src_port_rate', 'dst_host_srv_diff_host_rate']

        with timer('\nLoading dataset'):
            self.load_data()
            self.encode_scale()
            self.set_X()
        with timer('\nFeature selection'):
            for selector in (Original(),
                             UnivariateSelector(),
                             RecursiveSelector(),
                             PCASelector(),
                             #KernelPCASelector(),
                             ExtraTreesSelector(),
                             RandomForestSelector()):
                for label in ('attack_category', 'target'):
                    self.set_y(label)
                    with timer('\nFitting selector ' + selector.__class__.__name__):
                        selector.fit_model(self.X, self.y)
                        x = selector.get_top_features(self.X, label)
                    with timer('\nXGBoost scoring of features selected by ' + selector.__class__.__name__):
                        self.score_with_xgboost(x, self.y, selector.title)

        self.log_file()
        print('Finished')
Example #20
    def run(self) -> None:
        """
        Runs all episodes with pivotal parameters.
        Visualizes one round at the end.
        """
        self.__ANET.save('0.h5')  # Save the untrained ANET prior to episode 1
        for episode in range(1, self.__episodes + 1):
            print('\nEpisode:', episode)
            self.__run_one_episode()

            if episode % self.__caching_interval == 0:
                # Save ANET for later use in tournament play.
                self.__ANET.save(str(episode) + '.h5')

        Visualize.plot_loss(self.__ANET.loss_history)
        Visualize.plot_epsilon(self.__ANET.epsilon_history)

        if parameters.VISUALIZE_GAMES:
            print('Showing one episode with the greedy strategy.')
            ReinforcementLearner.run_one_game(self.__ANET, self.__ANET, True)
Example #21
 def random(cls, grid_length: int, sensor_density: int, seed: int, filename: str):
     '''randomly generate some sensors in a grid
     '''
     print('grid_length', grid_length)
     print('sensor density', sensor_density)
     print('seed', seed)
     print('filename', filename)
     random.seed(seed)
     all_sensors = list(range(grid_length * grid_length))
     subset_sensors = random.sample(all_sensors, sensor_density)
     Visualize.sensors(subset_sensors, grid_length, 1)
     subset_sensors = GenerateSensors.relocate_sensors(subset_sensors, grid_length, sensor_density)
     Visualize.sensors(subset_sensors, grid_length, 2)
     # subset_sensors = GenerateSensors.relocate_sensors(subset_sensors, grid_length, sensor_density)
     # Visualize.sensors(subset_sensors, grid_length, 3)
     # subset_sensors = GenerateSensors.relocate_sensors(subset_sensors, grid_length, sensor_density)
     # Visualize.sensors(subset_sensors, grid_length, 4)
     # subset_sensors = GenerateSensors.relocate_sensors(subset_sensors, grid_length, sensor_density)
     # Visualize.sensors(subset_sensors, grid_length, 5)
     subset_sensors.sort()
     GenerateSensors.save(subset_sensors, grid_length, filename)
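A hypothetical invocation, placing 100 sensors on a 50x50 grid:

GenerateSensors.random(grid_length=50, sensor_density=100, seed=0,
                       filename='sensors-50x50.txt')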
Example #22
    def cluster_documents(self,
                          docs,
                          dimensions_reduction,
                          clusters,
                          normalizer=TFIDF):
        """
    Clusters the documents based on text similarity using LSA

    :param docs: List of documents
    :param dimensions_reduction: Dimensions to reduce to
    :param clusters: Number of clusters to cluster into
    :param normalizer: Frequency normalizer class
    :return: -
    """
        # calculate the frequency matrix
        term_matrix = FrequencyMatrix(docs, filters=self.filterz)
        # Set-up Latent Semantic Analysis class
        lsa = LSA(term_matrix, normalizer=normalizer)
        # Decompose term matrix into SVD
        svd = lsa.decompose()
        # Reduce the dimensions of the SVD
        rsvd = lsa.reduce_svd(dimensions_reduction, svd)
        # Cluster the data
        centroids, doc_clusters, labels = lsa.cluster(clusters,
                                                      rsvd,
                                                      dim=dimensions_reduction)
        # Visualize it
        vis = Visualize()
        vis.plot_documents(rsvd, labels, doc_clusters, len(centroids))
        vis.show()
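A hedged usage sketch, assuming an instance of the owning class (clusterer here is hypothetical) with its filterz filters already configured:

docs = load_documents()  # hypothetical loader returning a list of text documents
clusterer.cluster_documents(docs, dimensions_reduction=2, clusters=5)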
Example #23
def evaluate(dataset, session, operation, inputs_placeholder, labels_placeholder, keep_prob_placeholder,
             is_training_placeholder, name,
             summary_writer, learning_step, visualize_correct=0, visualize_incorrect=0):
    steps_per_epoch = len(dataset['examples']) // 50
    number_of_examples = steps_per_epoch * 50

    visualize = Visualize()
    correct_visualized_counter = 0
    incorrect_visualized_counter = 0

    correct_num = 0
    for step in range(steps_per_epoch):
        batch = get_batch(dataset, inputs_placeholder, labels_placeholder, keep_prob_placeholder, 1,
                          is_training_placeholder, False)
        corrects_in_batch, corrects_vector, predictions = session.run(operation, feed_dict=batch)
        correct_num += corrects_in_batch

        # visualize correct and incorrect recognitions
        if incorrect_visualized_counter < visualize_incorrect or correct_visualized_counter < visualize_correct:
            for i in range(len(batch[inputs_placeholder])):
                true_label = np.argmax(batch[labels_placeholder][i], axis=1)
                if correct_visualized_counter < visualize_correct and corrects_vector[i]:
                    visualize.visualize_with_correct(batch[inputs_placeholder][i], predictions[i], true_label,
                                                     name + "_correct")
                    correct_visualized_counter += 1
                elif incorrect_visualized_counter < visualize_incorrect and not corrects_vector[i]:
                    visualize.visualize_with_correct(batch[inputs_placeholder][i], predictions[i], true_label,
                                                     name + "_incorrect")
                    incorrect_visualized_counter += 1

    precision = correct_num / number_of_examples
    summary = tf.Summary()
    summary.value.add(tag='Accuracy_' + name, simple_value=precision)
    summary_writer.add_summary(summary, learning_step)
    print("Accuracy %.3f" % precision)
Example #24
    def on_click_visualize_action(self):
        """ Triggered by Visualize button - toolbar """
        #self.window = SecondaryWindow()
        path, _ = QFileDialog.getSaveFileName(
            self, "Save file", "", "svg documents (*.svg);All files (*.*)")
        link = visualize(self.editor.toPlainText())

        if not path:
            # If dialog is cancelled, will return ''
            return

        try:
            with open(path, 'w') as f:
                f.write(link)

        except Exception as e:
            self.dialog_critical(str(e))

        else:
            self.path = path
            self.update_title()

        self.dialog = Visualize(path)
        self.dialog.show()
Example #25
import matplotlib.pyplot as plt
import numpy as np
from importdata import Importdata
from visualize import Visualize

d_test, d_train = Importdata.load_csv()
l_train = d_train['income']
l_test = d_test['income']

# Labels
classes = [l_train[l_train == val].shape[0] for val in l_train.unique()]
Visualize.piechart(classes,
        labels=[str(classes[1]) + ' people w/ 50,000$+', str(classes[0]) + ' people w/ 50,000-'],
        title='A few people save more than 50,000$')

# Age & sex
wealthy_age = d_train[d_train['income'] > 0]['age'].values.tolist()
bins = np.linspace(-1, 90, 91)
plt.hist(d_train['age'], bins=bins)
plt.hist(wealthy_age, bins=bins)
plt.title('Age distribution')
plt.show()

nMen = d_train[d_train['sex'] == 'Male'].shape[0]
nWom = d_train[d_train['sex'] == 'Female'].shape[0]
nMen_weal = d_train[(d_train['sex'] == 'Male') & (d_train['income'] > 0)].shape[0]
nWom_weal = d_train[(d_train['sex'] == 'Female') & (d_train['income'] > 0)].shape[0]
Visualize.doublepiechart([nMen, nWom], [nMen_weal, nWom_weal], title1='Men/women distribution', title2='Among the wealthiest')
plt.figure(1)
plt.subplot(211)
Visualize.piechart([nMen, nWom], ['Men', 'Women'], 'Men/women distribution', show=False)
Example #26
class AnnMLPBinary:
    def __init__(self):
        os.environ[
            'TF_CPP_MIN_LOG_LEVEL'] = '2'  # Ignore low level instruction warnings
        tf.logging.set_verbosity(tf.logging.ERROR)  # Set tensorflow verbosity
        self.g = tf.Graph()
        self.tf_sess = tf.Session(
            config=tf.ConfigProto(log_device_placement=True), graph=self.g)

        self.logfile = None
        self.gettrace = getattr(sys, 'gettrace', None)
        self.original_stdout = sys.stdout
        self.timestr = time.strftime("%Y%m%d-%H%M%S")
        self.log_file()

        print(__doc__)

        self.random_state = 20
        self.filehandler = Filehandler()
        self.ds = KDDCup1999()
        self.visualize = Visualize()
        self.folder = 'viz'

        # Datasets
        self.X = None
        self.y = None
        self.X_train = None
        self.X_test = None
        self.y_train = None
        self.y_test = None
        self.n_features = None
        self.label_map_int_2_string = {
            0: 'good',
            1: 'bad',
            '0': 'good',
            '1': 'bad'
        }
        self.label_map_string_2_int = {
            'normal': 0,
            'dos': 1,
            'u2r': 1,
            'r2l': 1,
            'probe': 1
        }

        # K-fold validation
        self.splits = 5
        self.kfold = StratifiedKFold(n_splits=self.splits,
                                     shuffle=True,
                                     random_state=self.random_state)

        # Network parameters
        self.epochs = 20
        self.batch_size = 100
        self.verbose = 0

        # Scores
        self.metric_loss = []
        self.metric_acc = []
        self.metric_dr = []
        self.metric_far = []

        self.metric_val_loss = []
        self.metric_val_acc = []
        self.metric_val_dr = []
        self.metric_val_far = []

        with timer('\nPreparing dataset'):
            self.load_data()
            self.set_y()
            self.remove_target_from_X()
            self.n_features = self.X.shape[1]
            self.train_test_split()

        with timer('\nTraining & validating model with kfold'):
            self.g.as_default()  # Reset graph for tensorboard display
            K.clear_session()

            # Train model on K-1 and validate using remaining fold
            for train, val in self.kfold.split(self.X_train, self.y_train):
                #self.tensorboard = TensorBoard(log_dir='logs/tb/annmlpbinary_cv')
                self.model = self.get_model()

                self.history = self.model.fit(
                    self.X_train.iloc[train],
                    self.y_train.iloc[train],
                    validation_data=(self.X_train.iloc[val],
                                     self.y_train.iloc[val]),
                    epochs=self.epochs,
                    batch_size=self.batch_size,
                    verbose=self.verbose)
                #callbacks=[self.tensorboard])

                self.metric_loss.append(self.history.history['loss'])
                self.metric_acc.append(self.history.history['acc'])
                self.metric_dr.append(self.history.history['dr'])
                self.metric_far.append(self.history.history['far'])
                self.metric_val_loss.append(self.history.history['val_loss'])
                self.metric_val_acc.append(self.history.history['val_acc'])
                self.metric_val_dr.append(self.history.history['val_dr'])
                self.metric_val_far.append(self.history.history['val_far'])

            print('\nTraining mean loss', np.mean(self.metric_loss))
            print('Training mean acc', np.mean(self.metric_acc))
            print('Training mean dr', np.mean(self.metric_dr))
            print('Training mean far', np.mean(self.metric_far))
            print('\nValidation mean loss', np.mean(self.metric_val_loss))
            print('Validation mean acc', np.mean(self.metric_val_acc))
            print('Validation mean dr', np.mean(self.metric_val_dr))
            print('Validation mean far', np.mean(self.metric_val_far))

        with timer('\nTesting model on unseen test set'):
            self.g.as_default()  # Reset graph for tensorboard display
            K.clear_session()

            self.tensorboard = TensorBoard(log_dir='logs/tb/annmlpbinary_test')
            self.model = self.get_model()

            # Train model on complete train set and validate with unseen test set
            self.history = self.model.fit(self.X_train,
                                          self.y_train,
                                          validation_data=(self.X_test,
                                                           self.y_test),
                                          epochs=self.epochs,
                                          batch_size=self.batch_size,
                                          verbose=self.verbose,
                                          callbacks=[self.tensorboard])

        with timer('\nVisualising results'):
            # Plot model
            plot_model(self.model, to_file='viz/annMLPBinary - model plot.png')

            # Get single class prediction (rather than multi class probability summing to 1)
            y_pred = self.model.predict_classes(self.X_test)

            print('Test loss', np.mean(self.history.history['loss']))
            print('Test acc', np.mean(self.history.history['acc']))
            print('Test dr', np.mean(self.history.history['dr']))
            print('Test far', np.mean(self.history.history['far']))

            # Remap to string class targets
            self.y_pred = self.map_target_to_label(y_pred)
            self.y_pred = self.y_pred.ravel()
            self.y_test = self.map_target_to_label(self.y_test)

            self.visualize.confusion_matrix(self.y_test, self.y_pred,
                                            self.__class__.__name__)

            epochs = range(1, len(self.history.history['loss']) + 1)

            # Plot loss
            fig, ax = plt.subplots(figsize=(15, 8))
            plt.style.use('ggplot')
            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
            ax.tick_params(axis='both', which='major', labelsize=12)
            ax.plot(epochs,
                    np.mean(self.metric_loss, axis=0),
                    'g',
                    label='Training')
            ax.plot(epochs,
                    np.mean(self.metric_val_loss, axis=0),
                    'b',
                    label='Validation')
            ax.plot(epochs, self.history.history['loss'], 'r', label='Test')
            self.title = '{} - {}'.format(self.__class__.__name__, 'Loss')
            plt.title(self.title, fontsize=18)
            plt.xlabel('Epochs', fontsize=14)
            plt.ylabel('Loss', fontsize=14)
            plt.legend(loc=1, prop={'size': 14})
            plt.savefig(fname=self.fname(self.title), dpi=300, format='png')
            plt.show()

            # Plot accuracy
            plt.clf()
            fig, ax = plt.subplots(figsize=(15, 8))
            plt.style.use('ggplot')
            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
            ax.tick_params(axis='both', which='major', labelsize=12)
            ax.plot(epochs,
                    np.mean(self.metric_acc, axis=0),
                    'g',
                    label='Training')
            ax.plot(epochs,
                    np.mean(self.metric_val_acc, axis=0),
                    'b',
                    label='Validation')
            ax.plot(epochs, self.history.history['acc'], 'r', label='Test')
            self.title = '{} - {}'.format(self.__class__.__name__, 'Accuracy')
            plt.title(self.title, fontsize=18)
            plt.xlabel('Epochs', fontsize=14)
            plt.ylabel('Accuracy', fontsize=14)
            plt.legend(loc=4, prop={'size': 14})
            plt.savefig(fname=self.fname(self.title), dpi=300, format='png')
            plt.show()

            # Plot detection rate
            plt.clf()
            fig, ax = plt.subplots(figsize=(15, 8))
            plt.style.use('ggplot')
            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
            ax.tick_params(axis='both', which='major', labelsize=12)
            ax.plot(epochs,
                    np.mean(self.metric_dr, axis=0),
                    'g',
                    label='Training')
            ax.plot(epochs,
                    np.mean(self.metric_val_dr, axis=0),
                    'b',
                    label='Validation')
            ax.plot(epochs, self.history.history['dr'], 'r', label='Test')
            self.title = '{} - {}'.format(self.__class__.__name__,
                                          'Detection Rate')
            plt.title(self.title, fontsize=18)
            plt.xlabel('Epochs', fontsize=14)
            plt.ylabel('Detection Rate', fontsize=14)
            plt.legend(loc=4, prop={'size': 14})
            plt.savefig(fname=self.fname(self.title), dpi=300, format='png')
            plt.show()

            # Plot false alarm rate
            plt.clf()
            fig, ax = plt.subplots(figsize=(15, 8))
            plt.style.use('ggplot')
            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
            ax.tick_params(axis='both', which='major', labelsize=12)
            ax.plot(epochs,
                    np.mean(self.metric_far, axis=0),
                    'g',
                    label='Training')
            ax.plot(epochs,
                    np.mean(self.metric_val_far, axis=0),
                    'b',
                    label='Validation')
            ax.plot(epochs, self.history.history['far'], 'r', label='Test')
            self.title = '{} - {}'.format(self.__class__.__name__,
                                          'False Alarm Rate')
            plt.title(self.title, fontsize=18)
            plt.xlabel('Epochs', fontsize=14)
            plt.ylabel('False Alarm Rate', fontsize=14)
            plt.legend(loc=1, prop={'size': 14})
            plt.savefig(fname=self.fname(self.title), dpi=300, format='png')
            plt.show()

        self.log_file()
        print('Finished')

    @staticmethod
    def dr(y_true, y_pred):
        y_pred_pos = K.round(K.clip(y_pred, 0, 1))
        y_pred_neg = 1 - y_pred_pos
        y_pos = K.round(K.clip(y_true, 0, 1))
        tp = K.sum(y_pos * y_pred_pos)
        fn = K.sum(y_pos * y_pred_neg)
        return tp / (tp + fn + K.epsilon())

    @staticmethod
    def far(y_true, y_pred):
        y_pred_pos = K.round(K.clip(y_pred, 0, 1))
        y_pred_neg = 1 - y_pred_pos
        y_pos = K.round(K.clip(y_true, 0, 1))
        y_neg = 1 - y_pos
        tn = K.sum(y_neg * y_pred_neg)
        fp = K.sum(y_neg * y_pred_pos)
        return fp / (tn + fp + K.epsilon())
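    # Note: dr is the detection rate (recall, TP / (TP + FN)) and far is the
    # false alarm rate (FP / (FP + TN)). For 0/1 labels, tp + fn == y_true.sum()
    # and tn + fp == (1 - y_true).sum(), so a plain-NumPy sanity check is:
    #   y_hat = np.round(np.clip(y_pred, 0, 1))
    #   dr  = (y_true * y_hat).sum() / (y_true.sum() + 1e-7)
    #   far = ((1 - y_true) * y_hat).sum() / ((1 - y_true).sum() + 1e-7)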

    def get_model(self):
        model = models.Sequential()
        model.add(
            layers.Dense(25,
                         activation='relu',
                         input_shape=(self.n_features, )))
        model.add(layers.Dropout(0.08))
        model.add(layers.Dense(25, activation='relu'))
        model.add(layers.Dropout(0.08))
        model.add(layers.Dense(25, activation='relu'))
        model.add(layers.Dropout(0.08))
        model.add(layers.Dense(25, activation='relu'))
        model.add(layers.Dropout(0.08))
        model.add(layers.Dense(1, activation='sigmoid'))
        model.compile(optimizer=optimizers.RMSprop(lr=0.0023),
                      loss='binary_crossentropy',
                      metrics=['accuracy', self.dr, self.far])
        return model

    def log_file(self):
        if self.gettrace is None:
            pass
        elif self.gettrace():
            pass
        else:
            if self.logfile:
                sys.stdout = self.original_stdout
                self.logfile.close()
                self.logfile = False
            else:
                # Redirect stdout to file for logging if not in debug mode
                self.logfile = open(
                    'logs/{}_{}_stdout.txt'.format(self.__class__.__name__,
                                                   self.timestr), 'w')
                sys.stdout = self.logfile

    def load_data(self):
        self.X = self.filehandler.read_csv(
            self.ds.config['path'],
            self.ds.config['file'] + '_Tensor2d_type_2')
        print('\tRow count:\t', '{}'.format(self.X.shape[0]))
        print('\tColumn count:\t', '{}'.format(self.X.shape[1]))

    def set_y(self):
        self.y = self.X['attack_category']
        self.y = self.y.map(self.label_map_string_2_int)

    def remove_target_from_X(self):
        self.X.drop('attack_category', axis=1, inplace=True)

    def train_test_split(self):
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            self.X, self.y, test_size=0.30, random_state=self.random_state)

    def map_target_to_label(self, t):
        return np.vectorize(self.label_map_int_2_string.get)(t)

    def fname(self, title):
        return '{}/{}.png'.format(self.folder, title)
Example #27
    def __init__(self):
        os.environ[
            'TF_CPP_MIN_LOG_LEVEL'] = '2'  # Ignore low level instruction warnings
        tf.logging.set_verbosity(tf.logging.ERROR)  # Set tensorflow verbosity
        self.g = tf.Graph()
        self.tf_sess = tf.Session(
            config=tf.ConfigProto(log_device_placement=True), graph=self.g)

        self.logfile = None
        self.gettrace = getattr(sys, 'gettrace', None)
        self.original_stdout = sys.stdout
        self.timestr = time.strftime("%Y%m%d-%H%M%S")
        self.log_file()

        print(__doc__)

        self.random_state = 20
        self.filehandler = Filehandler()
        self.ds = KDDCup1999()
        self.visualize = Visualize()
        self.folder = 'viz'

        # Datasets
        self.X = None
        self.y = None
        self.X_train = None
        self.X_test = None
        self.y_train = None
        self.y_test = None
        self.n_features = None
        self.label_map_int_2_string = {
            0: 'good',
            1: 'bad',
            '0': 'good',
            '1': 'bad'
        }
        self.label_map_string_2_int = {
            'normal': 0,
            'dos': 1,
            'u2r': 1,
            'r2l': 1,
            'probe': 1
        }

        # K-fold validation
        self.splits = 5
        self.kfold = StratifiedKFold(n_splits=self.splits,
                                     shuffle=True,
                                     random_state=self.random_state)

        # Network parameters
        self.epochs = 20
        self.batch_size = 100
        self.verbose = 0

        # Scores
        self.metric_loss = []
        self.metric_acc = []
        self.metric_dr = []
        self.metric_far = []

        self.metric_val_loss = []
        self.metric_val_acc = []
        self.metric_val_dr = []
        self.metric_val_far = []

        with timer('\nPreparing dataset'):
            self.load_data()
            self.set_y()
            self.remove_target_from_X()
            self.n_features = self.X.shape[1]
            self.train_test_split()

        with timer('\nTraining & validating model with kfold'):
            self.g.as_default()  # Reset graph for tensorboard display
            K.clear_session()

            # Train model on K-1 and validate using remaining fold
            for train, val in self.kfold.split(self.X_train, self.y_train):
                #self.tensorboard = TensorBoard(log_dir='logs/tb/annmlpbinary_cv')
                self.model = self.get_model()

                self.history = self.model.fit(
                    self.X_train.iloc[train],
                    self.y_train.iloc[train],
                    validation_data=(self.X_train.iloc[val],
                                     self.y_train.iloc[val]),
                    epochs=self.epochs,
                    batch_size=self.batch_size,
                    verbose=self.verbose)
                #callbacks=[self.tensorboard])

                self.metric_loss.append(self.history.history['loss'])
                self.metric_acc.append(self.history.history['acc'])
                self.metric_dr.append(self.history.history['dr'])
                self.metric_far.append(self.history.history['far'])
                self.metric_val_loss.append(self.history.history['val_loss'])
                self.metric_val_acc.append(self.history.history['val_acc'])
                self.metric_val_dr.append(self.history.history['val_dr'])
                self.metric_val_far.append(self.history.history['val_far'])

            print('\nTraining mean loss', np.mean(self.metric_loss))
            print('Training mean acc', np.mean(self.metric_acc))
            print('Training mean dr', np.mean(self.metric_dr))
            print('Training mean far', np.mean(self.metric_far))
            print('\nValidation mean loss', np.mean(self.metric_val_loss))
            print('Validation mean acc', np.mean(self.metric_val_acc))
            print('Validation mean dr', np.mean(self.metric_val_dr))
            print('Validation mean far', np.mean(self.metric_val_far))

        with timer('\nTesting model on unseen test set'):
            self.g.as_default()  # Reset graph for tensorboard display
            K.clear_session()

            self.tensorboard = TensorBoard(log_dir='logs/tb/annmlpbinary_test')
            self.model = self.get_model()

            # Train model on complete train set and validate with unseen test set
            self.history = self.model.fit(self.X_train,
                                          self.y_train,
                                          validation_data=(self.X_test,
                                                           self.y_test),
                                          epochs=self.epochs,
                                          batch_size=self.batch_size,
                                          verbose=self.verbose,
                                          callbacks=[self.tensorboard])

        with timer('\nVisualising results'):
            # Plot model
            plot_model(self.model, to_file='viz/annMLPBinary - model plot.png')

            # Get single class prediction (rather than multi class probability summing to 1)
            y_pred = self.model.predict_classes(self.X_test)

            print('Test loss', np.mean(self.history.history['loss']))
            print('Test acc', np.mean(self.history.history['acc']))
            print('Test dr', np.mean(self.history.history['dr']))
            print('Test far', np.mean(self.history.history['far']))

            # Remap to string class targets
            self.y_pred = self.map_target_to_label(y_pred)
            self.y_pred = self.y_pred.ravel()
            self.y_test = self.map_target_to_label(self.y_test)

            self.visualize.confusion_matrix(self.y_test, self.y_pred,
                                            self.__class__.__name__)

            epochs = range(1, len(self.history.history['loss']) + 1)

            # Plot loss
            fig, ax = plt.subplots(figsize=(15, 8))
            plt.style.use('ggplot')
            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
            ax.tick_params(axis='both', which='major', labelsize=12)
            ax.plot(epochs,
                    np.mean(self.metric_loss, axis=0),
                    'g',
                    label='Training')
            ax.plot(epochs,
                    np.mean(self.metric_val_loss, axis=0),
                    'b',
                    label='Validation')
            ax.plot(epochs, self.history.history['loss'], 'r', label='Test')
            self.title = '{} - {}'.format(self.__class__.__name__, 'Loss')
            plt.title(self.title, fontsize=18)
            plt.xlabel('Epochs', fontsize=14)
            plt.ylabel('Loss', fontsize=14)
            plt.legend(loc=1, prop={'size': 14})
            plt.savefig(fname=self.fname(self.title), dpi=300, format='png')
            plt.show()

            # Plot accuracy
            plt.clf()
            fig, ax = plt.subplots(figsize=(15, 8))
            plt.style.use('ggplot')
            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
            ax.tick_params(axis='both', which='major', labelsize=12)
            ax.plot(epochs,
                    np.mean(self.metric_acc, axis=0),
                    'g',
                    label='Training')
            ax.plot(epochs,
                    np.mean(self.metric_val_acc, axis=0),
                    'b',
                    label='Validation')
            ax.plot(epochs, self.history.history['acc'], 'r', label='Test')
            self.title = '{} - {}'.format(self.__class__.__name__, 'Accuracy')
            plt.title(self.title, fontsize=18)
            plt.xlabel('Epochs', fontsize=14)
            plt.ylabel('Accuracy', fontsize=14)
            plt.legend(loc=4, prop={'size': 14})
            plt.savefig(fname=self.fname(self.title), dpi=300, format='png')
            plt.show()

            # Plot detection rate
            plt.clf()
            fig, ax = plt.subplots(figsize=(15, 8))
            plt.style.use('ggplot')
            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
            ax.tick_params(axis='both', which='major', labelsize=12)
            ax.plot(epochs,
                    np.mean(self.metric_dr, axis=0),
                    'g',
                    label='Training')
            ax.plot(epochs,
                    np.mean(self.metric_val_dr, axis=0),
                    'b',
                    label='Validation')
            ax.plot(epochs, self.history.history['dr'], 'r', label='Test')
            self.title = '{} - {}'.format(self.__class__.__name__,
                                          'Detection Rate')
            plt.title(self.title, fontsize=18)
            plt.xlabel('Epochs', fontsize=14)
            plt.ylabel('Detection Rate', fontsize=14)
            plt.legend(loc=4, prop={'size': 14})
            plt.savefig(fname=self.fname(self.title), dpi=300, format='png')
            plt.show()

            # Plot false alarm rate
            plt.clf()
            fig, ax = plt.subplots(figsize=(15, 8))
            plt.style.use('ggplot')
            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
            ax.tick_params(axis='both', which='major', labelsize=12)
            ax.plot(epochs,
                    np.mean(self.metric_far, axis=0),
                    'g',
                    label='Training')
            ax.plot(epochs,
                    np.mean(self.metric_val_far, axis=0),
                    'b',
                    label='Validation')
            ax.plot(epochs, self.history.history['far'], 'r', label='Test')
            self.title = '{} - {}'.format(self.__class__.__name__,
                                          'False Alarm Rate')
            plt.title(self.title, fontsize=18)
            plt.xlabel('Epochs', fontsize=14)
            plt.ylabel('False Alarm Rate', fontsize=14)
            plt.legend(loc=1, prop={'size': 14})
            plt.savefig(fname=self.fname(self.title), dpi=300, format='png')
            plt.show()

        self.log_file()
        print('Finished')
Example #28
 def __draw_board(self, action: Action) -> None:
     Visualize.draw_board(self.__board_type, self.__board, action.positions)
Example #29
from __future__ import print_function

from joint_set import JointSet
import constants as jnt
from utils import parse_metadata, read_h5, read_npz
from conversion import convert_json_to_npz, convert_h5_directory_to_augmented, convert_h5_to_projected
from visualize import Visualize
from tqdm import tqdm
import numpy as np

if __name__ == "__main__":

    viz = Visualize(50)

    data = read_h5("annot.h5")

    viz.place_random_cameras(50, [3000, 3500], data['pose/3d-univ'][()][0,
                                                                        0, :])

    data_2d = np.zeros((0, 32, 2))
    data_3d = np.zeros((0, 32, 3))

    point_2d = viz.get_projection(data['pose/3d-univ'][()][0, :, :], 32,
                                  jnt.CAMERAS[0]['focal_length'],
                                  jnt.CAMERAS[0]['center'])

    #viz.plot_3d(data['pose/3d-univ'][()][0, :, :], True)

    #viz.plot_2d(point_2d)

    #convert_h5_directory_to_augmented("../H36M_H5_Annotations/**/**/*.h5", 15)
Example #30
import numpy as np
import scipy.misc
import tensorflow as tf

from visualize import Visualize

image = "image.jpg"


def to_label(label):
    text_label = ""
    for single_label in label:
        number = np.argmax(single_label)
        if number == 10:
            return text_label
        else:
            text_label += str(number)
    return text_label


with tf.Session() as sess:
    saver = tf.train.import_meta_graph('SVHN_recognition/checkpoints/SVHN/SVHN-30000.meta')
    saver.restore(sess, 'SVHN_recognition/checkpoints/SVHN/SVHN-30000')
    graph = tf.get_default_graph()
    inputs = graph.get_tensor_by_name("inputs:0")
    label = graph.get_tensor_by_name("inference/stack:0")
    position = graph.get_tensor_by_name("inference/fc_5/MatMul:0")

    input = scipy.misc.imresize(scipy.misc.imread(image), (128, 256))

    feed_dict = {inputs: [input]}
    label, position = sess.run([label, position], feed_dict)
    visualize = Visualize()
    visualize.visualize_inference(input, to_label(label[0]), position[0])
Example #31
    def __init__(self):
        os.environ[
            'TF_CPP_MIN_LOG_LEVEL'] = '2'  # Ignore low level instruction warnings
        tf.logging.set_verbosity(tf.logging.ERROR)  # Set tensorflow verbosity

        # self.logfile = None
        # self.gettrace = getattr(sys, 'gettrace', None)
        # self.original_stdout = sys.stdout
        # self.timestr = time.strftime("%Y%m%d-%H%M%S")
        # self.log_file()

        print(__doc__)

        self.filehandler = Filehandler()
        self.ds = KDDCup1999()
        self.visualize = Visualize()
        self.full = None
        self.X = None
        self.y = None
        self.X_train = None
        self.X_test = None
        self.y_train = None
        self.y_test = None
        self.n_features = None
        self.random_state = 20
        self.label_multi = {
            0: 'normal',
            '0': 'normal',
            1: 'dos',
            '1': 'dos',
            2: 'u2r',
            '2': 'u2r',
            3: 'r2l',
            '3': 'r2l',
            4: 'probe',
            '4': 'probe'
        }
        self.label_binary = {0: 'good', '0': 'good', 1: 'bad', '1': 'bad'}

        with timer('\nLoading dataset'):
            self.load_data()

        with timer('\nSetting X and y'):
            self.set_X()
            self.n_features = self.X.shape[1]

        models = (RandomForestClf(), AnnSLPBinary(self.n_features),
                  AnnMLPBinary(self.n_features), AnnMLPMulti(self.n_features))
        classification_type = ('Binary', 'Multi')

        for m, ctype in itertools.product(models, classification_type):
            score = False
            if ctype == 'Binary' and m.binary_enabled:
                self.set_y_binary()
                score = True
            elif ctype == 'Multi' and m.multi_enabled:
                self.set_y_multi()
                score = True

            if not score:
                continue

            with timer('\nTraining and scoring {} - {} target'.format(
                    m.__class__.__name__, ctype)):
                m.base['model'] = m.get_model()
                #self.train_test_split()
                m.score(self.X, self.y, ctype)

            m.y_test[ctype] = pd.Series(m.y_test[ctype])
            m.y_pred[ctype] = pd.Series(m.y_pred[ctype])
            m.y_test[ctype] = m.y_test[ctype].astype(int)
            m.y_pred[ctype] = m.y_pred[ctype].astype(int)

            if ctype == 'Binary':
                m.y_test[ctype] = self.series_map_ac_binary_to_label(
                    m.y_test[ctype])
                m.y_pred[ctype] = self.series_map_ac_binary_to_label(
                    m.y_pred[ctype])
            else:
                m.y_test[ctype] = self.series_map_ac_multi_to_label(
                    m.y_test[ctype])
                m.y_pred[ctype] = self.series_map_ac_multi_to_label(
                    m.y_pred[ctype])

            title = '{} - {} - {} '.format('CM', m.__class__.__name__, ctype)
            self.visualize.confusion_matrix(m.y_test[ctype], m.y_pred[ctype],
                                            title)
            self.scores(m.y_test[ctype], m.y_pred[ctype])

    # Append the scores to a scores array. I could then do an np.mean(scores) to get the mean(average) from all the kfolds
    # save the epoch number and gfold number if possible as well, to get a per/epoch score

    # self.log_file()
        print('Finished')
Example #32
	# elapsed4 = time.time() - start4

	error = np.linalg.norm(estimated_point - np.array([location[0], location[1]]))

	elapsed = time.time() - start

	print(estimated_point, location, error, elapsed, len(shapes))  #, elapsed4

	est_x, est_y = estimated_point[0], estimated_point[1]

	estimate_list.append((est_x, est_y))
	actual_list.append(location)
	# if len(shapes) == 4:
	error_list.append(error)

# print estimate_list
# print actual_list
# print error_list

print "mean error", np.mean(np.array(error_list))
print "max error", np.max(np.array(error_list))
print "min error", np.min(np.array(error_list))

#####################################

v = Visualize()
v.plot_estimate_list(estimate_list)
v.plot_actual_list(actual_list)
v.show()