    def test_random_visualization_init(self):
        """`visualization.Visualization.init`: Randomized Validator.

        Tests the behavior of `Visualization.init` by feeding it randomly
        generated arguments.

        Raises:
            AssertionError: If `Visualization.init` needs debugging.

        """
        v = Visualization("Title", (1, 1))
        """Visualization: Plotter instance."""

        # Axes should be a list of length 1.
        self.assertIsInstance(v._ax, list)
        self.assertEqual(len(v._ax), 1)

        v.close()

        combinations = [(2, 3), (3, 2), (6, 1), (1, 6)]
        """list of (int, int): Possible subplot layouts."""

        for i in range(5):
            k = _np.random.randint(0, len(combinations))
            """int: Subplot layout index."""
            v = Visualization("Title", combinations[k])

            # Axes should be a list of length 6 (rows x columns of the layout).
            self.assertIsInstance(v._ax, list)
            self.assertEqual(len(v._ax), 6)

            v.close()
Example No. 2
def result():
    error = None
    script = ''
    div = {}
    prod = ''
    time = ''
    cheapest_dates = ''
    if request.method == 'POST':
        result = request.form
        for key, val in result.items():
            if key == 'prod':
                prod = val
            elif key == 'timeWindow':
                time = val

        if time == '' or prod == '':  # checks to see if user filled everything in
            error = 'Please fill in all fields.'
            script = ' '
            div = {}
            cheapest_dates = ""
        elif time.isdigit() and 80 <= int(time) <= 3650:
            # input is a number within the accepted range
            if prod == 'Oil':
                myinterpreter = Interpreter('oil_prices', int(time))
                myinterpreter.differencing()
                myinterpreter.create_acf()
                myinterpreter.get_p_and_q()
                myinterpreter.build_model()
                data = myinterpreter.get_data_source()
                visualization = Visualization(data)
                plot = visualization.get_graph2()
                cheapest_dates = visualization.find_lowest_prices()
                script, div = components(plot)
            elif prod == 'Electricity':
                myinterpreter = Interpreter('avg_elec_price', int(time))
                myinterpreter.differencing()
                #myinterpreter.test_stationarity()
                myinterpreter.create_acf()
                myinterpreter.get_p_and_q()
                myinterpreter.build_model()
                data = myinterpreter.get_data_source()
                visualization = Visualization(data)
                cheapest_dates = visualization.find_lowest_prices()
                plot = visualization.get_graph2()
                script, div = components(plot)
        else:
            error = "Please type your specified time period as a number between 80 and 3650."
            script = ' '
            div = {}
            cheapest_dates = ""
    return render_template("result.html",
                           prod=prod,
                           time=time,
                           error=error,
                           script=script,
                           div=div,
                           cheapest_dates=cheapest_dates)
Example No. 3
    def __init__(self, src_vocab, tgt_vocab,
                 max_len=300, hidden_size=300, n_layers=2, clip=5, n_epochs=30):
        # hyper-parameters
        self.max_len = max_len
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.clip = clip
        self.n_epochs = n_epochs

        # vocab
        self.src_vocab = src_vocab
        self.tgt_vocab = tgt_vocab
        self.pad_idx = self.src_vocab.stoi[PAD]

        # prepare model
        self.encoder = Encoder(self.src_vocab, self.max_len, self.hidden_size, self.n_layers)
        self.decoder = Decoder(self.tgt_vocab, self.max_len, self.hidden_size * 2, self.n_layers)
        self.reverse_decoder = Decoder(self.tgt_vocab, self.max_len, self.hidden_size * 2, self.n_layers, reverse=True)
        self.model = Seq2SeqConcat(self.encoder, self.decoder, self.reverse_decoder, self.pad_idx)
        self.model.to(device)
        print(self.model)
        print("Total parameters:", sum([p.nelement() for p in self.model.parameters()]))

        # initialize weights
        for name, param in self.model.named_parameters():
            if "lstm.bias" in name:
                # set lstm forget gate to 1 (Jozefowicz et al., 2015)
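                # (PyTorch packs the LSTM bias as [b_i | b_f | b_g | b_o], so
                # the second quarter of the vector is the forget-gate bias)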
                n = param.size(0)
                param.data[n//4:n//2].fill_(1.0)
            elif "lstm.weight" in name:
                nn.init.xavier_uniform_(param)

        # prepare loss function; don't calculate loss on PAD tokens
        self.criterion = nn.NLLLoss(ignore_index=self.pad_idx)

        # prepare optimizer and scheduler
        self.optimizer = Adam(self.model.parameters())
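        # CyclicLR with mode="triangular" ramps the LR linearly from base_lr
        # (1e-5) to max_lr (5e-5) over 4000 steps, then back down over the
        # next 4000; cycle_momentum=False is required because Adam exposes no
        # "momentum" parameter for the scheduler to cycle.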
        self.scheduler = CyclicLR(self.optimizer, base_lr=0.00001, max_lr=0.00005,
                                  step_size_up=4000, step_size_down=4000,
                                  mode="triangular", gamma=1.0, cycle_momentum=False)

        # book keeping vars
        self.global_iter = 0
        self.global_numel = []
        self.global_loss = []
        self.global_acc = []

        # visualization
        self.vis_loss = Visualization(env_name="aivivn_tone", xlabel="step", ylabel="loss", title="loss (mean per 300 steps)")
        self.vis_acc = Visualization(env_name="aivivn_tone", xlabel="step", ylabel="acc", title="training accuracy (mean per 300 steps)")
Example No. 4
    def test_invalid_args_visualization_empty_subplot(self):
        """`visualization.Visualization.empty_subplot`: Argument Validator.

        Tests the behavior of `Visualization.empty_subplot` with invalid
        argument counts and values.

        Raises:
            Exception: If at least one `Exception` raised is not of the expected
                kind.

        """
        v = Visualization("Title", (1, 1))
        """Visualization: Plotter instance."""

        with self.assertRaises(TypeError):
            # No arguments.
            v._empty_subplot()

        with self.assertRaises(TypeError):
            # Too many arguments.
            v._empty_subplot(0, 0)

        with self.assertRaises(TypeError):
            # Non-integer index `i`.
            v._empty_subplot(None)

        v.close()
Example No. 5
def main():
    print(Welcome.WELCOME)
    # Init visualization
    v = Visualization()
    # Ask which data file to load
    data_file = input(Data.Q_DATA_2_LOAD)
    # Hand the data file to the preprocessing class
    pp = Preprocessing(data_file)

    # Ask if the user wants to see the raw data
    v.show_raw_information(pp.raw)

    # Ask if the user wants to decrease the time channels of the raw data
    pp.decrease_time_channels()

    # Ask for and apply a notch filter if required
    if pp.notch_filter() == Notch_filter.APPLY_NOTCH_FILTER:
        # If the user applied the filter, ask whether to show the results
        v.plot_data(pp.raw)

    # Ask for and apply a bandpass filter if required
    bandpass_filter(pp)

    # Ask for and apply an ICA filter if required
    if pp.ica_filter(v) == ICA_filter.APPLY_ICA_FILTER:
        print("TODO: I think this should be removed")
Example No. 6
    def test_edge_cases_visualization_plot_feature(self):
        """`visualization.Visualization.plot_feature`: Edge Case Validator.

        Tests the behavior of `Visualization.plot_feature` with edge cases.

        Raises:
            Exception: If at least one `Exception` raised is not of the expected
                kind.

        """
        X = _random_matrix(self.data_shape)
        """np.matrix: Random-valued feature set."""
        Y = _random_matrix((self.data_shape[0], 1))
        """np.matrix: Random-valued observation set."""

        v = Visualization("Title", (1, 1))
        """Visualization: Plotter instance."""

        for ModelWrapper in self.wrappers.values():
            with self.assertRaises(_InvalidFeatureSetError):
                # Empty feature set.
                v._plot_feature(_np.matrix([[]]), Y, 0, ModelWrapper)

            with self.assertRaises(_InvalidObservationSetError):
                # Empty observation set.
                v._plot_feature(X, _np.matrix([[]]), 0, ModelWrapper)

            with self.assertRaises(IndexError):
                # Feature index out of range.
                v._plot_feature(X, Y, self.data_shape[1], ModelWrapper)

        v.close()
Example No. 7
def main():
    if len(sys.argv) < 2:
        print("Behavioral Cloning\n", "Usage: python3 main.py config.json")
    else:
        config_file = sys.argv[1]
        with open(config_file) as yaml_file:
            # The FullLoader parameter handles the conversion from YAML
            # scalar values to the Python dictionary format
            configs = yaml.load(yaml_file, Loader=yaml.FullLoader)

            # Data configurations
            data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), configs["data_path"])
            labels_file = configs["labels_file"]
            skip_header = bool(configs["skipHeader"])
            use_side_images = bool(configs["useSideImages"])
            correction_factor = float(configs["correctionFactor"])
            train_valid_split = float(configs["trainValidSplit"])
            do_flip = bool(configs["doFlip"])

            # Training configurations
            top_crop = int(configs["topCrop"])
            bottom_crop = int(configs["bottomCrop"])
            batch_size = int(configs["batchSize"])
            epochs = int(configs["epochs"])
            loss = configs["loss"]
            optimizer = configs["optimizer"]
            verbose = int(configs["verbose"])
            model_name = configs["modelName"]
            output_dir = configs["outputDir"]

            # Init Preprocessing
            preprocess = PreprocessData(data_path, labels_file, correction_factor, skip_header, use_side_images,
                                        train_valid_split, do_flip)

            # Preprocess data and extract training and validation samples
            train_samples, validation_samples = preprocess.splitData()

            # Initialize train and validation generators
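            # (the generators yield image batches on demand, so the whole
            # dataset never has to be loaded into memory at once)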
            train_generator = preprocess.generator(train_samples, batch_size=batch_size)
            validation_generator = preprocess.generator(validation_samples, batch_size=batch_size)

            # Get image shape
            img_shape = preprocess.get_image_shape()

            # Initialize training network
            network = Model(img_shape, top_crop, bottom_crop, batch_size, epochs, loss, optimizer, verbose,
                            train_generator,
                            validation_generator, train_samples, validation_samples, model_name)

            model = network.create_model()

            # Initialize visualization
            visualize = Visualization(model, output_dir)
            visualize.visualize_model()

            network.get_summary(model)

            results = network.train_model(model)

            visualize.save_plots(results)
Example No. 8
 def __init__(self, configpath):
     self.config = js.load(open(configpath))
     self.vz = Visualization(self.config["language"])
     self.n = None  #Number of simulated rolls
     self.i = None  #Number of iterations
     self.ai = None  #Active iteration
     self.ms = None  #Match size in rolls
     self.discardtieds = None  #Decide if tied rolls are part of a match
     self.starttime = None
     self.sname = None
     self.rollresult = []
     self.simulationdata = []
     self.summarydata = None
     self.dicefaces = ["rock", "paper", "scissors"]
     self.cdice = [-1, -1, -1, -1, -1, -1]
     self.pdice = [-1, -1, -1, -1, -1, -1]
     PPP.setConfig(self, self.config)
     print()
     print(self)
     PPP.run(self)
     PPP.exportRawData(self)
     self.summarydata = PPP.buildSummary(self)
     self.vz.autoCharts(self.sname, self.config["cPath"], self.cdice,
                        self.pdice, self.simulationdata, self.summarydata)
     PPP.exportSummary(self)
Example No. 9
    def __init__(self, world_size, world_delta, obs_percent, agent_vision_depth, phase, rewards=10., penalty=-10., barrier_mode=0):
        """
        Desc: runs when instance of Visualization class is created

        Input(s):
            world_size: size of box world (width,height)
            world_delta: interval of world
            obs_percent: percent of spaces that are occupied
            agent_vision_depth: raidus of square within agent can see obstacles

            Optional-
            rewards: reward for getting to goal
            penalty: penalty for being in obstacle
        Output(s):
            none
        """
        self.world_size = world_size
        self.world_delta = world_delta      # used in visualization
        self.obs_percent = obs_percent
        self.agent_vision_depth = agent_vision_depth
        self.spots = self.generate_spots()

        # initialize gridworld elements
        self.obstacle_indices, self.obstacles = self.generate_obstacles(penalty)
        self.goals = [self.generate_goal(rewards)]
        self.agents = [self.generate_agent(0, phase)]
        # initialize visualization
        self.visualization = Visualization(self)
Example No. 10
 def visualize(self,
               type=None,
               value=None,
               tile_provider=None,
               world_view=True):
     if not type:
         if self.get_num_dimensions() <= 3:
             Visualization(self.cube, self.element).show_cube()
         else:
             Visualization(self.cube, self.element).show_table()
     elif type == "table":
         Visualization(self.cube, self.element).show_table()
     elif type == "map":
         Visualization(self.cube, self.element).show_map()
     elif type == "map_html":
         return map_html(self, value, tile_provider, world_view)
Example No. 11
def train(T_obs, T_pred, file_names, epoch_size=50):
    print("-------------------------------------------")
    writer = SummaryWriter("./log/loss")
    vis = Visualization()
    device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
    network = model.SocialLstm(hidden_dim=128, mediate_dim=32, output_dim=2)
    network.to(device)
    #network = torch.load("./log/model/559_model.pt")
    #network.eval()
    #network.to(device)

    criterion = torch.nn.MSELoss(reduction="sum")
    optimizer = torch.optim.Adam(network.parameters(), weight_decay=0.0005)

    loss = 0.
    for epoch in range(epoch_size):
        cost_sum = 0.
        cost_cnt = 0
        for file_name in file_names:
            file_data = dataset.FrameDataset(file_name)
            print(file_name)
            for (idx, data) in enumerate(file_data):
                h = torch.zeros(data["ped_trajs"].shape[1], 128, device=device)
                c = torch.zeros(data["ped_trajs"].shape[1], 128, device=device)
                optimizer.zero_grad()

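                # set_detect_anomaly(True) makes autograd check every backward
                # op and report the forward op that produced a NaN/inf; it is
                # a debugging aid and slows training noticeably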
                with torch.autograd.set_detect_anomaly(True):
                    Y = data["ped_trajs"][:T_pred, :, 1:].clone()
                    trajs = data["ped_trajs"][:T_pred, :, 1:].clone()
                    traj_masks = data["ped_masks"]

                    # forward propagation
                    output = network(trajs, traj_masks, h, c, Y, T_obs, T_pred)

                    # loss
                    Y_pred = output[T_obs + 1:T_pred]
                    Y_g = Y[T_obs + 1:T_pred]

                    cost = criterion(Y_pred, Y_g)
                    cost_sum += cost.item()
                    cost_cnt += 1
                    if cost_cnt % 50 == 0:
                        vis.plot(
                            Y[:T_pred, :, :].clone().detach().cpu().tolist(),
                            output[:T_pred, :, :].clone().detach().cpu(
                            ).tolist(), T_obs, T_pred)

                    # backward propagation
                    cost.backward()
                    optimizer.step()

        loss = cost_sum / cost_cnt
        print("epoch: ", epoch, "loss: ", loss)
        writer.add_scalar("Loss/train", loss)

    torch.save(network, "./log/model/" + str(int(loss * 100)) + "_model.pt")
    writer.close()
Example No. 12
 def test(self):
     v = Visualization()
     paths = v.file_path_get()
     data = v.data_get(paths[1])[0]
     print('data======', data)
     v.show(data)
     data = SF.trans_float_list(data)
     data = SF.data_format([data])
     self.run(data)
Example No. 13
    def test_random_visualization_plot_feature(self):
        """`visualization.Visualization.plot_feature`: Randomized Validator.

        Tests the behavior of `Visualization.plot_feature` by feeding it
        randomly generated arguments.

        Raises:
            AssertionError: If `Visualization.plot_feature` needs debugging.

        """
        for _ in range(self.n_tests):
            for ModelWrapper in self.wrappers.values():
                X = _random_matrix(self.data_shape)
                """np.matrix: Random-valued feature set."""
                Y = _random_matrix((self.data_shape[0], 1))
                """np.matrix: Random-valued observation set."""

                v = Visualization("Title", (3, 3))
                """Visualization: Plotter instance."""

                # Initialize model parameters to random values.
                ModelWrapper.model = dict(X=X)

                for i in range(9):
                    x, y, error = v._plot_feature(X, Y, i, ModelWrapper)
                    """tuple: X-values, y-values, and model error to plot."""

                    # `x` should be a list of floats. (`map` is wrapped in
                    # `list` so the assertions actually run under Python 3.)
                    self.assertIsInstance(x, list)
                    list(map(_appendargs(self.assertIsInstance, float), x))

                    # Number of `x` values should match number of data points in
                    # `X`.
                    self.assertEqual(len(x), X.shape[0])

                    # `x` values should match all values in `X`.
                    self.assertEqual(*map(_np.linalg.norm, [x, X[:, i]]))

                    # `y` should be a dict.
                    self.assertIsInstance(y, dict)

                    for j, values in _compose(enumerate, y.values)():
                        # `values` should be a list of floats.
                        self.assertIsInstance(values, list)
                        list(map(_appendargs(self.assertIsInstance, float),
                                 values))

                        # Number of values in `values` should match number of
                        # data points in `Y`.
                        self.assertEqual(len(values), Y.shape[0])

                        if j == 0:
                            # Observation values should match all values in `Y`.
                            self.assertEqual(
                                *map(_np.linalg.norm, [values, Y[:, 0]]))

                v.close()
Example No. 14
def main():
    dataset1 = np.genfromtxt(r'../data/new_dataset_1.txt',
                             dtype=float,
                             delimiter='\t')
    dataset2 = np.genfromtxt(r'../data/cho.txt', dtype=float, delimiter='\t')

    km1 = Kmeans(dataset1[:, 2:], dataset1[:, 1], 3)
    km2 = Kmeans(dataset2[:, 2:], dataset2[:, 1], 10)

    ic1 = km1.initial_centroids(3, 5, 9)
    #ic1 = km1.initial_random_centroids(5)
    ic2 = km2.initial_random_centroids(5)
    # km1.centroids = km1.init_centroids = np.loadtxt(r'../log/cho_ground_centroids.txt')

    # specify iteration as parameter here
    km1.kmeans_algorithm()
    km2.kmeans_algorithm()

    extr_index_validation1 = ExternalIndex(km1.ground_truth_clusters,
                                           km1.clusters)
    extr_index_validation2 = ExternalIndex(km2.ground_truth_clusters,
                                           km2.clusters)

    print('Rand Index of dataset1 clusters :',
          extr_index_validation1.rand_index())
    print('Jaccard Coefficient of dataset1 clusters :',
          extr_index_validation1.jaccard_coefficient())

    print('Rand Index of dataset2 clusters :',
          extr_index_validation2.rand_index())
    print('Jaccard Coefficient of dataset2 clusters :',
          extr_index_validation2.jaccard_coefficient())

    plot1 = Visualization(dataset1[:, 2:], km1.clusters, dataset1[:, 1])
    plot2 = Visualization(dataset2[:, 2:], km2.clusters, dataset2[:, 1])
    plot1.plot(r'../log/td1.jpg')
    plot2.plot(r'../log/cho2.jpg')

    # gene_cluster_matched = km1.cluster_validation()
    # print('Genes that matched in clusters: ', gene_cluster_matched)

    return
Example No. 15
def main():
    print("Reading landmark positions")
    landmarks = read_world("../map/landmarks_sim.dat")

    print("Reading sensor data")
    sensor_readings = read_sensor_data("../map/sensor_data_car.dat")

    print("Reading ground-truth odometry")
    odom_readings = read_odom("../map/odom_trajectory_car.dat")


    # initialize the particles
    map_limits = [0, 320, 0, 350]
    particles = initialize_particles(args.particles, map_limits)

    curr_pose_x = []
    curr_pose_y = []

    cov_noise = np.array([[0.01, 0, 0], [0, 0.01, 0], [0, 0, 0.01]])

    vis = Visualization(landmarks, map_limits, sensor_readings, odom_readings)

    for timestep in range(len(sensor_readings) // 2):
        # plot_state(particles, landmarks, map_limits)
        # curr_mean = mean_pose(particles)
        # curr_pose_x.append(curr_mean[0])
        # curr_pose_y.append(curr_mean[1])
        # plot_trajectories_v3( sensor_readings[timestep, 'sensor'], curr_mean ,landmarks, map_limits)

        new_particles = sample_motion_model(sensor_readings[timestep, 'odometry'], particles)

        curr_mean = mean_pose(new_particles)
        curr_pose_x.append(curr_mean[0])
        curr_pose_y.append(curr_mean[1])
        vis.robot_environment(timestep, new_particles, curr_mean)

        # predict particles by sampling from motion model with odometry info
        # if timestep==0:
        #     new_particles =particles
        #     errors = data_association(sensor_readings[timestep, 'sensor'], new_particles, particles, landmarks, args.DA, cov_noise, sensor_readings[timestep, 'odometry'])
        # else: 
        #     errors = data_association(sensor_readings[timestep, 'sensor'], new_particles, particles, landmarks, args.DA, cov_noise, sensor_readings[timestep, 'odometry'])

        weights = eval_sensor_model(sensor_readings[timestep, 'sensor'], new_particles, landmarks)
        
        # weights = weighting_model(errors)

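        # Resample in proportion to the sensor-model weights so the particle
        # set concentrates on poses that best explain the latest observations.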
        particles = resample_particles(new_particles, weights)


        print("Current TimeStep: ", timestep)
        raw_input("Press Enter to continue...")
    plot_trajectories(odom_readings, curr_pose_x,curr_pose_y ,landmarks, map_limits)
    plot_on_maps(curr_pose_x, curr_pose_y)
    plt.show()
Example No. 16
  def run_trial(self,trial_num,fullscreen=True):
    '''
    Runs the visualization for the specified trial number and returns the
    visualization object.
    '''
    folder = os.path.join(self.folder,self.trials[trial_num-1]) + os.path.sep
    vis = Visualization(folder)
    vis.load_data()
    vis.run(fullscreen,inverse_speed=.25)

    return vis
Example No. 17
    def tsneVisualization(self):
        # Init the widget
        self.visualization = Visualization(self.config)

        # Reduce the BoVW training features with t-SNE for plotting
        tSNE_features = getTSNEFeatures(self.bovwTrainingFeatures)
        self.visualization.show()
        self.visualization.updateNodes(tSNE_features,
                                       labels=self.labelTrainingArray)
        # self.visualization.graph_widget.fitInView()
        self.visualization.exec_()
Example No. 18
    def test_invalid_args_visualization_init(self):
        """`visualization.Visualization.init`: Argument Validator.

        Tests the behavior of `Visualization.init` with invalid argument counts
        and values.

        Raises:
            Exception: If at least one `Exception` raised is not of the expected
                kind.

        """
        with self.assertRaises(TypeError):
            # No arguments.
            Visualization()

        with self.assertRaises(TypeError):
            # Only one argument.
            Visualization("Title")

        with self.assertRaises(TypeError):
            # Invalid kwarg.
            Visualization("Title", (3, 3), key="value")

        with self.assertRaises(TypeError):
            # `None` instead of `subplots`.
            Visualization("Title", None)

        with self.assertRaises(TypeError):
            # Integer for `subplots`.
            Visualization("Title", 3)

        with self.assertRaises(TypeError):
            # Floats instead of `subplots` integers.
            Visualization("Title", (1.2, 3.4))

        with self.assertRaises(ValueError):
            # Negative integers instead of `subplots`.
            Visualization("Title", (-3, -3))

        with self.assertRaises(IndexError):
            # Zero for the row count.
            Visualization("Title", (0, 3))

        with self.assertRaises(IndexError):
            # Zero for the column count.
            Visualization("Title", (3, 0))
Example No. 19
    def test_density(self):
        """Test whether the calculation of probability density 
           from state is correct.

        """
        proba_calculated = Visualization(test_data())\
            .density(n_grid=101)\
            .astype(np.float16)
        proba_expected = np.load("example_proba.npz")['arr_0']
        np.testing.assert_array_almost_equal(proba_calculated,
                                             proba_expected,
                                             decimal=3)
Example No. 20
 def run_visualization(self, fullscreen=True, inverse_speed=.25):
     '''
     Displays the visualization if an output folder exists and the
     simulation has been run.
     '''
     if self.run:
         window = Visualization(self.folder)
         window.load_data()
         window.run(fullscreen, inverse_speed)
     else:
         print("The visualization cannot be started because the simulation "
               "has not been run.")
Example No. 21
    def test_edge_cases_visualization_init(self):
        """`visualization.Visualization.init`: Edge Case Validator.

        Tests the behavior of `Visualization.init` with edge cases.

        Raises:
            Exception: If at least one `Exception` raised is not of the expected
                kind.

        """
        with self.assertRaises(IndexError):
            # No subplots.
            Visualization("Title", (0, 0))
Example No. 22
def visualize():
    my_util = Util()
    df = my_util.load_df("../only_calculated_datasets/cleaned_df.pkl")
    my_visu = Visualization()
    my_visu.make_histograms(df, ["age", "race", "gender", "max_glu_serum", "A1Cresult", "num_lab_procedures",
                                 "time_in_hospital", "change", "diabetesMed"], image_path="../outputs/attr_hist_plot.png")  # all the attributes distributions
    my_visu.make_histograms(df, ["readmitted"], cmap="spring",
                            image_path="../outputs/class_attr_hist_plot.png")
    my_visu.build_scatter_2attr_plot(df, x_col="num_lab_procedures", y_col="num_medications",
                                     image_path="../outputs/num_of_procedures_vs_medications_plot.png")
    my_visu.build_3d_scatter_plot(df, x_axis_col="number_outpatient", y_axis_col="number_emergency", z_axis_col="number_inpatient",
                                  class_col="readmitted", image_path="../outputs/outPt_emergencyPt_inPt_vs_readmitted_sctter_plot.png", is_show=False)
    my_visu.build_3d_scatter_plot(df, x_axis_col="diag_1", y_axis_col="diag_2", z_axis_col="diag_3",
                                  class_col="readmitted", image_path="../outputs/diag_1_2_3_vs_readmitted_sctter_plot.png", is_show=False)
Example No. 23
    def test_random_evolution(self):
        np.random.seed(0)
        params = {"min": 0, "max": 5, "dim": 1}
        original_ind = Individual(
            np.random.uniform(params["min"], params["max"], params["dim"]))
        results = []
        labels = [1]
        epochs = 50

        for _ in labels:
            evo = REvolution(original_ind=original_ind,
                             combine_params=0.1,
                             mutate_params={
                                 "min": 0,
                                 "max": 5
                             },
                             fitness=fitness,
                             pop_params={
                                 "min": 0,
                                 "max": 5,
                                 "dim": 1
                             },
                             method="compare")
            evo.run_random(epochs)
            results.append(evo.result)

        for _ in labels:
            evo = REvolution(original_ind=original_ind,
                             combine_params=0.1,
                             mutate_params={
                                 "std": 0.5,
                                 "dim": 1,
                                 "min": 0,
                                 "max": 5
                             },
                             fitness=fitness,
                             pop_params={
                                 "min": 0,
                                 "max": 5,
                                 "dim": 1
                             },
                             method="compare")
            evo.run_1_1(epochs)
            results.append(evo.result)

        v = Visualization()
        v.visualize(labels + labels, results, epochs)

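        # Smoke test: reaching this point without an exception is the pass
        # criterion, so the assertion below is intentionally trivial.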
        self.assertEqual(True, True)
Example No. 24
def main():
    number_of_pages = 10000
    if len(sys.argv[1:]):
        number_of_pages = int(sys.argv[1])

    client = gRPCClient()
    page_count = client.method2(number_of_pages).count
    res = client.query_stream(queryPages(page_count, number_of_pages))

    for count, resp in enumerate(res):
        viz = Visualization(tile_number=count,
                            tile_width=100,
                            tile_height=100,
                            data_list=list(zip(resp.page_names,
                                               resp.page_count)))
        viz.visualize_data()
Example No. 25
    def test_edge_cases_visualization_empty_subplot(self):
        """`visualization.Visualization.empty_subplot`: Edge Case Validator.

        Tests the behavior of `Visualization.empty_subplot` with edge cases.

        Raises:
            Exception: If at least one `Exception` raised is not of the expected
                kind.

        """
        v = Visualization("Title", (1, 1))
        """Visualization: Plotter instance."""

        with self.assertRaises(IndexError):
            # Subplot index out of range.
            v._empty_subplot(1)

        v.close()
Example No. 26
    def __init__(self, visualization=None):
        self.encoder = None
        self.mu = None
        self.log_sigma = None
        self.gml = None
        self.vae = None

        self.X_train = None
        self.X_val = None
        self.X_test = None
        self.y_test = None

        self.visualization = (visualization if visualization is not None
                              else Visualization())

        self.load_data_functions = {
            'celeb_data': celeb_data,
            'cell_data': cell_data,
            'mnist': mnist
        }
Example No. 27
    def __call__(self, x, y, color=None, make_copies=True):

        vis = Visualization(width=self.width,
                            height=self.height,
                            padding=self.padding)

        vis.data.append(Data.from_iters(x=x, y=y))

        if make_copies:
            maybe_copy = deepcopy
        else:
            maybe_copy = lambda x: x

        vis.scales.extend(maybe_copy([self.x_scale, self.y_scale]))
        vis.axes.extend(maybe_copy([self.x_axis, self.y_axis]))
        vis.marks.extend(maybe_copy([self.mark]))

        if color:
            vis.marks[0].properties.update.fill.value = color

        return vis
Example No. 28
    def test_random_visualization_empty_subplot(self):
        """`visualization.Visualization.empty_subplot`: Randomized Validator.

        Tests the behavior of `Visualization.empty_subplot` by feeding it
        randomly generated arguments.

        Raises:
            AssertionError: If `Visualization.empty_subplot` needs debugging.

        """
        for _ in range(self.n_tests):
            v = Visualization("Title", (3, 3))
            """Visualization: Plotter instance."""

            for i in range(9):
                v._empty_subplot(i)

                # There should be no x- or y-ticks.
                self.assertEqual(len(v._ax[i].get_xticks()), 0)
                self.assertEqual(len(v._ax[i].get_yticks()), 0)

            v.close()
Example No. 29
def main():
    config = configparser.ConfigParser()
    config.read(r'config.ini')

    cluster_count = int(config['KMEANS']['ClusterCount'])

    with open(config['DATASET']['InputDirectory'] +
              config['DATASET']['InputFile'], 'r') as file:
        fdata = np.genfromtxt(file, dtype=float, delimiter='\t')

    mapreduce_inputfile_name = config['HADOOP']['mapreduce_inputfile_name']
    streaming_jar = config['HADOOP']['StreamingJar']
    mapper = config['HADOOP']['mapper']
    reducer = config['HADOOP']['reducer']
    hdfs_input_dir = config['HADOOP']['hdfs_input_directory']
    hdfs_output_dir = config['HADOOP']['hdfs_output_directory']
    tmp_dir = config['HADOOP']['temporary_directory']

    mrkm = MapReduceKMeans(fdata[:, 2:], fdata[:, 1], cluster_count,
                           mapreduce_inputfile_name, streaming_jar, mapper,
                           reducer, hdfs_input_dir, hdfs_output_dir, tmp_dir)
    if config['KMEANS']['Random'] == 'True':
        mrkm.initial_random_centroids(cluster_count)
    else:
        indices = config['KMEANS']['Centroids']
        mrkm.initial_centroids([int(i) for i in indices.split(',')])

    mrkm.kmeans()

    ei = ExternalIndex(mrkm.ground_truth_clusters, mrkm.clusters)
    print('Rand Index : ', ei.rand_index())
    print('Jaccard Coefficient : ', ei.jaccard_coefficient())

    visual = Visualization(mrkm.data, mrkm.clusters,
                           mrkm.ground_truth_clusters)
    visual.plot('demo.jpg')

    return
Example No. 30
def display_alch_profit():
    ge = OSRS_GE_Data()
    # This doesn't work right if the traded quantity is 0
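    # Profit per cast: high alchemy yields 60% of the store price ('sp'),
    # minus the cost of a nature rune and the item's average market price.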
    def alchfunc(x):
        item = ge.ge_json[x]
        if item['overall_average'] > 0:
            return item['sp'] * 0.6 - ge.nature_rune - item['overall_average']
        return -sys.maxsize - 1

    lf = alchfunc
    jl = ge.sort_json(ge.ge_json, lf, True)
    small_list = ge.get_top_values(10, jl)
    sorted_list = []
    for id in small_list:
        #TODO: This should be parameterized and maybe made into an insert function
        temp_entry = ge.get_entry(id)
        temp_entry['alch_diff'] = lf(id)
        print(temp_entry)
        sorted_list.append(temp_entry)
    '''header = sorted_list[0].keys()
    rows =  [x.values() for x in sorted_list]
    print(tabulate.tabulate(rows, header))'''
    vis = Visualization()
    vis.display(sorted_list, x='name', y='alch_diff')