Example #1
    def view(self, factor=1., subset=None):
        """Start an animation of the mode. The displacements can be
        scaled by a |factor| to make them better visible, and
        a |subset| of the total system can be specified as well.
        This function requires an external viewer; see the module
        MMTK.Visualization for details."""
        Visualization.viewMode(self, factor, subset)
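A hedged usage sketch for the method above, assuming a normal-mode object from this MMTK-style API; the modes and universe names are assumptions, not part of the excerpt:

# Hypothetical usage of view(); modes and universe are assumed to exist.
mode = modes[6]
mode.view(factor=5.0)                      # animate with displacements scaled 5x
mode.view(factor=2.0, subset=universe[0])  # animate only part of the system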
Example #2
def classifier_train_predict(X_train, y_train, X_test, y_test):
    print()
    print('Running various classifiers to predict the labels of the test data...')
    classifiers = {
        "Baseline Classifier": DummyClassifier(strategy='most_frequent', random_state=0),
        "Logistic Regression": LogisticRegression(),
        "Linear SVM": SVC(kernel='linear'),  # NOTE: SVC() defaults to RBF, so the kernel must be set explicitly
        "Non Linear SVM": SVC(kernel='rbf'),
        "Gradient Boosting Classifier": GradientBoostingClassifier(n_estimators=100),
        "Decision Tree": DecisionTreeClassifier(),
        "Random Forest": RandomForestClassifier(n_estimators=100),
        "Neural Network": MLPClassifier(alpha=1),
        "Naive Bayes": GaussianNB(),
        "Decision Tree (entropy)": DecisionTreeClassifier(criterion='entropy'),
        "Kernelized SVM": SVC(decision_function_shape='ovo'),
        "AdaBoost": AdaBoostClassifier(),
        "XGBoost": XGBClassifier()
    }

    for name, clf in classifiers.items():
        clf.fit(X_train, y_train)
        predicted = clf.predict(X_test)
        print()
        print(name, "Accuracy Score:", accuracy_score(y_test, predicted))
        cnf_matrix = confusion_matrix(y_test, predicted)
        print()
        Visualization.plotConfusionMatrix(cnf_matrix, name)
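Visualization.plotConfusionMatrix is project-local and not shown in these examples. A minimal stand-in sketch, assuming a matplotlib backend and that the second argument is used as the plot title:

import matplotlib.pyplot as plt
import numpy as np

def plot_confusion_matrix_sketch(cnf_matrix, title):
    # Hypothetical stand-in for Visualization.plotConfusionMatrix:
    # render the matrix as a heatmap with per-cell counts.
    fig, ax = plt.subplots()
    im = ax.imshow(cnf_matrix, cmap='Blues')
    for (i, j), count in np.ndenumerate(cnf_matrix):
        ax.text(j, i, str(count), ha='center', va='center')
    ax.set_xlabel('Predicted label')
    ax.set_ylabel('True label')
    ax.set_title(title)
    fig.colorbar(im)
    plt.show()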
Example #3
def runAndSaveMultiple(run_amount):

    for i in range(run_amount):
        start = time.clock()
        grid.clearGrid()
        seed = i
        random.seed(str(seed))

        global img_no, nets_unsorted, index_path_dict, conn_per_chip, conn_free_per_chip, not_used_chips, \
            not_layed_paths, path_lay_amount, visualise_dict, netlist_sorted, hillclimber_visualisation
        img_no = 0
        nets_unsorted = []          # List with start and end points of lines
        index_path_dict = {}        # Dictionary with net-index as key and the path as value
        conn_per_chip = {}          # Dictionary with the number of connections per chip
        conn_free_per_chip = {}
        not_used_chips = []
        not_layed_paths = []
        path_lay_amount = {}
        visualise_dict = {}
        netlist_sorted = []
        hillclimber_visualisation = [[], []]

        paths = main()
        if paths != []:
            total_length = 0
            for path in paths:
                total_length += len(path) - 1
            path_saver.saveToFile(paths, total_length, seed)
        Visualization.hillclimberVisualisation(hillclimber_visualisation, seed)
        print "Calculated in:", time.clock() - start
Example #4
def mhSample(probabilityX, probabilityYOfX, sampleYFromX, x_initial, x_list,
             sample_time, viz, *args):
    '''initial: x_list = range(sample_time)'''
    if len(args) == 1:
        args = args[0]
    x_old = x_initial
    x_new = sampleYFromX(x_old, args)
    alpha = computeAlpha(probabilityX, probabilityYOfX, x_old, x_new, args)
    if np.log(np.random.uniform(0, 1)) < alpha:
        received_x_new = x_new
    else:
        received_x_new = x_old
    x_list[-sample_time] = received_x_new
    sample_time = sample_time - 1
    print sample_time, np.exp(alpha), received_x_new, x_new, x_old
    if viz == 1:
        # Visualization.plotValue(x_list[:-sample_time],'', 0, np.size(x_list,0), 0, 11)
        # visual_list = np.sum(np.array(x_list[:-sample_time])*np.array([[1,20]]*len(x_list[:-sample_time])),1)
        Visualization.plotHistgram(x_list[:-sample_time], 10, -1, 2, 0, 100)
    if sample_time == 1:
        if viz == 1:
            Visualization.saveVis('masterchasing_hist.png')
        return x_list
    else:
        return mhSample(probabilityX, probabilityYOfX, sampleYFromX,
                        x_list[-sample_time - 1], x_list, sample_time, viz,
                        args)
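computeAlpha is not shown in this excerpt. Since the sampler compares np.log(np.random.uniform(0, 1)) against alpha, alpha must be a log acceptance ratio; a minimal sketch under that assumption, where probabilityX and probabilityYOfX are taken to return log densities (hypothetical signatures):

def computeAlpha(probabilityX, probabilityYOfX, x_old, x_new, args):
    # Log Metropolis-Hastings acceptance ratio:
    # log[p(x_new) q(x_old | x_new)] - log[p(x_old) q(x_new | x_old)],
    # capped at 0 so that exp(alpha) <= 1.
    log_ratio = (probabilityX(x_new, args) + probabilityYOfX(x_old, x_new, args)
                 - probabilityX(x_old, args) - probabilityYOfX(x_new, x_old, args))
    return min(0.0, log_ratio)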
Example #5
def plotPriceLabel(ticker, isGridSearch=True, isIndex=True):

    if isIndex:
        X_train, y_train, X_test, y_test = getXy(ticker,
                                                 isDrop=True,
                                                 featuresList=featuresList3,
                                                 dropList=dropList3)
    else:
        X_train, y_train, X_test, y_test = getXy(ticker,
                                                 isDrop=True,
                                                 featuresList=featuresList1,
                                                 dropList=dropList1)

    [ticker, clf, accuracy, fscore, bestTrainParm,
     predictions] = ml(SVM,
                       ticker,
                       X_train,
                       y_train,
                       X_test,
                       y_test,
                       isGridSearch=isGridSearch,
                       parm=SVMParm2)
    df = getStock(ticker)
    startTestDate = dt.date(2016, 1, 1)
    endTestDate = dt.date(2016, 12, 31)

    df = df.loc[startTestDate:endTestDate]
    df = df[['Adj Close']]
    df['Prediction'] = predictions
    df['Actual Trend'] = y_test

    vs.closeLabel(df, ticker)
Example #6
def optimize_angle(cylinder, seed, img):

    # Calculating step size based on radius and height:
    step_dec = math.degrees(2 * np.arcsin(cylinder.radius /
                                          (2 * cylinder.height)))
    mod90 = 90 % step_dec
    mod360 = 360 % step_dec
    div90 = 90 // step_dec
    div360 = 360 // step_dec
    step_psi = step_dec + mod90 / div90
    step_theta = step_dec + mod360 / div360

    fit_score = []
    theta_arr = []
    psi_arr = []
    psi = 0

    while psi <= 90:
        theta = 0

        if psi == 0:  # Don't need to rotate if vertical
            cropped_img = mask_dataset(cylinder, seed, img)
            score = score_fit(cylinder, cropped_img, translated=False)
            vis.visualise_cylinder(str(score) + ' theta ' + str(theta) +
                                   ' psi ' + str(psi) + '.tif',
                                   img,
                                   cylinder,
                                   seed,
                                   translated=False)
            fit_score.append(score)
            theta_arr.append(theta)
            psi_arr.append(psi)
        else:
            while theta < 360:
                cylinder.rotate(theta, psi)
                cropped_img = mask_dataset(cylinder, seed, img)
                score = score_fit(cylinder, cropped_img, translated=True)
                vis.visualise_cylinder(str(score) + ' theta ' + str(theta) +
                                       ' psi ' + str(psi) + '.tif',
                                       img,
                                       cylinder,
                                       seed,
                                       translated=True)

                fit_score.append(score)
                theta_arr.append(theta)
                psi_arr.append(psi)

                theta = step_theta + theta
        psi = step_psi + psi

    best_score = max(fit_score)
    high_score_index = fit_score.index(best_score)
    theta = theta_arr[high_score_index]
    psi = psi_arr[high_score_index]

    cylinder.rotate(theta, psi)

    return best_score, theta, psi
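The step size above is the chord angle subtended by the cylinder radius, padded so that an integer number of steps covers 90 and 360 degrees exactly. A worked example with radius 2 and height 5 (the defaults used in Example #16):

import math
import numpy as np

radius, height = 2.0, 5.0
step_dec = math.degrees(2 * np.arcsin(radius / (2 * height)))  # ~23.07 degrees
step_psi = step_dec + (90 % step_dec) / (90 // step_dec)       # 30.0 -> exactly 3 psi steps
step_theta = step_dec + (360 % step_dec) / (360 // step_dec)   # 24.0 -> exactly 15 theta steps
print(step_psi, step_theta)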
Example #7
    def __import_fuel_map(self):

        user_settings = UserSettings()
        file_filter = 'Ascii file (*' + AsciiParser.FILE_EXT + ')'
        file, filt = QFileDialog.getOpenFileName(self, 'Open File', user_settings.working_dir, file_filter)

        if file:
            # FIXME: increase SIZE when there are lots of cells in fuel map and ignition

            qApp.setOverrideCursor(Qt.WaitCursor)

            try:
                new_editor = FuelMapViewer(self, file)

            except IndexError:

                qApp.restoreOverrideCursor()
                QMessageBox.information(self, "Invalid file", "A problem occurred while loading the fuel map. Please "
                                                              "verify that the fuel map does not contain non-integer "
                                                              "numbers")
                return

            if self._fl_map_editor:
                self._fl_map_editor.deleteLater()

            self._fl_map_editor = new_editor

            self._fl_map_editor.setEnabled(True)

            self.__setup_fl_map_lgnd()

            # Enable relevant widgets
            self.action_export_fuel_map.setEnabled(True)

            # This means that a DEM has already been loaded,
            # so the user can now convert to FDS file
            if self._ign_pt_editor:
                self.__init_sim_settings()
                self.action_create_environment.setEnabled(True)

                if self._visualization:
                    self._visualization.deleteLater()

                self._visualization = Visualization(self._fl_map_editor.parser(), self._ign_pt_editor.parser(), self)
                self._visualization.setEnabled(True)
                self._visualization.hide()

            # Set current tab to fuel type legend
            self._tab_widget.setCurrentIndex(1)

            self._fm_title_label.setText("Fuel Map Title: " + util.get_filename(file))

            # Tab index might not change, so __tab_changed will never get called
            self._fl_map_editor.show()
            qApp.restoreOverrideCursor()
Example #8
def runMain():
    time_start = time.clock()
    path_list = main()
    time_end = time.clock()
    path_length = 0
    for index, path in enumerate(path_list):
        path_list[index] = superSmoothPath(path)
        path_length += len(path) - 1
    print "Path length:", path_length
    print "Calculated in:", int(time_end - time_start), "seconds"
    Visualization.runVisualization(path_list)
    Visualization.run3DVisualisation(path_list, 0)
Example #9
def tuneSVC(X_train_selected, y_train, X_test_selected, y_test):
    print()
    print('SVC gives better accuracy as compared to other classifiers, tuning hyperparameters of SVC...')
    print()
    param = svc_param_selection(X_train_selected, y_train, 3)
    model = svm.SVC(C=param['C'], kernel='rbf', gamma=param['gamma'])
    model = model.fit(X_train_selected, y_train)
    predicted = model.predict(X_test_selected)
    print()
    print("Accuracy Score of SVC with hyperparameters tuned: ",accuracy_score(y_test, predicted))
    cnf_matrix = confusion_matrix(y_test, predicted)
    print()
    Visualization.plotConfusionMatrix(cnf_matrix, 'Tuned SVC')
Example #10
def analyze_data(time_interval=TimeInterval, refined_type=FullyPreprocessedPath):
    print 'time_interval: ' + str(time_interval) + ' min'
    print 'refined_type: ' + refined_type
    print '--------------------------------------------'

    # Refine the data and save
    refined_data_path = Preprocess.preprocess_data(time_interval, refined_type)

    # Build similarity model and save
    Similarity.Build.similarity_model(time_interval, refined_type)

    # Set data for visualization
    Visualization.set_data4visualization(time_interval, refined_type)
Example #11
def checkPathDict(path_dict, iteration):
    points_occupied = []
    paths = path_dict.keys()
    for path in paths:
        for point in path_dict[path]:
            if point in points_occupied and point not in chips:
                print "Point", point
                print "Path no", path
                print "Iteration", iteration
                Visualization.runVisualization(path_dict.values())
                raise StandardError
            else:
                points_occupied.append(point)
Example #12
def cluster_analysis(
        PosData,
        ion_types=['Ranged'],
        method='OPTICS-APT',
        eps=2.0,
        minpts=10,
        min_node_size=None,
        significant_separation=0.75,
        k=None,
        node_similarity=0.9,
        cluster_member_prob=0.5,  # parameters from here on are for experts
        est_bg=None,
        min_cluster_size=None,  # background estimation could be added here in the future
        save_file=True,
        show_visual=False,
        show_background=False):  # these only control file and visual output

    init_time = time.time()

    Clusterer = OPTICSAPT(PosData)
    Clusterer.ion_selection(ion_types)

    if method == 'DBSCAN':
        Clusterer.DBSCAN_clustering(eps, minpts)
    elif method == 'OPTICS-APT':
        Clusterer.do_optics(eps, minpts)
        Clusterer.hierarchy_clustering(
            min_node_size=min_node_size,
            significance_of_separation=significant_separation,
            k=k,
            similarity_level=node_similarity,
            cluster_member_prob=cluster_member_prob)
        Clusterer.create_clusters(est_bg=est_bg,
                                  min_cluster_size=min_cluster_size)

        if save_file:
            Clusterer.write_RD_to_file(output_filename + '_ol_RD_CD')

    if save_file:
        Clusterer.cluster_stats(output_filename + '_cluster_stats')
        Clusterer.write_cluster_to_file(output_filename + '_clusters')
        Clusterer.write_indexed_cluster_rrng(output_filename + '_clusters')
        Clusterer.write_log(output_filename + '_log')

    fin_time = time.time()
    print('total computation time is: ', fin_time - init_time)

    if show_visual:
        visual.visualization(Clusterer, background=show_background)

    return Clusterer
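A hedged usage sketch for cluster_analysis; the pos-file loader name is an assumption, and save_file is left off because output_filename (used inside the function) is defined outside this excerpt:

# Hypothetical driver; load_pos_data is an assumed helper.
PosData = load_pos_data('sample.pos')
clusterer = cluster_analysis(PosData,
                             ion_types=['Ranged'],
                             method='OPTICS-APT',
                             eps=2.0,
                             minpts=10,
                             save_file=False,
                             show_visual=True)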
Example #13
def runTest():
    start = time.clock()
    #path = aStarPathFinder((1, 1, 0), (15, 8, 0))
    path = [(1, 1, 0), (1, 2, 0), (1, 3, 0), (1, 3, 1), (1, 4, 1), (1, 5, 1), (2, 5, 1), (3, 5, 1), (3, 5, 0), (3, 6, 0), (4, 6, 0), (5, 6, 0), (5, 7, 0), (5, 8, 0), (5, 8, 1), (4, 8, 1), (4, 7, 1), (4, 7, 2), (5, 7, 2), (6, 7, 2), (7, 7, 2), (7, 7, 1), (7, 8, 1), (8, 8, 1), (8, 9, 1), (8, 9, 0), (9, 9, 0), (10, 9, 0), (10, 8, 0), (10, 7, 0), (10, 6, 0), (11, 6, 0), (12, 6, 0), (12, 7, 0), (12, 8, 0), (13, 8, 0), (14, 8, 0), (15, 8, 0)]
    end = time.clock()
    print "Path calculate time", end - start
    print "Path length: ", len(path)

    Visualization.run3DVisualisation([path], 0)
    start = time.clock()
    smoother_path = smoothPath(path)
    end = time.clock()
    print "Smooth in:", end - start
    Visualization.run3DVisualisation([smoother_path], 0)
Example #14
def mask_3d_array(array):
    print("masking array")
    result = []

    for i in range(len(array)):
        print(f"masking slice {i + 1}/{len(array)}")
        # NOTE: arr[:][:][i] just indexes the first axis; [:] is a no-op copy.
        img_slice = np.squeeze(array)[i]
        slice_mask = Visualization.make_lungmask(img_slice)
        masked_slice = Visualization.apply_lungmask(img_slice, slice_mask)
        result.append(masked_slice)

    result = np.asarray(result)
    print("masking done")
    return result
Example #15
def votingClassifier(X_train, y_train, X_test, y_test):
    clf1 = RandomForestClassifier(n_estimators=50)
    clf2 = SVC(kernel='rbf')
    clf3 = SVC()  # NOTE: SVC() also defaults to the RBF kernel, so clf2 and clf3 are identical
    
    print()
    print('Voting Classifier with hard voting:')
    vote_model = VotingClassifier(estimators=[('Random', clf1), ('kernelized', clf2), ('SVC', clf3)], voting='hard')
    vote_model.fit(X_train, y_train)
    y_pred = vote_model.predict(X_test)
    print()
    print("Accuracy Score of Voting Classifier: ",accuracy_score(y_test, y_pred))
    cnf_matrix = confusion_matrix(y_test, y_pred)
    print()
    Visualization.plotConfusionMatrix(cnf_matrix, 'voting classifier')
Example #16
def main():
    Settings.init()

    default_radius = 2
    default_height = 5

    # Load in image and seeds
    filename = 'testdata.tif'
    img = tif.imread(filename)
    seedfilename = 'testdata_maxima_binary.tif'
    seeds = s.get_seeds(seedfilename)

    cylinder = cyl.Cylinder(default_radius, default_height)
    vis.render_gauss(cylinder)

    best_score, best_theta, best_psi = Optimize.optimize_angle(cylinder, seeds[257], img)
Example #17
 def open_visualisation(self, event):
     import Visualization
     # dlg = tk.Toplevel(self.master)
     r = tk.Tk()
     dialog = Visualization.Visualization(r)
     dialog.fill_combobox1()
     dialog.run()
Example #18
def average_over_seeds(model_type_list,
                       model_parameters_list,
                       n=N,
                       k=K,
                       t=T,
                       possible_rewards=POSSIBLE_REWARDS,
                       get_reward_probabilities=get_reward_probabilities,
                       min_seed=MIN_SEED,
                       max_seed=MAX_SEED):
    """
    run simulation for many models, with different seeds and return the required arrays for Visualization.py
    plot functions with vis.DATA list_type argument
    :param model_type_list: list of ModelType enums, containing the models to run
    :param model_parameters_list: list of dictionaries containing parameters for the models in model_type_list
    :param n: number of machines
    :param k: number of machines to choose each round
    :param t: number of trials
    :param possible_rewards: list of possible machine rewards
    :param get_reward_probabilities: function that returns a list of reward probabilities
    :param min_seed: lowest seed to run
    :param max_seed: highest seed to run
    :return: convergence, reward_sum, distance_from_real_distributions, regrets - arrays of tuples; the first
    element of each tuple is the sim.type and the second is the actual data
    """
    convergence_list = np.zeros((len(model_type_list), t - 1))
    reward_sums = np.zeros((len(model_type_list), t))
    regrets = np.zeros((len(model_type_list), t))
    distances = [[] for _ in range(len(model_type_list))]
    sim_titles = []
    for seed in tqdm(range(min_seed, max_seed + 1)):
        optimal_reward = None
        cur_rewards = np.zeros_like(reward_sums)
        for i, model_type, model_parameters in tqdm(
                zip(np.arange(len(model_type_list)), model_type_list,
                    model_parameters_list)):
            np.random.seed(seed)
            sim = s.Simulation(n, k, t, possible_rewards,
                               get_reward_probabilities, model_type,
                               **model_parameters)
            if seed == min_seed:
                sim_titles.append(sim.type)
            sim.run_simulation()
            convergence_list[i, :] += sim.get_convergence_rate()
            if sim.type == "Optimal Model":
                optimal_reward = sim.get_reward_sum()
            cur_rewards[i, :] += sim.get_reward_sum()
            reward_sums[i, :] += cur_rewards[i, :]
            for j, machine in enumerate(sim.machine_list):
                distances[i].append(
                    vis.fr_metric(
                        machine.reward_probabilities,
                        sim.model.estimated_machine_reward_distribution[j, :]))
        regrets[...] += optimal_reward - cur_rewards
    convergence_list /= (max_seed - min_seed + 1)
    reward_sums /= (max_seed - min_seed + 1)
    regrets /= (max_seed - min_seed + 1)
    return [(sim_titles[i], convergence_list[i, :]) for i in range(len(model_type_list))], \
           [(sim_titles[i], reward_sums[i, :]) for i in range(len(model_type_list))], \
           [(sim_titles[i], distances[i]) for i in range(len(model_type_list))], \
           [(sim_titles[i], regrets[i]) for i in range(len(model_type_list))]
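The (title, data) tuples returned above are shaped for the plot helpers in Visualization.py that the docstring mentions. A hedged usage sketch; the ModelType members and the empty parameter dictionaries are assumptions:

# Hypothetical usage; model names are assumptions, and regrets relies on
# an "Optimal Model" being in the list, as in the function body above.
models = [ModelType.OPTIMAL, ModelType.THOMPSON_SAMPLING]
params = [{}, {}]
convergence, reward_sums, distances, regrets = average_over_seeds(models, params)
for title, sums in reward_sums:
    print(title, sums[-1])  # final cumulative reward per model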
Example #19
class TestTrialResponse(unittest.TestCase):
	@data(('0.pkl',
		[50,11,3.3,1.83,0.92,0.31],
		12,
		36,
		4.0,
		0.0,
		0.7,
		0.0,
		8,
		Visualization.Visualize(circleSize=10, screenColor=[255,255,255], screen = pg.display.set_mode([800,800]), 
			saveImage=False, saveImageFile='image'),
		0.95,
		{50:5,11:30,3.3:60,1.83:90,0.92:120,0.31:150}))
	@unpack
	def testResponseForSimpleChasing(self,filename,subtletyHypothesis,updateFrequency,attentionSwitchFrequency,precisionPerSlot,precisionForUntracked,memoryratePerSlot,memoryrateForUntracked,attentionLimitation,visualize,responseRule,precisionToSubtletyDict):
		loadTrajectory=LoadTrajectory.loadTrajectory
		initialPrior=targetCode.initialPrior(attentionLimitation)
		computePosterior=calPosterior.calPosteriorLog
		attention=Attention.AttentionToPrecisionAndDecay(precisionPerSlot, precisionForUntracked, memoryratePerSlot, memoryrateForUntracked)
		attentionSwitch=Attention.AttentionSwitch(attentionLimitation)
		perception=Perception.Perception(attention, computePosterior, attentionSwitch, attentionSwitchFrequency)
		response=Response.RuleBasedResponse(responseRule, precisionToSubtletyDict)
		trial=targetCode.Trial(subtletyHypothesis, updateFrequency, loadTrajectory, visualize, initialPrior, response, perception)
		trialResponse=trial(filename)
		# self.assertTrue(trialResponse['action'])
		self.assertLessEqual(trialResponse['RT'],8000)
		self.assertEqual(trialResponse['wolfIdentity'],0)
		self.assertEqual(trialResponse['sheepIdentity'],1)
Example #20
def analyze_data(time_interval=TimeInterval, refined_type=FullyPreprocessedPath):

    print 'time_interval: ' + str(time_interval) + ' min'
    print 'refined_type: ' + refined_type
    print '--------------------------------------------'
    # Draw graphs and save the figures
    graph_directory = Graph.Save.raw_data2graph()

    # Refine the data and save
    refined_data_path = Preprocess.refining_data(time_interval, refined_type)

    # Build similarity model and save
    Similarity.Build.similarity_model(time_interval, refined_type)

    # Set data for visualization
    Visualization.set_data4visualization(time_interval, refined_type)
Example #21
def edit_list(dataframe, playlist_ids, mapping):
    dataframe = pd.read_json(dataframe)
    print('playlist_ids: ' + str(playlist_ids))
    print("Pl_name_Dict: " + str(mapping))
    adding = playlist_ids[1]
    new_spec = playlist_ids[0]
    pl_name_dict = mapping[1]
    new_list = [
        html.Div([
            dcc.Checklist(id={
                "index": i,
                "type": "done"
            },
                          options=[{
                              "label": "",
                              "value": "done"
                          }],
                          value=done,
                          style={"display": "inline"},
                          labelStyle={"display": "inline"}),
            html.Div((lambda x: pl_name_dict[x]
                      if x in pl_name_dict.keys() else x)(text),
                     id={"index": i},
                     style=style_done if done else style_todo)
        ],
                 style={"clear": "both"})
        for i, (text, done) in enumerate(new_spec)
    ]
    radar_graph = get_radar_graph(dataframe)
    bar_graph = Visualization.make_bar_graph(dataframe)
    y = [new_list, adding, radar_graph, bar_graph]
    #print(y)
    return y
Example #22
def visualise_pos_neg_words():
    rule_word_dict = vn.visualize(emotion_words, sentiment_words, file_name)

    e_pos_words = get_words_from_emotion_list(rule_word_dict['e_pos'])
    s_pos_words = get_words_from_emotion_list(rule_word_dict['s_pos'])

    e_neg_words = get_words_from_emotion_list(rule_word_dict['e_neg'])
    s_neg_words = get_words_from_emotion_list(rule_word_dict['s_neg'])

    pos_words = list(set(e_pos_words + s_pos_words))
    neg_words = list(set(e_neg_words + s_neg_words))

    final_neg_words = []
    for wo in neg_words:
        if wo in pos_words:
            if wo in e_neg_words and wo in s_neg_words:
                final_neg_words.append(wo)
        else:
            final_neg_words.append(wo)

    final_pos_words = []

    for wo in pos_words:
        if wo not in final_neg_words:
            final_pos_words.append(wo)

    return {
        'popular_words': final_pos_words,
        'non_popular_words': final_neg_words
    }
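The reconciliation above can be stated compactly with set operations. A behavior-equivalent sketch (ignoring list order): a word that appears on both sides stays negative only when both the emotion and sentiment rules flagged it negative, and everything else that ever looked positive ends up positive.

pos = set(e_pos_words) | set(s_pos_words)
neg = set(e_neg_words) | set(s_neg_words)
# Words on both sides stay negative only if BOTH rule sets flagged them negative.
final_neg = (neg - pos) | (neg & pos & set(e_neg_words) & set(s_neg_words))
final_pos = pos - final_neg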
Example #23
    def __import_dem(self):

        user_settings = UserSettings()
        file_filter = 'Ascii file (*' + AsciiParser.FILE_EXT + ')'
        file, filt = QFileDialog.getOpenFileName(self, 'Open File',
                                                 user_settings.working_dir,
                                                 file_filter)

        if file:

            qApp.setOverrideCursor(Qt.WaitCursor)

            if self._ign_pt_editor:
                self._ign_pt_editor.deleteLater()

            self._ign_pt_editor = IgnitionPointViewer(self, file)
            self._ign_pt_editor.setEnabled(True)

            self._setup_ign_pt_map_lgnd()

            # Enable relevant widgets
            self.action_export_dem.setEnabled(True)

            # This means that a fuel map has already been loaded,
            # so the user can now convert to FDS file
            if self._fl_map_editor:
                self.__init_sim_settings()
                self.action_create_environment.setEnabled(True)

                if self._visualization:
                    self._visualization.deleteLater()

                self._visualization = Visualization(
                    self._fl_map_editor.parser(), self._ign_pt_editor.parser(),
                    self)
                self._visualization.setEnabled(True)
                self._visualization.hide()

            # Set current tab to the DEM legend
            self._tab_widget.setCurrentIndex(2)

            self._dem_title_label.setText("DEM Title: " +
                                          util.get_filename(file))

            self._ign_pt_editor.show()
            qApp.restoreOverrideCursor()
Example #24
def sampling_def(image1, dist_img, threshold, Sample_Density):
    Samples2 = SpatialSampling.DiskSamples(image1, dist_img, threshold)
    Samples2.GenSamples(
        Sample_Density,
        80000)  # This distance will be used to calculate strains
    point_arr2 = Samples2.samples
    sample_point2 = np.transpose(point_arr2)
    Visualization.DataVisulization(image1, threshold).scatterplot(point_arr2)
    return sample_point2
Example #25
def plotPriceLabel():

    ticker = 'MMM'
    X_train, y_train, X_test, y_test = getXy(ticker,
                                             isDrop=True,
                                             predict_period=15)

    [clf_Name, clf] = SVM(X_train, y_train, isGridSearch=True)
    predictions = clf.predict(X_test)
    df = getStock(ticker)
    startTestDate = dt.date(2016, 1, 1)
    endTestDate = dt.date(2016, 12, 31)

    df = df.loc[startTestDate:endTestDate]
    df = df[['Adj Close']]
    df['Prediction'] = predictions

    vs.closeLabel(df)
Example #26
def runMain():
    start = time.clock()
    paths = main()
    print "Calculated in:", time.clock() - start
    if paths == []:
        print "Could not finish in given number of iterations"
    else:
        print "Amount of paths", len(paths)
        total_length = 0
        for path in paths:
            total_length += len(path) - 1
        print "Total length:", total_length
        print "Not layed paths", not_layed_paths
        if not_used_chips != []:
            print "Deleted chips:", not_used_chips
        print paths
        Visualization.run3DVisualisation(paths)
        Visualization.runVisualization(paths)
Example #27
def showPath(path, graph, ranges=[]):
    edgesToHighlight = []
    visitedEdge = dict()
    for node in graph.nodes():
        for neighbour in list(graph.neighbors(node)):
            for level in list(graph[node][neighbour]):
                visitedEdge[(node, neighbour, level)] = False

    number = 1
    for idx in range(1, len(path)):
        level = 0
        while visitedEdge[(path[idx - 1], path[idx], level)]:
            level += 1
        visitedEdge[(path[idx - 1], path[idx], level)] = True

        edgesToHighlight.append((path[idx - 1], path[idx], level))
        vis.draw(graph, edgesToHighlight, True, ranges, number)
        number += 1
Example #28
def question3():
    rate = 0.00005
    count = 0
    con_ws = Perceptrons.Weights([-1, 0, 0.5, -0.5, 0.5], rate)
    cla_ws = Perceptrons.Weights([-1.101, -0.008, 0.652, -0.372, 0.412], rate)
    conf = []
    classf = []
    col = []
    for sample in data:
        if con_ws.activation(sample) == sample[-1]:
            count += 1
            col.append('g')
        else:
            col.append('r')
        conf.append(round(con_ws.result, 3))
        cla_ws.activation(sample)
        classf.append(round(cla_ws.result, 3))
    Visualization.confidence(conf, classf, col)
    print(count / len(data))
Example #29
def question1():
    ws = []
    for rate in [0.005, 0.001, 0.00001]:
        Perceptrons.initalweights = [-1, 1, 1, 1, 1]
        accuracy, arr = Perceptrons.crossvalidation(data, 10, rate)
        ws.append(arr)
        print("learning rate: ", rate)
        print("accuracy: ", accuracy)
        print("weights:", arr, '\n')
    Visualization.RatesComparsion(ws)
Example #30
 def view(self, configuration=None, format='pdb'):
     """Starts an external viewer for the object in the given
     |configuration|. The optional parameter |format| indicates
     which format (and hence which viewer) should be used;
     the formats are "pdb" and "vrml". An optional subformat
     specification can be added to the format name, separated
     by a dot. The subformats of "pdb" are defined by the
     module 'Scientific.IO.PDB', the subformats of "vrml" are
     "wireframe" (the default, yielding a wireframe representation),
     "ball_and_stick" (yielding a ball-and-stick representation),
     "highlight" (like wireframe, but with a small sphere for
     all atoms that have an attribute "highlight" with a non-zero value),
     and "charge" (wireframe plus small spheres for the atoms with colors
     from a red-to-green color scale to indicate the charge)."""
     universe = self.universe()
     if universe is not None:
         configuration = universe.contiguousObjectConfiguration(
             [self], configuration)
     Visualization.viewConfiguration(self, configuration, format)
Example #31
def auto(tiss, data="Female"):
    print('\n')
    segmentation, dico = getDSubstance(data, tiss)
    dicomdir = segmentation.dicomdir
    print('Dicom Dir : ', dicomdir)
    print('Initialize ImageDicom')
    image = segmentation.dicomimage
    if "box" in dico.keys():
        image.cropBoxImages(dico["box"]["dheight"], dico["box"]["drow"],
                            dico["box"]["dcolumn"])
        segmentation.data_dico[segmentation.name]["box"] = dico["box"]
        print("height :", image.height)
    segmentation.pipeline()
    print('Initialize Visualization')
    myMesh = Visualization(segmentation)
    myMesh.pipeline()
    print('Object Created')
    print('----------')
    print('\n')
Example #32
    def view(self, configuration=None, format='pdb'):
        """Starts an external viewer for the object in the given
        |configuration|. The optional parameter |format| indicates
        which format (and hence which viewer) should be used;
        the formats are "pdb" and "vrml". An optional subformat
        specification can be added to the format name, separated
        by a dot. The subformats of "pdb" are defined by the
        module 'Scientific.IO.PDB', the subformats of "vrml" are
        "wireframe" (the default, yielding a wireframe representation),
        "ball_and_stick" (yielding a ball-and-stick representation),
        "highlight" (like wireframe, but with a small sphere for
        all atoms that have an attribute "highlight" with a non-zero value),
        and "charge" (wireframe plus small spheres for the atoms with colors
        from a red-to-green color scale to indicate the charge)."""
        universe = self.universe()
        if universe is not None:
            configuration = universe.contiguousObjectConfiguration([self],
                                                                configuration)
        Visualization.viewConfiguration(self, configuration, format)
Example #33
def Update(Bodies, Steps=100, Snaps=10, L=3, njobs=1, Plots=False, direct=''):
    '''
    This function updates the system.
        
    In Args:
         - Bodies:      Array containing the bodies to be updated.
         - *Steps:      (Optional) The number of update steps. If not given, it will update 100 steps.
         - *Snaps:      (Optional) The number of snapshots to take. This will only be useful when Plots = True.
         - *L = 3       The scale of the system. Default is 3 AU.
         - *Plots       (Optional) If True, saves snapshots to the given directory.
         - *direct      (Needed if Plots = True) A string with the path where the outputs should be saved.
         - *njobs:      (Optional) The number of cores to be used during the computation. Defaults to 1,
                        i.e. the program runs sequentially.
        
    Out Args:
        - Bodies:      Array containing the updated bodies. 
        
    '''
    j = 0  # This is a counter for the number of snapshots.

    for k in tqdm(range(Steps)):

        time.sleep(0.01)

        if Plots:

            N_steps_snapshot = int(Steps / Snaps)

            if k % N_steps_snapshot == 0:  # This will plot only the necessary steps.

                Visualization.Plot(
                    Bodies, k, j, L, direct
                )  # This will make a plot of the actual step of the simulation.

                j += 1

        B = Bodies  # This is a "temporal array" that will serve to compute the acceleration in parallel
        # without updating the positions of the bodies before we compute all the accelerations.

        Parallel(n_jobs=njobs)(delayed(Compute_Accel)(i, B) for i in Bodies)

        for i in range(len(Bodies)):

            Bodies[i].velocity = Bodies[i].Compute_velocity()

            Bodies[i].position = Bodies[i].Compute_position()

            # Once the main parameters have been updated we define the acceleration (i) equal to the
            # computed acceleration (i+1).

            Bodies[i].acceleration_i = Bodies[i].acceleration_i_1

    return Bodies
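A hedged driver for Update, assuming the elements of Bodies expose the attributes the loop touches (position, velocity, acceleration_i, Compute_velocity, ...); the system-construction helper is an assumption:

# Hypothetical usage; make_two_body_system is an assumed helper returning
# objects with the attributes Update() relies on.
bodies = make_two_body_system()
bodies = Update(bodies, Steps=1000, Snaps=10, L=3, njobs=2,
                Plots=True, direct='./snapshots')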
Example #34
def question2():
    Perceptrons.epochs = 20
    ys = []
    for rate in [0.005, 0.001, 0.00001]:
        print("learning rate: ", rate)
        Perceptrons.count = 0
        Perceptrons.train_errors = []

        Perceptrons.initalweights = [-1, 1, 1, 1, 1]
        accuracy, arr = Perceptrons.crossvalidation(data, 10, rate)
        print(Perceptrons.train_errors, '\n')
        ys.append(Perceptrons.train_errors)
    Visualization.AccuracyInEpochs(ys, 20)
Example #35
def performKMeans(inputDataClass, k, mode, num_runs, visualize=False):
    covar = -1
    if mode == 3:
        covar = performanceAnalyser.getFullCovariance(
            inputDataClass.Train[:, :-1])
    labels, means, rms, Ypred = kmeans.kfit(inputDataClass.Train[:, :-1],
                                            k,
                                            inputDataClass.Train[:, -1],
                                            inputDataClass.Test[:, :-1],
                                            num_runs=num_runs,
                                            mode=mode,
                                            covar=covar)
    print("rms = " + str(rms))
    print("Kmeans done")

    Ytrue = inputDataClass.Test[:, -1]
    print("Testing Accuracy = " +
          str(performanceAnalyser.calcAccuracyTotal(Ypred, Ytrue)))

    if visualize:
        Visualization.visualizeKMeans(inputDataClass.Train[:, :-1], labels, k)
        print("Kmeans visualized")

    return Ytrue, Ypred
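Visualization.visualizeKMeans is project-local; a minimal stand-in sketch, assuming it scatter-plots the first two feature dimensions colored by cluster label and that labels is a NumPy array:

import matplotlib.pyplot as plt

def visualize_kmeans_sketch(X, labels, k):
    # Hypothetical stand-in for Visualization.visualizeKMeans.
    for cluster in range(k):
        pts = X[labels == cluster]
        plt.scatter(pts[:, 0], pts[:, 1], s=8, label='cluster %d' % cluster)
    plt.legend()
    plt.show()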
Example #36
def sampling_def(image1, dist_img, threshold):
    """
    this is the sampling function
    
    input:
        image1 --- (N,d) np.array
        dist_img --- (N,d) np.array
        threshold --- a float value
    output:
        sample_point2 ---(N,3) np.array
    """
    Samples2 = SpatialSampling.DiskSamples(image1, dist_img, threshold)
    Samples2.GenSamples(3, 80000)  # this number controls the density
    point_arr2 = Samples2.samples
    sample_point2 = np.transpose(point_arr2)
    Visualization.DataVisulization(image1, threshold).scatterplot(point_arr2)
    return sample_point2
Example #37
def get_radar_graph(data):
    #global data
    # print("Valid data"+ str(valid_data))
    # print("Current ids" + str(current_ids))
    # if len(filter) > 0:
    #     data = PlaylistRetriever.get_user_playlists(filter)
    # else:
    #     data = PlaylistRetriever.get_user_playlists(current_ids)
    print("got here")

    fig = Visualization.make_radar_chart(data)
    fig.update_layout(margin=dict(l=100, r=0, t=0, b=0),
                      #paper_bgcolor="LightSteelBlue"
                      )
    fig.update_layout(legend=dict(
        yanchor="top",
        #y=0.99,
        xanchor="left",
        #x=0.01
    ))
    return fig
Example #38
    shortest_paths = {path_number: [] for path_number in range(len(netlist))}    # shortest_paths2 = [[(2, 10, 0), (2, 9, 0), (2, 8, 0)], [(12, 3, 0), (11, 3, 0), (10, 3, 0), (9, 3, 0), (9, 4, 0), (8, 4, 0)], [(8, 4, 0), (8, 3, 0), (8, 2, 0), (8, 2, 1), (7, 2, 1), (6, 2, 1), (5, 2, 1), (4, 2, 1), (3, 2, 1), (3, 2, 0)], [(16, 7, 0), (15, 7, 0), (15, 6, 0), (15, 6, 1), (14, 6, 1), (14, 6, 2), (14, 6, 3), (14, 6, 4), (13, 6, 4), (12, 6, 4), (11, 6, 4), (10, 6, 4), (9, 6, 4), (8, 6, 4), (7, 6, 4), (6, 6, 4), (5, 6, 4), (5, 5, 4), (5, 4, 4), (4, 4, 4), (4, 3, 4), (3, 3, 4), (2, 3, 4), (1, 3, 4), (0, 3, 4), (0, 3, 3), (0, 3, 2), (0, 3, 1), (0, 3, 0), (0, 2, 0), (1, 2, 0), (1, 1, 0)], [(3, 2, 0), (3, 3, 0), (3, 3, 1), (3, 3, 2), (3, 4, 2), (3, 4, 3), (3, 4, 4), (3, 5, 4), (4, 5, 4), (4, 6, 4), (4, 6, 3), (4, 7, 3), (5, 7, 3), (6, 7, 3), (7, 7, 3), (8, 7, 3), (9, 7, 3), (10, 7, 3), (10, 7, 2), (11, 7, 2), (12, 7, 2), (12, 7, 1), (13, 7, 1), (13, 7, 0)], [(2, 8, 0), (2, 7, 0), (1, 7, 0), (0, 7, 0), (0, 6, 0), (0, 6, 1), (0, 5, 1), (0, 5, 2), (1, 5, 2), (1, 5, 3), (1, 4, 3), (1, 3, 3), (2, 3, 3), (2, 2, 3), (2, 2, 2), (2, 1, 2), (2, 0, 2), (2, 0, 1), (2, 0, 0), (3, 0, 0), (4, 0, 0), (5, 0, 0), (6, 0, 0), (6, 1, 0)], [(1, 5, 0), (1, 6, 0), (2, 6, 0), (3, 6, 0), (3, 6, 1), (3, 5, 1), (3, 4, 1), (4, 4, 1), (5, 4, 1), (6, 4, 1), (7, 4, 1), (8, 4, 1), (8, 4, 0)], [(1, 5, 0), (2, 5, 0), (2, 5, 1), (2, 5, 2), (3, 5, 2), (3, 5, 3), (4, 5, 3), (5, 5, 3), (6, 5, 3), (7, 5, 3), (8, 5, 3), (9, 5, 3), (10, 5, 3), (10, 5, 4), (11, 5, 4), (12, 5, 4), (13, 5, 4), (14, 5, 4), (14, 5, 3), (15, 5, 3), (15, 5, 2), (15, 5, 1), (15, 5, 0), (16, 5, 0)], [(12, 3, 0), (12, 4, 0), (11, 4, 0), (10, 4, 0), (10, 4, 1), (10, 4, 2), (9, 4, 2), (8, 4, 2), (7, 4, 2), (7, 5, 2), (7, 5, 1), (7, 6, 1), (7, 7, 1), (7, 8, 1), (6, 8, 1), (6, 8, 0)], [(2, 8, 0), (3, 8, 0), (3, 7, 0), (4, 7, 0), (5, 7, 0), (5, 6, 0), (6, 6, 0), (7, 6, 0), (7, 7, 0), (7, 8, 0), (8, 8, 0), (9, 8, 0)], [(1, 5, 0), (1, 4, 0), (1, 3, 0), (2, 3, 0), (2, 2, 0), (3, 2, 0)], [(6, 1, 0), (7, 1, 0), (7, 2, 0), (7, 3, 0), (7, 4, 0), (8, 4, 0)], [(15, 1, 0), (15, 0, 0), (15, 0, 1), (15, 0, 2), (14, 0, 2), (13, 0, 2), (12, 0, 2), (11, 0, 2), (10, 0, 2), (9, 0, 2), (8, 0, 2), (7, 0, 2), (6, 0, 2), (5, 0, 2), (4, 0, 2), (4, 1, 2), (4, 2, 2), (4, 3, 2), (4, 4, 2), (4, 5, 2), (4, 5, 1), (4, 5, 0)], [(16, 5, 0), (16, 4, 0), (16, 3, 0), (16, 2, 0), (16, 2, 1), (15, 2, 1), (15, 2, 2), (14, 2, 2), (14, 2, 3), (13, 2, 3), (12, 2, 3), (11, 2, 3), (11, 2, 2), (11, 2, 1), (11, 2, 0), (12, 2, 0)], [(4, 5, 0), (5, 5, 0), (6, 5, 0), (7, 5, 0), (8, 5, 0), (8, 4, 0)], [(12, 11, 0), (12, 10, 0), (12, 9, 0), (13, 9, 0), (14, 9, 0), (15, 9, 0), (16, 9, 0), (17, 9, 0), (17, 8, 0), (17, 7, 0), (17, 6, 0), (17, 5, 0), (16, 5, 0)], [(1, 11, 0), (1, 12, 0), (2, 12, 0), (3, 12, 0), (4, 12, 0), (5, 12, 0), (6, 12, 0), (7, 12, 0), (8, 12, 0), (9, 12, 0), (10, 12, 0), (11, 12, 0), (12, 12, 0), (12, 11, 0)], [(1, 1, 0), (1, 1, 1), (1, 2, 1), (1, 3, 1), (1, 4, 1), (2, 4, 1), (2, 4, 0), (3, 4, 0), (3, 5, 0), (4, 5, 0)], [(6, 1, 0), (5, 1, 0), (4, 1, 0), (4, 2, 0), (3, 2, 0)], [(1, 9, 0), (1, 9, 1), (1, 9, 2), (1, 9, 3), (2, 9, 3), (3, 9, 3), (4, 9, 3), (4, 9, 2), (5, 9, 2), (6, 9, 2), (7, 9, 2), (8, 9, 2), (9, 9, 2), (9, 8, 2), (10, 8, 2), (11, 8, 2), (11, 8, 1), (11, 8, 0)], [(2, 10, 0), (2, 11, 0), (3, 11, 0), (4, 11, 0), (5, 11, 0), (5, 11, 1), (6, 11, 1), (7, 11, 1), (8, 11, 1), (9, 11, 1), (10, 11, 1), (10, 10, 1), (10, 9, 1), (11, 9, 1), (12, 9, 1), (12, 8, 1), (13, 8, 1), (14, 8, 1), (15, 8, 1), (15, 8, 0)], [(6, 8, 0), (6, 7, 0), (6, 7, 1), (6, 6, 1), (6, 6, 2), (6, 5, 2), (6, 4, 2), (6, 3, 2), (6, 2, 2), (6, 1, 2), (7, 1, 2), (7, 1, 1), (8, 1, 1), (8, 1, 0), (9, 1, 0), (10, 1, 0)], [(9, 10, 0), (8, 10, 0), (7, 10, 0), (7, 10, 1), (6, 10, 1), (5, 10, 1), (4, 10, 1), (3, 10, 1), (3, 9, 1), (3, 9, 2), (2, 9, 2), (2, 8, 2), (2, 8, 3), (2, 7, 3), (2, 6, 3), (2, 5, 3), (2, 4, 3), (2, 4, 2), (2, 3, 2), (2, 3, 1), (2, 2, 1), (2, 1, 1), (2, 1, 0), (1, 1, 0)], [(14, 2, 0), (14, 2, 1), (14, 1, 1), (14, 0, 1), (14, 0, 0), (13, 0, 0), (12, 0, 0), (11, 0, 0), (10, 0, 0), (10, 1, 0)], [(10, 1, 0), (10, 1, 1), (10, 2, 1), (10, 3, 1), (11, 3, 1), (11, 4, 1), (12, 4, 1), (12, 5, 1), (13, 5, 1), (13, 6, 1), (13, 6, 0), (13, 7, 0)], [(16, 7, 0), (16, 6, 0), (16, 5, 0)], [(10, 1, 0), (10, 2, 0), (9, 2, 0), (9, 2, 1), (9, 3, 1), (9, 4, 1), (9, 5, 1), (10, 5, 1), (10, 6, 1), (10, 7, 1), (10, 8, 1), (10, 8, 0), (11, 8, 0)], [(13, 7, 0), (12, 7, 0), (12, 6, 0), (12, 6, 1), (12, 6, 2), (12, 6, 3), (11, 6, 3), (10, 6, 3), (9, 6, 3), (8, 6, 3), (7, 6, 3), (6, 6, 3), (5, 6, 3), (5, 6, 2), (5, 5, 2), (5, 4, 2), (5, 3, 2), (5, 2, 2), (5, 1, 2), (5, 1, 1), (6, 1, 1), (6, 1, 0)], [(2, 8, 0), (2, 8, 1), (2, 9, 1), (2, 10, 1), (2, 11, 1), (2, 12, 1), (3, 12, 1), (4, 12, 1), (5, 12, 1), (6, 12, 1), (7, 12, 1), (8, 12, 1), (9, 12, 1), (10, 12, 1), (11, 12, 1), (11, 11, 1), (12, 11, 1), (12, 11, 0)], [(1, 1, 0), (0, 1, 0), (0, 1, 1), (0, 1, 2), (1, 1, 2), (1, 1, 3), (2, 1, 3), (3, 1, 3), (3, 2, 3), (3, 3, 3), (4, 3, 3), (5, 3, 3), (6, 3, 3), (7, 3, 3), (8, 3, 3), (8, 3, 2), (9, 3, 2), (10, 3, 2), (11, 3, 2), (11, 4, 2), (11, 5, 2), (11, 5, 1), (11, 5, 0)], [(12, 2, 0), (12, 3, 0)], [(15, 1, 0), (14, 1, 0), (13, 1, 0), (12, 1, 0), (11, 1, 0), (10, 1, 0)], [(15, 8, 0), (16, 8, 0), (16, 8, 1), (17, 8, 1), (17, 7, 1), (17, 6, 1), (17, 5, 1), (17, 4, 1), (17, 4, 0), (17, 3, 0), (17, 2, 0), (17, 1, 0), (16, 1, 0), (15, 1, 0)], [(6, 1, 0), (6, 2, 0), (5, 2, 0), (5, 3, 0), (4, 3, 0), (4, 4, 0), (4, 5, 0)], [(15, 8, 0), (14, 8, 0), (13, 8, 0), (13, 7, 0)], [(11, 5, 0), (10, 5, 0), (9, 5, 0), (9, 6, 0), (8, 6, 0), (8, 7, 0), (8, 7, 1), (8, 8, 1), (8, 9, 1), (9, 9, 1), (9, 10, 1), (9, 10, 0)], [(3, 2, 0), (3, 1, 0), (3, 1, 1), (3, 0, 1), (4, 0, 1), (5, 0, 1), (6, 0, 1), (7, 0, 1), (7, 0, 0), (8, 0, 0), (9, 0, 0), (9, 0, 1), (10, 0, 1), (11, 0, 1), (11, 1, 1), (12, 1, 1), (13, 1, 1), (13, 2, 1), (13, 2, 0), (14, 2, 0)], [(4, 5, 0), (4, 6, 0), (4, 6, 1), (4, 7, 1), (5, 7, 1), (5, 7, 2), (6, 7, 2), (7, 7, 2), (7, 6, 2), (8, 6, 2), (9, 6, 2), (10, 6, 2), (11, 6, 2), (11, 6, 1), (11, 7, 1), (11, 7, 0), (11, 8, 0)], [(9, 8, 0), (9, 7, 0), (10, 7, 0), (10, 6, 0), (11, 6, 0), (11, 5, 0)], [(1, 5, 0), (1, 5, 1), (1, 6, 1), (1, 6, 2), (1, 7, 2), (2, 7, 2), (3, 7, 2), (3, 8, 2), (4, 8, 2), (5, 8, 2), (6, 8, 2), (7, 8, 2), (8, 8, 2), (8, 7, 2), (9, 7, 2), (9, 7, 1), (9, 8, 1), (9, 8, 0)], [(6, 8, 0), (6, 9, 0), (6, 10, 0), (6, 11, 0), (7, 11, 0), (8, 11, 0), (9, 11, 0), (10, 11, 0), (11, 11, 0), (12, 11, 0)], [(13, 7, 0), (14, 7, 0), (14, 6, 0), (14, 5, 0), (14, 5, 1), (14, 5, 2), (14, 4, 2), (14, 4, 3), (13, 4, 3), (12, 4, 3), (11, 4, 3), (10, 4, 3), (9, 4, 3), (9, 3, 3), (9, 2, 3), (8, 2, 3), (7, 2, 3), (6, 2, 3), (5, 2, 3), (4, 2, 3), (4, 1, 3), (4, 0, 3), (3, 0, 3), (2, 0, 3), (1, 0, 3), (1, 0, 2), (1, 0, 1), (1, 0, 0), (1, 1, 0)], [(15, 1, 0), (15, 2, 0), (15, 3, 0), (15, 4, 0), (14, 4, 0), (13, 4, 0), (13, 5, 0), (12, 5, 0), (11, 5, 0)], [(1, 11, 0), (1, 11, 1), (1, 11, 2), (1, 11, 3), (1, 10, 3), (2, 10, 3), (3, 10, 3), (4, 10, 3), (5, 10, 3), (5, 9, 3), (5, 8, 3), (6, 8, 3), (7, 8, 3), (8, 8, 3), (9, 8, 3), (10, 8, 3), (11, 8, 3), (11, 7, 3), (12, 7, 3), (13, 7, 3), (13, 6, 3), (13, 5, 3), (12, 5, 3), (12, 5, 2), (12, 4, 2), (12, 3, 2), (12, 3, 1), (12, 3, 0)], [(16, 7, 0), (16, 7, 1), (16, 6, 1), (16, 5, 1), (16, 4, 1), (15, 4, 1), (14, 4, 1), (14, 3, 1), (14, 3, 2), (13, 3, 2), (13, 2, 2), (12, 2, 2), (12, 2, 1), (12, 2, 0)], [(2, 10, 0), (3, 10, 0), (3, 9, 0), (4, 9, 0), (5, 9, 0), (5, 9, 1), (6, 9, 1), (7, 9, 1), (7, 9, 0), (8, 9, 0), (9, 9, 0), (9, 8, 0)], [(11, 8, 0), (11, 9, 0), (10, 9, 0), (10, 10, 0), (9, 10, 0)], [(14, 2, 0), (14, 3, 0), (13, 3, 0), (13, 3, 1), (13, 4, 1), (13, 4, 2), (13, 5, 2), (13, 6, 2), (13, 7, 2), (13, 8, 2), (12, 8, 2), (12, 9, 2), (11, 9, 2), (10, 9, 2), (10, 10, 2), (9, 10, 2), (8, 10, 2), (7, 10, 2), (6, 10, 2), (5, 10, 2), (4, 10, 2), (3, 10, 2), (2, 10, 2), (1, 10, 2), (1, 10, 1), (1, 10, 0), (1, 9, 0)], [(1, 5, 0), (0, 5, 0), (0, 4, 0), (0, 4, 1), (0, 4, 2), (1, 4, 2), (1, 3, 2), (1, 2, 2), (1, 2, 3), (1, 2, 4), (2, 2, 4), (3, 2, 4), (4, 2, 4), (5, 2, 4), (5, 1, 4), (5, 1, 3), (6, 1, 3), (7, 1, 3), (8, 1, 3), (8, 1, 2), (9, 1, 2), (10, 1, 2), (11, 1, 2), (12, 1, 2), (13, 1, 2), (14, 1, 2), (15, 1, 2), (15, 1, 1), (15, 1, 0)], [(6, 8, 0), (5, 8, 0), (4, 8, 0), (4, 8, 1), (3, 8, 1), (3, 7, 1), (2, 7, 1), (1, 7, 1), (1, 8, 1), (1, 8, 0), (1, 9, 0)]]
    

    for i in range(0):
##        random.seed("piza")
        skips = 0
        netlist = Data.netlist_4
        netlist = Grid.sortDistance(netlist)
        connections_per_chip = netlist_checker.connectionsPerChip(netlist, chips)

        path_grid = Grid.createPathGrid()
        grid = Grid.createGrid()
        neighbour_grid = createNeighbourGrid()
        # assert False
        relay_list = [0 for i in netlist]
        relay_badness, max_path_length, max_iterations, max_random_moves, num_random_tries = 15, 60, 3000, 10, 3
        shortest_paths = findPaths(netlist, relay_badness, max_path_length, max_iterations, max_random_moves, num_random_tries)
        print shortest_paths
        print relay_list
        print skips


        aantal_paden_gelegd = len([path for path in shortest_paths.values() if len(path) > 0])
        totaal_paden = len(netlist)
        # safe output
        if aantal_paden_gelegd == totaal_paden:
            Controle.safe(netlist, shortest_paths, relay_list)
    layer = 0
    Visualization.runVisualization(shortest_paths.values(), layer)
        # Visualization.run3DVisualisation(shortest_paths.values(), "joris is cool")
Example #39
 def show_all(self):
     Visualization.plot_data_pointsXX(self.part.windows, xlim=None, ylim=None, filename=False, dims=None)
Example #40
            for node in queue:
                if node[3] == successor[3] and successor[1] < node[1]:
                    queue.remove(node)
                    break

        for successor in successors:
            for node in closed_list:
                if len(closed_list) != 0:
                    if node[3] == successor[3] and successor[1] < node[1]:
                        closed_list.remove(node)
                        break

        for successor in successors:
            heappush(queue, successor)

        heappush(closed_list, current_point)
        # print queue, end

if __name__ == "__main__":
    netlist = data.sortDistance(netlist)
    # netlist = [netlist[i] for i in range(40, 50)]
    netlist = [netlist[40]]
    paths = []
    for net in netlist:
        start_end = calculateEndStart(chips[net[0]], chips[net[1]])
        path = Astar(start_end[0], start_end[1])
        paths.append(path[0])
    print path

    Visualization.runVisualization(paths, 0)
Example #41
def parse_one_file(input_data):
    # looking for cached results
    cache_filename = input_data.infile + '.cache'
    
    ## BRANCH: parsed infile already cached
    if ( USE_PCAP_CACHE and os.path.isfile(cache_filename) ):
        print "Reading cache:", cache_filename
        pickle_input = open(cache_filename, 'rb')
        senders = pickle.load(pickle_input)
        pickle_input.close()
    
    ## BRANCH: no cache
    else:
        ## * parse pcap file *
        senders = Pcap_Parser.parse(input_data.infile, PACKETS)  # 100000
    
        ## caching parser results
        if ( USE_PCAP_CACHE ):
            pickle_output = open(cache_filename, 'wb')
            pickle.dump(senders, pickle_output, pickle.HIGHEST_PROTOCOL)
            pickle_output.close()



    ##
#    types = (   Storage_Classes.Sender.TYPE.AP, \
#                Storage_Classes.Sender.TYPE.STATION, \
#                Storage_Classes.Sender.TYPE.UNKNOWN)
    types = (   Storage_Classes.Sender.TYPE.AP, )
            
#    ## FIXME at the moment "s.show(addr)" is important for proper functionality
    for t in types:
        for addr in senders:
            s = senders[addr]
            if ( s.type == t ):
#                s.show(addr)
                s.set_addr(addr)


    
    ## Visualization
#        Visualization.show_biggest_ap(senders)

    ## Units
    sender = get_biggest_ap(senders)
#    sender = get_biggest_ap(senders)
    print
    print "Using:"
    sender.show()

    # window sizes
#    UNIT_WINDOW = 0.1
    UNIT_WINDOW = 1.0
    DATA_POINT_WINDOW = 6.0
#    DATA_POINT_WINDOW = 5.0

#    SKIP_FRONT = 20
    SKIP_FRONT = 0
    
    OVERLAPPING = True
    
    
    ## Data Grouping ##
    units = Data_Grouping.build_units(sender, input_data.annotation, UNIT_WINDOW, SKIP_FRONT)
    print "created units:", len(units), "(" + str(len(units) * units[0].length) + "s)"
    parts = Data_Grouping.separation(units)

    part = parts[0]    
    part.windows = Data_Grouping.windowing(part, DATA_POINT_WINDOW, OVERLAPPING)
    part.features = Features.calculate_features(part)

#    ## XXX
#    for x in units:
#        print x.show()

    ## XXX    
#    for x in part.features:
#        if ( not x.invalid ):
#            print "|||", x
    print "new features:", len(part.features)
    


    

    ## plot into file
    prepare_dir(input_data.group.plot_base_dir)
    filename = input_data.group.plot_base_dir + "/" + os.path.splitext(os.path.basename(input_data.infile))[0] + ".pdf"
    
    
    ## XXX do pickles in the plot dir, too
    if ( PICKLE_FEATURES ):
        pick_path = input_data.group.plot_base_dir + "/" + os.path.splitext(os.path.basename(input_data.infile))[0] + "_features.pickle"
        pickle_output = open(pick_path, 'wb')
        pickle.dump(part, pickle_output, pickle.HIGHEST_PROTOCOL)
        pickle_output.close()

    
##    xlim = [units[0].start, 200]
#    xlim = [20, 200]
#    ylim = [-75, -15]
#    print ">>>>>>> Plotting into:", filename
#    Visualization.plot_mean_and_variance_into_file(units, xlim, ylim, filename)
#    Visualization.plot_min_max_rssi(units, xlim, ylim, filename)


    ## plot data_points into file
#    xlim = [0, 350]
#    ylim = [-5, 10]
    xlim = None
    ylim = None
#    Visualization.plot_data_points(data_points, xlim, ylim, filename)
#    Visualization.plot_data_points(merged_points, xlim, ylim, filename, invalid_points = invalid_points)
    Visualization.plot_data_points(part.features, xlim, ylim, filename)
#    ylim = [0, 2]
#    Visualization.plot_data_points(level_points, xlim, ylim, filename)

    ## XXX visualize raw rssi data (interactive)
    ##   -- BE CAREFUL: this will not work in parallel processing mode
    Visualization.plot_raw_rssi(sender)
    
    print "-----------------------------------------------------------------------"
    
    
    header = None
#    header = \
#'''Activity\tmean\tvariance\tdistance\tdiff\tsign\tlevels
#discrete\tcontinuous\tcontinuous\tcontinuous\tcontinuous\tdiscrete\tcontinuous
#class\t\t\t\t\t
#'''

#    orange_data = Orange_Export.Data_Collection(merged_points, header)
    orange_data = Orange_Export.Data_Collection(part.features, None)

    ## return
#    return units
    return orange_data
Example #42
    netlist = Grid.sortDistance(netlist)
    relay_list = [0 for i in netlist]

    path_grid = Grid.createPathGrid()
    grid = Grid.createGrid()
    start_time = time.time()
    comp = layIntersectingPaths()
    intersecting_paths, paths_dict = comp[0], comp[1]
    paths = simulatedAnnealing(intersecting_paths, paths_dict, max_iteration=1800)
    end_time = time.time()
    total_length = calculateWireLenght(paths.values())

    print paths
    print "Total wire length = ", total_length, "Computing time = ", end_time - start_time, " seconds or ", (end_time - start_time)/60, "minutes"

    Visualization.runVisualization(paths.values(), 0)
    Visualization.run3DVisualisation(paths.values(), 0)

    smoothed_paths = []
    for path in paths.values():
        smoothed_paths.append(superSmoothPath(path))

    total_length = calculateWireLenght(smoothed_paths)

    print smoothed_paths
    print "Total wire length = ", total_length, "Computing time = ", end_time - start_time, " seconds or ", (end_time - start_time)/60, "minutes"

    Visualization.runVisualization(smoothed_paths, 0)
    Visualization.run3DVisualisation(smoothed_paths, 0)

Example #43
 def showButton(self):
     filename=self.getCurrentDataset()
     dataset,attr=arff.parseArff(filename)
     vis.visualizeLabels(dataset)
Example #44
def simulate(ntrials, region, T, printonswap=False, showvis=True, newfig=False,
             teamdesc=None, printbrackets=True):
    """
    If region is "west" "midwest" "south" or "east" we'll run a bracket based 
    just on those teams.
    If it's "all" we'll run a full bracket.
    If it's a list of teams, we'll run a bracket based just on that list.

    So, one way you might want to do things is to simulate 10000 runs for each 
    of the four brackets,
    then run your final four explicitly, e.g.

    T = 1.5
    simulate(10000,'midwest',T)
    # record results
    simulate(10000,'south',T)
    # record results
    simulate(10000,'west',T)
    # record results
    simulate(10000,'east',T)
    # record results

    simulate(10000,['Louisville','Kansas','Wisconsin','Indiana'],T)
    """

    if isinstance(region, (list, tuple)):
        teams = region[:]
    else:
        teams = RAS.teams[region]
    b = Bracket(teams, T)
    energy = b.energy()
    ng = sum(b.games_in_rounds) # total number of games
    # Let's collect some statistics
    brackets = []
    for trial in xrange(ntrials):
        g = randint(0, ng) # choose a random game to swap
        #print "attempted swap for game",g#,"in round",round[g]
        #newbracket = deepcopy(b)
        newbracket = b.copy()
        newbracket.swap(g)
        newenergy = newbracket.energy()
        ediff = newenergy - energy
        if ediff <= 0:
            b = newbracket
            energy = newenergy
            if printonswap:
                print "LOWER"
                print b
        else:
            if random() < exp(-ediff/T):
                b = newbracket
                energy = newenergy
                if printonswap:
                    print "HIGHER"
                    print b
        brackets.append(b)


    lb, mcb, mcb_count, unique_brackets, lowest_sightings = \
        Stats.gather_uniquestats(brackets)
    if showvis:
        Visualization.showstats(brackets, unique_brackets, lowest_sightings, 
                                newfig=newfig, teamdesc=teamdesc)
    if printbrackets:
        print "Lowest energy bracket"
        print lb
        print "Most common bracket (%s)"%mcb_count
        print mcb
    return (lb,mcb,mcb_count)
Example #45
def cluster_map(request, data_set, classification1, classification2):
    errors = []
    warnings = []
    form = None
    valid = False
    hasDataSet = False
    clusterData = None
    dataSetNames = []
    selectedClassificationDisplay = ''
    selectedClass = ''

    minNodeSize = 99999
    maxNodeSize = 0
    minEdgeWeight = 99999
    maxEdgeWeight = 0
    minNodeSizeWithLabel = 20
    maxNNodes = 20
    topN = 10
    previousNYears = 20

    targetCPCColumnName = 'CPCs'
    outFileName = 'clusterMapInput'
    outFolderName = '../templates/visualization/'
    fileType = '.json'

    dataSetNames = []
    datasets = Datasets.objects.all()
    for dataset in datasets:
        dataSetNames.append(dataset.name)
    dataSetNames.insert(0, 'index')

    classificationNames = dbq.getClassificationList()
    classificationNames.insert(0, 'index')

    # Model setup view
    if (not (data_set == 'index' or classification1 == 'index'
             or classification2 == 'index'
             or classification1 == classification2)):
        # df = dbq.getDataSetPatents(data_set)
        # if(len(df.index)>1000):
        #     df = df.sample(n=500, replace=False, random_state=17)
        df = pd.DataFrame()
        df = dbq.getDataSetPatentColumn(data_set, df, classification1)
        selectedClass1 = df[classification1].tolist()
        df = dbq.getDataSetPatentColumn(data_set, df, classification2)
        selectedClass2 = df[classification2].tolist()
        df = dbq.getDataSetPatentYears(data_set, df)
        years = df['Years']
        maxYear = max(years)
        minYear = maxYear - previousNYears + 1
        df = df[df.Years >= minYear]

        allCategories = []
        uniqueCategories1 = []
        uniqueCategories2 = []
        combinedCategories = []

        allClass1 = []
        allClass2 = []
        # cList == cList is False for NaN entries, so missing rows are skipped
        for cList1 in selectedClass1:
            if (cList1 == cList1 and cList1 is not None):
                for c in cList1:
                    if (c != 'nan' and c != '' and c != 'NAN' and c != 'Nan'):
                        allClass1.append(c)
        for cList2 in selectedClass2:
            if (cList2 == cList2 and cList2 is not None):
                for c in cList2:
                    if (c != 'nan' and c != '' and c != 'NAN' and c != 'Nan'):
                        allClass2.append(c)
        expandedDF1 = pd.DataFrame()
        expandedDF1[classification1] = allClass1
        expandedDF2 = pd.DataFrame()
        expandedDF2[classification2] = allClass2

        grouped = expandedDF1.groupby([classification1
                                       ]).size().reset_index(name='nPPA')
        topNClassification1 = grouped.nlargest(
            topN, 'nPPA')[classification1].tolist()
        grouped = expandedDF2.groupby([classification2
                                       ]).size().reset_index(name='nPPA')
        topNClassification2 = grouped.nlargest(
            topN, 'nPPA')[classification2].tolist()

        # for c2, c3, c4 in zip(categories2, categories3, categories4):
        for c1, c2 in zip(selectedClass1, selectedClass2):
            if (not c1):
                c1 = []
            if (not c2):
                c2 = []
            c1 = [c for c in c1 if c in topNClassification1]
            c2 = [c for c in c2 if c in topNClassification2]
            allCategories = allCategories + c1 + c2
            combinedCategories.append(c1 + c2)
            uniqueCategories1 = uniqueCategories1 + c1
            uniqueCategories2 = uniqueCategories2 + c2
        uniqueCategories1 = list(set(uniqueCategories1))
        uniqueCategories2 = list(set(uniqueCategories2))
        # expanded = pd.DataFrame()
        # expanded['Categories'] = allCategories
        # categorySizes = expanded.groupby(['Categories']).size().reset_index(name='nPPA')
        # categoryList = categorySizes['Categories'].tolist()
        # categorySizesList = categorySizes['nPPA'].tolist()

        selectedClass = combinedCategories
        # selectedClass = categoryByKeywords

        # wordList = []
        # f = open('../out/words.txt', 'r')
        # for line in f:
        #     wordList.append(line.rstrip())
        # f.close()
        # titleWords = pp.normalizeCorpus(df['Titles'].tolist(), wordList)

        allClass = []
        for c in selectedClass:
            if (c):
                allClass = allClass + list(filter(lambda a: a != '', c))
        expanded = pd.DataFrame()
        expanded['Class'] = allClass
        classSizes = expanded.groupby(['Class'
                                       ]).size().reset_index(name='nPPA')
        classList = classSizes['Class'].tolist()
        classSizesList = classSizes['nPPA'].tolist()

        grouped = expanded.groupby(
            ['Class']).size().reset_index(name='Number of P/PA')
        topNClass = grouped.nlargest(topN, 'Number of P/PA')['Class'].tolist()

        # Filter the relationships down to the top-N nodes
        relationships = selectedClass
        relationshipsEval = []

        if (maxNNodes > 0):
            topNNodes = v.getTopNNodes(relationships, maxNNodes)
            for rList in relationships:
                tempRList = []
                for node in list(filter(lambda a: a != '', rList)):
                    if (node in topNNodes):
                        tempRList.append(node)
                relationshipsEval.append(tempRList)
        else:
            for rList in relationships:
                relationshipsEval.append(list(filter(lambda a: a != '',
                                                     rList)))
        source = []
        target = []
        weight = []
        for r in relationshipsEval:
            pairs = combinations(r, 2)
            for p in pairs:
                # keep only edges that connect the two classifications;
                # pairs within a single classification are skipped
                if ((p[0] in uniqueCategories1 and p[1] in uniqueCategories1)
                        or (p[0] in uniqueCategories2
                            and p[1] in uniqueCategories2)):
                    continue
                source.append(p[0])
                target.append(p[1])
                weight.append(1)

        newDF = pd.DataFrame()
        newDF['source'] = source
        newDF['target'] = target
        newDF['weight'] = weight

        graphDF = newDF.groupby(['source', 'target']).sum().reset_index()
        maxEdgeWeight = graphDF['weight'].max()
        minEdgeWeight = graphDF['weight'].min()
        # graphDF.to_excel(outFolderName + 'edgelist.xlsx')
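        # Build a weighted graph from the edge list, then relabel the nodes
        # with integer ids (keeping the original label in the 'name'
        # attribute) so the node-link JSON can reference nodes by index.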
        G = nx.from_pandas_edgelist(graphDF, 'source', 'target', 'weight')
        G2 = nx.convert_node_labels_to_integers(G, label_attribute='name')

        # Determine node groups using Louvain modularity
        # communities = best_partition(G2, weight='size')
        d = nx.readwrite.json_graph.node_link_data(G2, {'name': 'index'})
        nodeNames = []
        nodeCommunities = []
        nodeSizes = []
        nodeTop10 = []
        for node in d['nodes']:
            name = node['name']
            # size = G2.degree[list(G.nodes()).index(node['name'])]
            size = classSizesList[classList.index(node['name'])]
            community = 2
            if (name in uniqueCategories1):
                community = 0
            if (name in uniqueCategories2):
                community = 1
            # community = communities[list(G.nodes()).index(node['name'])]
            node['size'] = size
            node['group'] = community
            nodeNames.append(name)
            nodeSizes.append(size)
            nodeCommunities.append(community)
            if (node['size'] < minNodeSize):
                minNodeSize = node['size']
            if (node['size'] > maxNodeSize):
                maxNodeSize = node['size']
        # minNodeSizeWithLabel = 0.2 * maxNodeSize
        # for node in d['nodes']:
        #     if(node['size'] < minNodeSizeWithLabel):
        #         node['name'] = None
        for node in d['nodes']:
            if (not node['name'] in topNClass):
                node['fontSize'] = 8
                node['opacity'] = 0.5
            else:
                node['fontSize'] = node['size']
                node['opacity'] = 1

        nodesDF = pd.DataFrame()
        nodesDF['CPC'] = nodeNames
        nodesDF['Size'] = nodeSizes
        nodesDF['Community'] = nodeCommunities

        del d["directed"]
        del d["multigraph"]
        del d["graph"]
        clusterData = d
        hasDataSet = True
        valid = True

    templateHTML = 'visualization/cluster_map.html'
    mainHTML = render_to_string(
        templateHTML, {
            'form': form,
            'valid': valid,
            'errors': errors,
            'warnings': warnings,
            'data_set': data_set,
            'classification1': classification1,
            'classification2': classification2,
            'classificationNames': classificationNames,
            'selectedClassificationDisplay': selectedClassificationDisplay,
            'hasDataSet': hasDataSet,
            'dataSetNames': dataSetNames,
            'minNodeSize': minNodeSize,
            'maxNodeSize': maxNodeSize,
            'maxEdgeWeight': maxEdgeWeight,
            'minEdgeWeight': minEdgeWeight,
            'clusterData': clusterData,
            'previousNYears': previousNYears,
        })
    return mainHTML
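
The view above boils down to a co-occurrence graph: every pair of labels that
appears on the same record becomes a weighted edge. A minimal sketch of that
pattern with toy data in place of the patent classifications (stripped of the
Django plumbing, so the names here are illustrative only):

import pandas as pd
import networkx as nx
from itertools import combinations

records = [['A', 'B', 'C'], ['A', 'B'], ['B', 'C']]
source, target, weight = [], [], []
for r in records:
    for a, b in combinations(r, 2):
        source.append(a)
        target.append(b)
        weight.append(1)

edges = pd.DataFrame({'source': source, 'target': target, 'weight': weight})
# collapse duplicate pairs into a single weighted edge
edges = edges.groupby(['source', 'target']).sum().reset_index()
G = nx.from_pandas_edgelist(edges, 'source', 'target', 'weight')
print(G.edges(data=True))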
Example No. 46
    def view(self, first=0, last=None, step=1, object=None):
        """Show an animation of |object| using the positions in the
        trajectory at all time steps from |first| to |last| with an
        increment of |step|. |object| defaults to the entire universe."""
        Visualization.viewTrajectory(self, first, last, step, object)
Example No. 47
def word_cluster_map(request, data_set, column):
    errors = []
    warnings = []
    form = None
    valid = False
    hasDataSet = False
    clusterData = None
    dataSetNames = []
    selectedClassificationDisplay = ''

    minNodeSize = 99999
    maxNodeSize = 0
    minEdgeWeight = 99999
    maxEdgeWeight = 0
    minNodeSizeWithLabel = 20
    maxNNodes = 30
    previousNYears = 20
    topN = 10

    datasets = Datasets.objects.all()
    for dataset in datasets:
        dataSetNames.append(dataset.name)
    dataSetNames.insert(0, 'index')

    columnNames = ['titles', 'abstracts', 'independent_claims']
    columnNames.insert(0, 'index')

    # Model setup view
    if (not (data_set == 'index' or column == 'index')):
        if (request.method == "POST"):
            maxNNodes = int(request.POST.get('target-n-nodes'))
            previousNYears = int(request.POST.get('target-n-years'))
        # if (len(df.index) > 1000):
        #     df = df.sample(n=500, replace=False, random_state=17)
        df = pd.DataFrame()
        df = dbq.getDataSetPatentTACs(data_set, df)
        df = dbq.getDataSetPatentYears(data_set, df)
        years = df['Years']
        maxYear = max(years)
        minYear = maxYear - previousNYears + 1
        df = df[df.Years >= minYear]

        wordList = []
        with open('../out/words.txt', 'r') as f:
            for line in f:
                wordList.append(line.rstrip())
        columnWords = []
        if (column == 'titles'):
            columnWords = pp.normalizeCorpus(df['Titles'].tolist(), wordList)
        elif (column == 'abstracts'):
            columnWords = pp.normalizeCorpus(df['Abstracts'].tolist(),
                                             wordList)
        elif (column == 'independent_claims'):
            columnWords = pp.normalizeCorpus(df['Independent Claims'].tolist(),
                                             wordList)
        selectedColumn = columnWords

        uniqueWords = []
        combinedWords = []

        allWords = []
        # wordList == wordList is False for NaN entries, so missing rows are skipped
        for wordList in selectedColumn:
            if (wordList == wordList and wordList is not None):
                for word in wordList:
                    if (word != 'nan' and word != '' and word != 'NAN'
                            and word != 'Nan'):
                        allWords.append(word)
        expandedDF = pd.DataFrame()
        expandedDF[column] = allWords
        uniqueWords = list(set(allWords))

        wordSizes = expandedDF.groupby([column
                                        ]).size().reset_index(name='nPPA')
        topNWords = wordSizes.nlargest(topN, 'nPPA')[column].tolist()
        wordList = wordSizes[column].tolist()  # note: reuses the name wordList defined above
        wordSizesList = wordSizes['nPPA'].tolist()

        # Filter the word relationships down to the top-N nodes
        relationships = selectedColumn
        relationshipsEval = []

        if (maxNNodes > 0):
            topNNodes = v.getTopNNodes(relationships, maxNNodes)
            for rList in relationships:
                tempRList = []
                for node in list(filter(lambda a: a != '', rList)):
                    if (node in topNNodes):
                        tempRList.append(node)
                relationshipsEval.append(tempRList)
        else:
            for rList in relationships:
                relationshipsEval.append(list(filter(lambda a: a != '',
                                                     rList)))
        source = []
        target = []
        weight = []
        for r in relationshipsEval:
            pairs = combinations(r, 2)
            for p in pairs:
                source.append(p[0])
                target.append(p[1])
                weight.append(1)

        newDF = pd.DataFrame()
        newDF['source'] = source
        newDF['target'] = target
        newDF['weight'] = weight

        graphDF = newDF.groupby(['source', 'target']).sum().reset_index()
        maxEdgeWeight = graphDF['weight'].max()
        minEdgeWeight = graphDF['weight'].min()
        # graphDF.to_excel(outFolderName + 'edgelist.xlsx')
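        # Build the weighted co-occurrence graph and relabel the nodes to
        # integer ids for the node-link JSON export, as in cluster_map above.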
        G = nx.from_pandas_edgelist(graphDF, 'source', 'target', 'weight')
        G2 = nx.convert_node_labels_to_integers(G, label_attribute='name')

        # Determine node groups using Louvain modularity
        communities = best_partition(G2, weight='weight')  # the edge attribute set above is 'weight'
        d = nx.readwrite.json_graph.node_link_data(G2, {'name': 'index'})
        nodeNames = []
        nodeCommunities = []
        nodeSizes = []
        nodeTop10 = []
        for node in d['nodes']:
            name = node['name']
            # size = G2.degree[list(G.nodes()).index(node['name'])]
            size = wordSizesList[wordList.index(node['name'])]
            community = communities[list(G.nodes()).index(node['name'])]
            node['size'] = size
            node['group'] = community
            nodeNames.append(name)
            nodeSizes.append(size)
            nodeCommunities.append(community)
            if (node['size'] < minNodeSize):
                minNodeSize = node['size']
            if (node['size'] > maxNodeSize):
                maxNodeSize = node['size']
        # minNodeSizeWithLabel = 0.2 * maxNodeSize
        # for node in d['nodes']:
        #     if(node['size'] < minNodeSizeWithLabel):
        #         node['name'] = None
        for node in d['nodes']:
            if (not node['name'] in topNWords):
                node['fontSize'] = 8
                node['opacity'] = 0.5
            else:
                node['fontSize'] = node['size']
                node['opacity'] = 1

        nodesDF = pd.DataFrame()
        nodesDF['Word'] = nodeNames
        nodesDF['Size'] = nodeSizes
        nodesDF['Community'] = nodeCommunities

        del d["directed"]
        del d["multigraph"]
        del d["graph"]
        clusterData = d
        hasDataSet = True
        valid = True

    templateHTML = 'visualization/word_cluster_map.html'
    mainHTML = render_to_string(
        templateHTML, {
            'form': form,
            'valid': valid,
            'errors': errors,
            'warnings': warnings,
            'data_set': data_set,
            'column': column,
            'columnNames': columnNames,
            'hasDataSet': hasDataSet,
            'dataSetNames': dataSetNames,
            'minNodeSize': minNodeSize,
            'maxNodeSize': maxNodeSize,
            'maxEdgeWeight': maxEdgeWeight,
            'minEdgeWeight': minEdgeWeight,
            'clusterData': clusterData,
            'maxNNodes': maxNNodes,
            'previousNYears': previousNYears,
        })
    return mainHTML
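
The Louvain step above comes from the python-louvain package. A minimal,
self-contained sketch of that call on a toy graph (assuming python-louvain is
installed; the 'group' attribute name matches the view above):

import networkx as nx
from community import best_partition

G = nx.karate_club_graph()
partition = best_partition(G, weight='weight')  # maps node -> community id
for node, comm in partition.items():
    G.nodes[node]['group'] = comm
print(set(partition.values()))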
Example No. 48
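            # Periodically back up the learning histories so a long run can
            # be recovered after a crash.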
            with open('action_history_backup.pkl', 'wb') as output:
                pickle.dump(action_history, output, pickle.HIGHEST_PROTOCOL)

            with open('state_history_backup.pkl', 'wb') as output:
                pickle.dump(state_history, output, pickle.HIGHEST_PROTOCOL)

            with open('mean_error_history_backup.pkl', 'wb') as output:
                pickle.dump(mean_error_history, output, pickle.HIGHEST_PROTOCOL)


    expert.print()

    # find out which region ids exist in the final error snapshot
    region_ids = sorted(list(zip(*mean_error_history[-1]))[0])

    Viz.plot_expert_tree(expert, region_ids)
    #Viz.plot_evolution(state_history, title='State vs Time', y_label='S(t)', fig_num=1, subplot_num=261)
    Viz.plot_evolution(action_history, title='Action vs Time', y_label='M(t)[1]', y_dim=1, fig_num=1, subplot_num=261)
    Viz.plot_evolution(action_history, title='Action vs Time', y_label='M(t)[0]', y_dim=0, fig_num=1, subplot_num=262)
    Viz.plot_model(expert, region_ids, x_idx=1, y_idx=0, fig_num=1, subplot_num=263)
    Viz.plot_model(expert, region_ids, x_idx=2, y_idx=0, fig_num=1, subplot_num=269)
    Viz.plot_regional_mean_errors(mean_error_history, region_ids, fig_num=1, subplot_num=234)
    #Viz.plot_model_3D(expert, region_ids, x_idx=(0, 1), y_idx=0, fig_num=1, subplot_num=122)
    Viz.plot_model_3D(expert, region_ids, x_idx=(1, 2), y_idx=0, fig_num=1, subplot_num=122, data_only=False)


    plt.ioff()
    plt.show()
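
The backups written in this example can be restored with pickle.load; a short
sketch, assuming the same file names as above:

import pickle

with open('action_history_backup.pkl', 'rb') as f:
    action_history = pickle.load(f)
with open('state_history_backup.pkl', 'rb') as f:
    state_history = pickle.load(f)
with open('mean_error_history_backup.pkl', 'rb') as f:
    mean_error_history = pickle.load(f)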

Example No. 49
    def view(self, first=0, last=None, step=1, object=None):
        Visualization.viewTrajectory(self, first, last, step, object)
Example No. 50
#    netlist = [netlist[0], netlist[1], netlist[2], netlist[3], netlist[4], netlist[5]]
    netlist = [netlist[i] for i in range(35)]
#    netlist = [netlist[6]]
    # note: this overrides the slice above, keeping only the first 11 nets
    netlist = [netlist[i] for i in range(11)]
    for net in netlist:
        path = []
        start, end = chips[net[0]], chips[net[1]]
        print "finding a path betweeen: ", chips[net[0]], chips[net[1]]
        original_value_start, original_value_end = isFree(start), isFree(end)
        while len(path) < 1:
            try:
                path, grid = findPossiblePath(start, end, grid)
                break
            except PathLengthError:
                # restore the original occupation of the endpoints before giving up
                setOccupation(start, original_value_start)
                setOccupation(end, original_value_end)
                print "Occupation is changed"
                print 'PathLengthError'
                break

        shortest_paths.append(path)


    print "The number of complete paths should be %i, the actual number of complete paths is %i " % (len(netlist), len(shortest_paths))
 #   print "The total wire length is %i and there are %i intersections of which there are %i on the chips" % (
  #      calculateWireLenght(shortest_paths),
   #     checkIntersections(shortest_paths), doubleStartEndPoints(netlist))
    print shortest_paths
    layer = 3
    Visualization.runVisualization(shortest_paths, layer)
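
The routing loop above saves each endpoint's occupation before claiming it and
restores it when no path is found. A small self-contained sketch of that
save-and-restore pattern (isFree and setOccupation here are hypothetical
stand-ins for the functions used above):

occupation = {}

def isFree(point):
    return occupation.get(point, True)

def setOccupation(point, value):
    occupation[point] = value

start = (0, 0)
original_value_start = isFree(start)  # remember the original state
setOccupation(start, False)           # claim the endpoint while routing
# ... routing fails ...
setOccupation(start, original_value_start)  # put the original state back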