def chains(im, df):
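    # Filter chains with more than two segments for plotting, then compare all
    # chains pairwise by the ratio of their average slopes and intercepts.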
    chains = pyramid.build_chains(df) 
    chains_f = [c for c in chains if len(c) > 2]
            
    for chain in chains_f:
        output.plot_image(im)
        for segment in chain:
            plot_line(xy, segment['a'], segment['b'], 'r')
    
    avg_slopes = []
    avg_intercepts = []
    for c in chains:
        slopes = [segment['slope'] for segment in c]
        intercepts = [segment['intercept'] for segment in c]
        avg_slopes.append(average(slopes))
        avg_intercepts.append(average(intercepts))
        
    for i in range(len(chains)):
        for j in range(len(chains)):
            slope_ratio = avg_slopes[i] / avg_slopes[j]
            intercept_ratio = avg_intercepts[i] / avg_intercepts[j]
            if 0.5 < slope_ratio < 1.5 and 0.5 < intercept_ratio < 1.5:
                print(i, j, slope_ratio, intercept_ratio)
Example #2
def get_green_object_coordinates(data):
    #rate = rospy.Rate(10)
    
    bridge=CvBridge()
    cv_image=bridge.imgmsg_to_cv2(data, "bgr8")

    image = cv_image.copy()
    cv_image = cv2.cvtColor(cv_image,cv2.COLOR_RGB2BGR)
    imghsv = cv2.cvtColor(cv_image,cv2.COLOR_BGR2HSV)
    lower = np.array([32, 89, 21])
    upper = np.array([69, 255, 255])
    mask = cv2.inRange(imghsv, lower, upper)  
    result = cv2.bitwise_and(cv_image,cv_image, mask = mask)
    result = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)


    # Find contours, then record the first point of each contour
    # as a rough coordinate for the detected object
    cnts, hierarchy = cv2.findContours(result, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    #cnts = imutils.grab_contours(cnts)
    #try:
    #    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    #except IndexError:
    #    print("Camera goruntusunde probe yok.")
    coordinate_x = []
    coordinate_y = []
    for c in cnts:
        # take the first point of the contour (c has shape (N, 1, 2))
        coordinate_x.append(int(c[0][0][0]))
        coordinate_y.append(int(c[0][0][1]))

        # draw the contour and center of the shape on the image
        cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
        # show the image
        cv2.imshow("Image", image)
        cv2.waitKey(1)
    print("[center_publisher]", end="     ")
    try:
        if int(average(coordinate_y)) > 400:
            center = [int(average(coordinate_x)), int(average(coordinate_y))]
            print("Green object coordinates : ",end="")
        else:
            center = [0,0]
            print("No green object", end=" ")
    except ValueError:
        center = [0,0]
        print("No green object", end=" ")

    p = str(center[0])+" "+str(center[1])
    print(p)
    pub.publish(p)
    if center[0] < 580 and 500 < center[1] < 550:
        print("\n\n\n\n\nhhhh\n\n\n\n")
        rospy.sleep(5)
Example #3
    def test_all_sudokus(self):
        total = 1000
        sudokus = np.zeros((total, 81), np.int32)
        solutions = np.zeros((total, 81), np.int32)
        for i, line in enumerate(open('data/sudoku.csv', 'r').read().splitlines()[1:]):
            if i >= total:
                break

            quiz, solution = line.split(",")
            for j, q_s in enumerate(zip(quiz, solution)):
                q, s = q_s
                sudokus[i, j] = q
                solutions[i, j] = s

        sudokus = sudokus.reshape((-1, 9, 9))
        solutions = solutions.reshape((-1, 9, 9))

        times = []

        for i in range(0, len(sudokus)):
            start_time = time.process_time()
            solution = sudoku_solver(sudokus[i])
            end_time = time.process_time()
            times.append(end_time - start_time)
            np.testing.assert_array_equal(solution, solutions[i])

        avg = str(round(average(times) * 1000, 2))
        total_time = str(round(sum(times), 2))
        
        print('-----')
        print('Done!')
        print(f' - Solved: {total} sudokus')
        print(f' - Average time: {avg}ms')
        print(f' - Total time: {total_time}s')
Example #4
 def __sum_relative_values_per_year(self, relative_values_per_year, n):
     total_counts = []
     if self.language == model.Language.GERMAN:
         total_counts = config.TOTAL_NGRAM_COUNTS_GERMAN
     elif self.language == model.Language.ENGLISH:
         total_counts = config.TOTAL_NGRAM_COUNTS_ENGLISH
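     # dividing by 1 / total_counts[n] rescales the relative average
     # back to an absolute n-gram count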
     return int(average(relative_values_per_year) / (1 / total_counts[n]))
Example #5
def interpolate_grid(grid_z, fill_value):
    def Z(X, Y, params):
        a, b, c, d = params
        return -(a*X + b*Y + d)/c

    grid_c = np.zeros(256)
    grid_s = np.zeros(256)
    for j in range(16):
        for i in range(16):
            if len(grid_z[j][i]) > 10:
                x = np.array(grid_z[j][i])[:, 0]
                y = np.array(grid_z[j][i])[:, 1]
                z = np.array(grid_z[j][i])[:, 2]

                A = np.c_[x, y, np.ones(x.shape)]
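                # least-squares plane fit: z ≈ C[0]*x + C[1]*y + C[2]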
                C, _, _, _ = np.linalg.lstsq(A, z, rcond=None)
                vert_params = C[0], C[1], -1., C[2]

                a = Z(rpm_bins[i], map_bins[j], vert_params)  # plane value at the cell
                b = average(z)                                # plain mean of the cell's samples
                c = abs(a-b)
                d = (a + b*c) / (c+1)  # the larger the disagreement, the more weight on the mean
                if abs(a-d) > 1:
                    print(rpm_bins[i], map_bins[j], a, '->', d)
                a = round(d, ndigits)
                grid_z[j][i] = a
                grid_c[i + j * 16] = a
                grid_s[i + j * 16] += len(x)
            else:
                grid_z[j][i] = 0
                grid_c[i + j * 16] = fill_value

    return grid_z, grid_c, grid_s
def get_average_cross_validation_accuracy(args):
	try:
		conv_layer_count, dense_layer_count, compiler_optimizer, active_fun = args
		print("Testing parameters:", args)
		csv_x_data, csv_y_data = parse_csv_data("train.csv")
		# first conv block, then (conv_layer_count - 1) more Conv2D + MaxPooling2D pairs
		feature = [
			Conv2D(4, kernel_size=3, activation=active_fun, input_shape=(600,600,1)),
			Conv2D(4, kernel_size=3, activation=active_fun),
			MaxPooling2D(pool_size=2),
		]
		for _ in range(conv_layer_count - 1):
			feature += [Conv2D(4, kernel_size=3, activation=active_fun), MaxPooling2D(pool_size=2)]
		feature.append(Flatten())
		# dense_layer_count pairs of Dense + Dropout, then a sigmoid output layer
		classifier = []
		for _ in range(dense_layer_count):
			classifier += [Dense(8, activation=active_fun), Dropout(0.1)]
		classifier.append(Dense(1, activation='sigmoid'))
		image_filenames = csv_x_data[:,0]
		accuracy_vals = []
		folds = StratifiedKFold(n_splits=5, random_state=None, shuffle=False)
		for train_index, test_index in folds.split(image_filenames, csv_y_data):
			raw_x_train_data, raw_x_test_data = image_filenames[train_index], image_filenames[test_index]
			x_train_data = format_images("resized_train", raw_x_train_data)
			x_train_data = x_train_data.reshape(200, 600, 600, 1)
			x_test_data = format_images("resized_train", raw_x_test_data)
			x_test_data = x_test_data.reshape(50, 600, 600, 1)
			y_train_data, y_test_data = csv_y_data[train_index], csv_y_data[test_index]
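			# note: the same layer instances are reused for every fold, so weights
			# learned in one fold carry over to the next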
			model = Sequential(feature+classifier)
			model.compile(optimizer=compiler_optimizer, loss='binary_crossentropy',metrics=['accuracy'],)
			model.fit(x_train_data, y_train_data, epochs=10, batch_size=5, verbose=0)
			#model.fit(x_train_data, to_categorical(y_train_data), epochs=10, batch_size=5, verbose=0)
			y_expected = model.predict(x_test_data)
			#accuracy_vals.append(accuracy_percent(y_expected, to_categorical(y_test_data)))
			accuracy_vals.append(accuracy_percent(y_expected, y_test_data))
		print("Accuracy vals: ", accuracy_vals)
		correctness = average(accuracy_vals)
		print("Average percent correct:", correctness)
		global best_hyper_parameter_correctness
		global best_hyper_parameter_tuple
		if correctness > best_hyper_parameter_correctness:
			best_hyper_parameter_tuple = (conv_layer_count, dense_layer_count, compiler_optimizer, active_fun)
			best_hyper_parameter_correctness = correctness
		return 1 - correctness
	except Exception:
		return 1
def createHistogram(list1,list2=None,_title='histogram: % correctness for required data set',firstTitle='random_reordering',secondTitle='spins_first',thirdTitle='moves_first',fourthTitle='interleve_reordering', fifthTitle = 'no_reordering'):
    n, bins, patches = hist([list1,list2],bins=20,
    color=['crimson', 'orange', ], label=[firstTitle + " (" + str(average(list1))[0:5] + ")", secondTitle + " (" + str(average(list2))[0:5] + ")"])
    legend()
    xlabel('%correct')
    ylabel('number of results')
    title(_title)
    show()
Example #8
 def period_guess(self, y_s, t_0):
     extrema = []  # extrema times of y_s (detection not shown in this snippet)
     spacings = [log(extrema[i+1] - t_0) - log(extrema[i] - t_0)
                 for i in range(len(extrema) - 1)]
     omegas = array(spacings) / 2 * 22 / 7  # 22/7 approximates pi
     omega_guess = average(omegas)
     omega_std = std(omegas)
     return omega_guess, omega_std
Example #9
 def runMultipleEpisodes(self, episodes: int) -> float:
     rewards = []
     for episode in range(episodes):
         reward = self.runSingleEpisode()
         rewards.append(reward)
     averageEpisodeReward = average(rewards)
     print("Average episode reward: " + str(averageEpisodeReward))
     self.plotHistogram(rewards)
     return averageEpisodeReward
def _tile_tile_distance(self, tile1, tile2):
    """
    we define distance between two tiles as the average
     Euclidian distance between the pixels of the tiles in RGB(0..255, 0..255, 0..255)
    :param tile1:
    :param tile2:
    :return: distance in (0 ... 1)
    math.sqrt(255**2 + 255**2 + 255**2) = 441.67
    """
    return average(norm(tile1.astype(float) - tile2.astype(float), axis=2)) / math.sqrt(255.0**2 + 255.0**2 + 255.0**2)
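
As a quick self-contained check of the normalization above (a sketch, not from the source repo), two maximally different tiles give a distance of exactly 1.0:

import math
import numpy as np
from numpy import average
from numpy.linalg import norm

black = np.zeros((8, 8, 3), dtype=float)  # all-black 8x8 RGB tile
white = np.full((8, 8, 3), 255.0)         # all-white 8x8 RGB tile
# per-pixel Euclidean distance is sqrt(3 * 255**2), the maximum possible
print(average(norm(black - white, axis=2)) / math.sqrt(3 * 255.0**2))  # 1.0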
Example #11
def trimmers(samps):
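    # Envelope-based trimming: remove the DC offset, rectify, smooth with a
    # rolling median, then keep the span where the envelope exceeds 1/5 of its peak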
    avg = average(samps)
    nodc = np.array(samps) - avg
    rect = np.absolute(nodc)
    window = 20
    medi = pd.Series(rect).rolling(window).median()
    level = np.max(medi)/5
    first = np.argmax(medi>level)
    last = first + np.argmax(medi[first:]<level)
    return first, last
Example #12
def afisare():
    # Function written by @Mihalea Andreas (github.com/FreemanLX)
    # Prints the minimum, maximum, mean and median number of moves,
    # plus the total number of moves used by the player and by the computer

    print("Minimum number of moves: " + str(min(mutari_arr)))
    print("Maximum number of moves: " + str(max(mutari_arr)))
    print("Mean number of moves: " + str(average(mutari_arr)))
    print("Median number of moves: " + str(statistics.median(mutari_arr)))
    t_dupa = int(round(time.time() * 1000))
    print("Elapsed game time: " + str(t_dupa - t_inainte) + " milliseconds.")
    print("Total number of moves used by the player: " + str(count_mutari_jucator))
    print("Total number of moves used by the computer: " + str(sum(mutari_arr)))
Example #13
 def process(self, phonems: PhonemList):
     for phonem in phonems:
         if phonem.name in FrenchPhonems.VOWELS and random.randint(1, 4) == 1:
             phonem.duration *= 8
             if phonem.pitch_modifiers:
                 orgnl_pitch_avg = average(
                     [pitch for pos, pitch in phonem.pitch_modifiers])
             else:
                 orgnl_pitch_avg = 150
             phonem.set_from_pitches_list(
                 [orgnl_pitch_avg + ((-1)**i * 40) for i in range(4)])
     return phonems
Example #14
    def __init__(self, alpha, dataset):

        self.alpha = alpha
        self.searched = {}

        if dataset == '1':
            base = 'datasets/Cranfield/CRAN'
        elif dataset == '2':
            base = 'datasets/Med/MED'
        else:
            raise Exception('unknown dataset: ' + repr(dataset))

        with open(base + '.ALL.json') as data:
            self.dataset = json.load(data)

        with open(base + '.QRY.json') as data:
            self.querys = json.load(data)

        with open(base + '.REL.json') as data:
            self.rel = json.load(data)

        self.data = {}
        self.relevant_docs = int(
            average([len(queries.values()) for queries in self.rel.values()]))

        for doc in self.dataset.values():
            self.data[doc['id']] = {
                'id': doc['id'],
                'title': word_tokenize(str(self.__preprocess(doc['title'])))
                if 'title' in doc.keys() else [],
                'text': word_tokenize(str(self.__preprocess(doc['abstract'])))
                if 'abstract' in doc.keys() else []
            }

        self.N = len(self.data)
        self.__df()
        self.__tf_idf()

        for query in self.querys.values():
            self.search(query['text'], query_id=query['id'])
Example #15
def prep_experiments(qc_list: List[QuantumCircuit],
                     backend: IBMQBackend,
                     physical_dist_list: List[int],
                     save_path,
                     output=False):
    """prepare experiments multiple qcs varing hardware usage"""

    # prepare pandas dataframe
    columns = [
        "Backend", "Physical distance", "Hardware Usage (%)",
        "Total Circuit Duration Time", "Quantum Circuits", "Scheduled Pulse"
    ]
    df = pd.DataFrame(columns=columns)

    # backend info
    num_hw_qubit = backend.configuration().num_qubits

    for physical_dist in physical_dist_list:
        transpiled, num_usage = dynamic_multiqc_compose(
            queued_qc=qc_list,
            backend=backend,
            routing_method="sabre",
            scheduling_method="alap",
            num_hw_dist=physical_dist,
            return_num_usage=True,
        )

        scheduled = [schedule(_tqc, backend=backend) for _tqc in transpiled]
        usage = "{:.2%}".format(
            average([_usage / num_hw_qubit for _usage in num_usage[0:-1]]))
        tdt = sum([_sched._duration for _sched in scheduled])
        # pandas.DataFrame.append is gone in pandas 2.x, so concat a one-row frame
        df = pd.concat(
            [df,
             pd.DataFrame([{
                 "Backend": backend.name,
                 "Physical distance": physical_dist,
                 "Hardware Usage (%)": usage,
                 "Total Circuit Duration Time": tdt,
                 "Quantum Circuits": transpiled,
                 "Scheduled Pulse": scheduled,
             }])],
            ignore_index=True)

    # save the DataFrame as pickle file
    pickle_dump(obj=df, path=save_path)

    if output:
        return df
Example #16
def hist_analysis(im):
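    # Sweep odd median-blur kernel sizes and report how dominant the histogram peak is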
    blurred_images = []
    plt.figure()
    for i in range(1, 100, 2):
        im_blurred = cv2.medianBlur(im, i)
        blurred_images.append(im_blurred)
        
        hist_res = output.plot_image_histogram(im_blurred)
        n = hist_res[0]
        max_n = max(n)
        w, h = images.get_image_size(im_blurred)
        total_pixels = w * h
        max_ratio = max_n / float(total_pixels)
        
        avg = average(n)
        
        print(i, max_ratio, avg/total_pixels)
def custom_score(labels, predictions):
    '''
    Calculate the score for the predictions, based on the labels passed to the
    function, using a combination of accuracy, recall and precision, in an
    attempt to get a model with a good accuracy and good enough precision and
    recall values.

    Args:
        labels : ndarray
            Array with the labels for each data point in the dataset.
        predictions : ndarray
            Array with the predictions for each data point in the dataset.

    Returns:
        total_score : double
            The score assigned to the model, given the predictions.
    '''
    accuracy = accuracy_score(labels, predictions)
    precision = precision_score(labels, predictions)
    recall = recall_score(labels, predictions)
    total_score = average([accuracy, precision, recall])

    return total_score
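
A minimal usage sketch (assuming custom_score above is importable alongside numpy's average and the scikit-learn metrics the function body relies on):

import numpy as np
from numpy import average
from sklearn.metrics import accuracy_score, precision_score, recall_score

labels = np.array([1, 0, 1, 1, 0, 1])
predictions = np.array([1, 0, 0, 1, 0, 1])
# accuracy 5/6, precision 1.0, recall 3/4 -> mean of about 0.861
print(custom_score(labels, predictions))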
Example #18
 def apply_alpha(self):
     corrected_target = []
     alpha_lst = self.alpha_num_set
     target = self.df[self.target_header]
     temp_lst = self.df[self.temp_header]
     self.ref_temp = average(temp_lst).round(5)
     for count_a, alpha in enumerate(alpha_lst):
         corrected_target.append([])
         for count_r in range(len(target)):
             # apply alpha number relative to the reference temperature
             correction = (target[count_r] /
                           ((alpha *
                             (temp_lst[count_r] - self.ref_temp)) + 1)
                           ) + self.alpha_offset
             corrected_target[count_a].append(correction)
         self.df[alpha] = corrected_target[count_a]
     self.corrected_target = corrected_target
Example #19
        d = d.split(",")
        start_time.append(int(d[0]))
        end_time.append(int(d[1]))
        interval.append(int(d[2]))
        statusCode.append(int(d[3]))
    info = data[-1]
    info_cqs = data[-2]

start_time = np.array(start_time)
end_time = np.array(end_time)
interval = np.array(interval)
statusCode = np.array(statusCode)

maximum1 = np.max(interval)
minimum1 = np.min(interval)
legend1 = '(ms): min={} max={} avr={}'.format(minimum1, maximum1,
                                              int(average(interval)))

plt.ylabel("time (ms)")
plt.xlabel("")
plt.title(info + '\n' + info_cqs + legend1)

# plt.plot(interval, color="orange")
plt.legend([legend1])
plt.grid(color="grey", linewidth=1, axis="x", alpha=0.1)

colormap = np.array(['r', 'g'])
plt.scatter(start_time, interval, 3, c=colormap[statusCode])

plt.show()
Example #20
def predicting(AwayTeam, HomeTeam, new_data_file):
    if not (path.exists("home_data.csv") and path.exists("away_data.csv")
            and path.exists("home_scores.csv") and path.exists("away_scores.csv")):
        buildData()

    reg_h = []
    reg_a = []
    nn_h = []
    nn_a = []

    teamData = pd.read_csv(new_data_file)
    HomeTeam_data = teamData.loc[teamData['team_name']
                                 == HomeTeam.strip()].values[0]
    AwayTeam_data = teamData.loc[teamData['team_name']
                                 == AwayTeam.strip()].values[0]

    HomeTeam_win = int(HomeTeam_data[-21])
    HomeTeam_proj_win = int(HomeTeam_data[-19])
    HomeTeam_data_value = list(HomeTeam_data)
    HomeTeam_data_value.pop()
    HomeTeam_data_value.pop(0)
    HomeTeam_data_value.pop(0)

    AwayTeam_win = int(AwayTeam_data[-21])
    AwayTeam_proj_win = int(AwayTeam_data[-19])
    AwayTeam_data_value = list(AwayTeam_data)
    AwayTeam_data_value.pop()
    AwayTeam_data_value.pop(0)
    AwayTeam_data_value.pop(0)

    stat_home = []
    stat_home.extend(HomeTeam_data_value)
    stat_home.extend(AwayTeam_data_value)
    stat_away = []
    stat_away.extend(AwayTeam_data_value)
    stat_away.extend(HomeTeam_data_value)
    stat_home = np.array(stat_home).reshape((1, len(stat_home)))
    stat_away = np.array(stat_away).reshape((1, len(stat_away)))

    train_percentage = 0.7
    acc1 = []
    acc2 = []
    print("Running Models", end='')
    for i in range(3):
        regression, model, acc1_, acc2_ = createModel(train_percentage)

        home_score_r = regression.predict(stat_home)
        away_score_r = regression.predict(stat_away)
        home_score_n = model.predict(stat_home)
        away_score_n = model.predict(stat_away)
        reg_h.append(home_score_r[0])
        reg_a.append(away_score_r[0])
        nn_h.append(home_score_n[0][0])
        nn_a.append(away_score_n[0][0])
        acc1.append(acc1_)
        acc2.append(acc2_)
        print('.', end='')

    print('\n')
    avg_acc1 = average(acc1)
    avg_acc2 = average(acc2)
    print("With Train Test Split = {:d}:{:d}".format(
        int(100*train_percentage), int(100*(1-train_percentage))))
    print(
        "Average Ridge Regression accuracy on testing sets: {:.2f}%".format(avg_acc1*100))
    print("Average NN accuracy on testing sets: {:.2f}%".format(avg_acc2*100))

    home_score_r = average(reg_h)
    away_score_r = average(reg_a)
    if home_score_r > away_score_r:
        print("With the {} being the home team, regression predicts that they beat the {} by {:.2f} pts".format(
            HomeTeam, AwayTeam, round(home_score_r-away_score_r, 2)))
    else:
        print("With the {} being the home team, regression predicts that they lose to the {} by {:.2f} pts".format(
            HomeTeam, AwayTeam, round(away_score_r-home_score_r, 2)))

    home_score_n = average(nn_h)
    away_score_n = average(nn_a)
    if home_score_n > away_score_n:
        print("With the {} being the home team, NN predicts that they beat the {} by {:.2f} pts".format(
            HomeTeam, AwayTeam, round(home_score_n-away_score_n, 2)))
    else:
        print("With the {} being the home team, NN predicts that they lose to the {} by {:.2f} pts".format(
            HomeTeam, AwayTeam, round(away_score_n-home_score_n, 2)))

    if home_score_r > away_score_r and home_score_n > away_score_n:
        print("The average win margin for {} is {:.2f} pts".format(HomeTeam,
                                                                   avg(home_score_r-away_score_r, home_score_n-away_score_n)))

    if home_score_r < away_score_r and home_score_n < away_score_n:
        print("The average win margin for {} is {:.2f} pts".format(AwayTeam,
                                                                   avg(away_score_r-home_score_r, away_score_n-home_score_n)))

    if HomeTeam_win > HomeTeam_proj_win:
        print("The {} is exceeding expectations by {} wins".format(
            HomeTeam, HomeTeam_win - HomeTeam_proj_win))
    elif HomeTeam_proj_win > HomeTeam_win:
        print("The {} is short of the expectations by {} wins".format(
            HomeTeam, -HomeTeam_win + HomeTeam_proj_win))

    if AwayTeam_win > AwayTeam_proj_win:
        print("The {} is exceeding expectations by {} wins".format(
            AwayTeam, AwayTeam_win - AwayTeam_proj_win))
    elif AwayTeam_proj_win > AwayTeam_win:
        print("The {} is short of the expectations by {} wins".format(
            AwayTeam, -AwayTeam_win + AwayTeam_proj_win))
Example #21
 def _followxSingleDirection(  self, 
                               x, 
                               direction = Direction.FORWARD,
                               forward_curve = None,
                               last_eigenvector = None, 
                               weights = 1.):
   '''Generates a partial lpc curve dictionary from the start point, x.
   Arguments
   ---------
   x : 1-dim, length m, numpy.array of floats, start point for the algorithm when m is dimension of feature space
   
   direction :  bool, proceeds in Direction.FORWARD or Direction.BACKWARD from this point (just sets sign for first eigenvalue) 
   
   forward_curve : dictionary as returned by this function, is used to detect crossing of the curve under construction with a
       previously constructed curve
       
   last_eigenvector : 1-dim, length m, numpy.array of floats, a unit vector that defines the initial direction, relative to
       which the first eigenvector is biased and initial cos_neu_neu is calculated  
       
   weights : 1-dim, length n numpy.array of observation weights (can also be used to exclude
       individual observations from the computation by setting their weight to zero.),
       where n is the number of feature points 
   '''
   x0 = copy(x)
   N = self.Xi.shape[0]
   d = self.Xi.shape[1]
   it = self._lpcParameters['it']
   h = array(self._lpcParameters['h'])
   t0 = self._lpcParameters['t0']
   rho0 = self._lpcParameters['rho0']
   
   save_xd = empty((it,d))
   eigen_vecd = empty((it,d))
   c0 = ones(it)
   cos_alt_neu = ones(it)
   cos_neu_neu = ones(it)    
   lamb = empty(it) #NOTE this is named 'lambda' in the original R code
   rho = zeros(it)
   high_rho_points = empty((0,d))    
   count_points = 0
   
   for i in range(it):
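      # kernel-weighted local mean (the mean-shift step) of the data around x0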
     kernel_weights = self._kernd(self.Xi, x0, c0[i]*h) * weights
     mu_x = average(self.Xi, axis = 0, weights = kernel_weights)
     sum_weights = sum(kernel_weights)
     mean_sub = self.Xi - mu_x 
     cov_x = dot( dot(transpose(mean_sub), numpy.diag(kernel_weights)), mean_sub) / sum_weights 
     #assert (abs(cov_x.transpose() - cov_x)/abs(cov_x.transpose() + cov_x) < 1e-6).all(), 'Covariance matrix not symmetric, \n cov_x = {0}, mean_sub = {1}'.format(cov_x, mean_sub)
     save_xd[i] = mu_x #save first point of the branch
     count_points += 1
     
     #calculate path length
     if i==0:
       lamb[0] = 0
     else:
       lamb[i] = lamb[i-1] + sqrt(sum((mu_x - save_xd[i-1])**2))
     
     #calculate eigenvalues/vectors
     #(sorted_eigen_cov is a list of tuples containing eigenvalue and associated eigenvector, sorted descending by eigenvalue)
     eigen_cov = eigh(cov_x)
      sorted_eigen_cov = sorted(zip(eigen_cov[0], map(ravel, vsplit(eigen_cov[1].transpose(), len(eigen_cov[1])))),
                                key = lambda elt: elt[0], reverse = True)
     eigen_norm = sqrt(sum(sorted_eigen_cov[0][1]**2))
     eigen_vecd[i] = direction * sorted_eigen_cov[0][1] / eigen_norm  #Unit eigenvector corresponding to largest eigenvalue
     
     #rho parameters
     rho[i] = sorted_eigen_cov[1][0] / sorted_eigen_cov[0][0] #Ratio of two largest eigenvalues
     if i != 0 and rho[i] > rho0 and rho[i-1] <= rho0:
       high_rho_points = vstack((high_rho_points, x0))
     
     #angle between successive eigenvectors
     if i==0 and last_eigenvector is not None:
       cos_alt_neu[i] = direction * dot(last_eigenvector, eigen_vecd[i])
     if i > 0:
       cos_alt_neu[i] = dot(eigen_vecd[i], eigen_vecd[i-1])
     
     #signum flipping
     if cos_alt_neu[i] < 0:
       eigen_vecd[i] = -eigen_vecd[i]
       cos_neu_neu[i] = -cos_alt_neu[i]
     else:
       cos_neu_neu[i] = cos_alt_neu[i]
    
     #angle penalization
     pen = self._lpcParameters['pen']
     if pen > 0:
       if i == 0 and last_eigenvector is not None:
         a = abs(cos_alt_neu[i])**pen
         eigen_vecd[i] = a * eigen_vecd[i] + (1-a) * last_eigenvector
       if i > 0:
         a = abs(cos_alt_neu[i])**pen
         eigen_vecd[i] = a * eigen_vecd[i] + (1-a) * eigen_vecd[i-1]
             
     #check curve termination criteria
     if i not in (0, it-1):
       #crossing
       cross = self._lpcParameters['cross']
       if forward_curve is None:
         full_curve_points = save_xd[0:i+1]
       else:
         full_curve_points = vstack((forward_curve['save_xd'],save_xd[0:i+1])) #inefficient, initialize then append? 
       if not cross:
         prox = where(ravel(cdist(full_curve_points,[mu_x])) <= mean(h))[0]
         if len(prox) != max(prox) - min(prox) + 1:
           break
         
       #convergence
       convergence_at = self._lpcParameters['convergence_at']
       conv_ratio = abs(lamb[i] - lamb[i-1]) / (2 * (lamb[i] + lamb[i-1]))
       if conv_ratio  < convergence_at:
         break
       
       #boundary
       boundary = self._lpcParameters['boundary']
       if conv_ratio < boundary:
         c0[i+1] = 0.995 * c0[i]
       else:
         c0[i+1] = min(1.01*c0[i], 1)
     
     #step along in direction eigen_vecd[i]
     x0 = mu_x + t0 * eigen_vecd[i]
   
   #trim output in the case where convergence occurs before 'it' iterations    
   curve = { 'save_xd': save_xd[0:count_points],
             'eigen_vecd': eigen_vecd[0:count_points],
             'cos_neu_neu': cos_neu_neu[0:count_points],
             'rho': rho[0:count_points],
             'high_rho_points': high_rho_points,
             'lamb': lamb[0:count_points],
             'c0': c0[0:count_points]
           }
   return curve  
Example #22
 def get_ave(self):
     return average(self.ylist)
Example #23
 def avg_degree(self):
     return average(self.degree().values())
Example #24
field1 = np.array(field1)
field2 = np.array(field2)
field3 = np.array(field3)

plt.ylabel("time (ms)")
plt.xlabel("")
plt.title(info)

plt.plot(field1, color="orange")
plt.plot(field2, color="blue")
plt.plot(field3, color="green")

maximum1 = np.max(field1)
minimum1 = np.min(field1)
maximum2 = np.max(field2)
minimum2 = np.min(field2)
maximum3 = np.max(field3)
minimum3 = np.min(field3)

legend1 = 'WiFi (ms): min={} max={} avr={}'.format(minimum1, maximum1,
                                                   int(average(field1)))
legend2 = 'SIM  (ms): min={} max={} avr={}'.format(minimum2, maximum2,
                                                   int(average(field2)))
legend3 = 'Sum  (ms): min={} max={} avr={}'.format(minimum3, maximum3,
                                                   int(average(field3)))

plt.legend([legend1, legend2, legend3])
plt.grid(color="grey", linewidth=1, axis="x", alpha=0.1)
plt.show()
Example #25
    output.append([link, title[0].text, name, score])

# Calculate and plot the frequency of player mentions in the articles
namecount = sorted(namecount.items(), key=lambda x: x[1], reverse=True)
for i in range(5):
    top5_name.append(namecount[i][0])
    top5_freq.append(namecount[i][1])
plt.bar(np.arange(0, 10, step = 2),top5_freq,width = 0.8)
plt.xticks(np.arange(0, 10, step = 2),top5_name, rotation=0)
plt.xlabel("Players")
plt.ylabel("Frequency")
plt.title("Frequency of articles writen on tennis players")
plt.savefig("task4.png")

# Calculate and plot the average game diff and win pct by player
avg_diff = [average(game_difference[player]) for player in game_difference]
player_names = list(game_difference)
fig=plt.figure()
with open('tennis.json') as f:
    Data = json.load(f)
    for name in player_names:
        for entry in Data:
            if entry["name"]==name.upper():
                win_percentage.append(float(entry["wonPct"][:-1]))
                break
plt.scatter(avg_diff, win_percentage)
plt.xlabel('Average Game Difference')
plt.ylabel('Win Percentage')
plt.title('Scatter of Average Game Difference by Win Percentage')
texts = []
for i in range(0,len(avg_diff)):
Example #26
#new range:
new_range_ig_naive_bayse = [0.7169642857142857, 0.6879464285714286, 0.7044642857142858, 0.6910714285714286, 0.6941964285714286, 0.6955357142857143, 0.6959821428571429, 0.6915178571428572, 0.6901785714285714, 0.6745535714285714, 0.6821428571428572, 0.6790178571428571, 0.6683035714285714, 0.6669642857142857, 0.6486607142857143, 0.646875, 0.6464285714285715, 0.6455357142857143, 0.6450892857142857, 0.6495535714285714, 0.6508928571428572, 0.6508928571428572, 0.6513392857142857, 0.6504464285714285, 0.646875, 0.646875, 0.6477678571428571, 0.6464285714285715, 0.6464285714285715, ]
new_range_ig_linear_svm = [0.7098214285714286, 0.6861607142857142, 0.6991071428571428, 0.6959821428571429, 0.6825892857142857, 0.6915178571428572, 0.6785714285714286, 0.6959821428571429, 0.6799107142857143, 0.6745535714285714, 0.6763392857142857, 0.6785714285714286, 0.6772321428571428, 0.6638392857142857, 0.6651785714285714, 0.6638392857142857, 0.6642857142857143, 0.6575892857142858, 0.6580357142857143, 0.6598214285714286, 0.6540178571428571, 0.6544642857142857, 0.6571428571428571, 0.6665178571428572, 0.6647321428571429, 0.6616071428571428, 0.6638392857142857, 0.6651785714285714, 0.6584821428571429, ]
new_range_ig_hyperbolic_svm = [0.6964285714285714, 0.6870535714285714, 0.6977678571428572, 0.6941964285714286, 0.6866071428571429, 0.6986607142857143, 0.6848214285714286, 0.6834821428571428, 0.6901785714285714, 0.671875, 0.6700892857142857, 0.6790178571428571, 0.6566964285714286, 0.6611607142857143, 0.6709821428571429, 0.6598214285714286, 0.6678571428571428, 0.6696428571428571, 0.6709821428571429, 0.6602678571428572, 0.6629464285714286, 0.6669642857142857, 0.6714285714285714, 0.6665178571428572, 0.6700892857142857, 0.6584821428571429, 0.6633928571428571, 0.6709821428571429, 0.665625, ]

stochastic_naive_bayse = [0.5205357142857143, 0.565625, 0.6017857142857143, 0.6200892857142857, 0.6352678571428572, 0.6142857142857143, 0.6348214285714285, 0.6433035714285714, 0.6303571428571428, 0.6334821428571429, 0.640625, 0.6410714285714286, 0.6508928571428572, 0.6495535714285714, 0.6486607142857143, 0.659375, 0.6575892857142858, 0.6607142857142857, 0.6714285714285714, 0.6741071428571429, 0.6683035714285714, 0.6776785714285715, 0.6651785714285714, 0.6830357142857143, 0.6852678571428571, 0.6700892857142857, 0.6732142857142858, 0.6803571428571429, 0.6741071428571429, ]
stochastic_linear_svm = [0.5294642857142857, 0.5428571428571428, 0.6013392857142857, 0.6334821428571429, 0.6272321428571429, 0.603125, 0.6129464285714286, 0.640625, 0.6419642857142858, 0.6508928571428572, 0.6544642857142857, 0.6580357142857143, 0.6633928571428571, 0.66875, 0.6714285714285714, 0.6464285714285715, 0.6723214285714286, 0.6785714285714286, 0.6772321428571428, 0.6803571428571429, 0.6803571428571429, 0.6879464285714286, 0.6830357142857143, 0.6839285714285714, 0.7008928571428571, 0.6982142857142857, 0.69375, 0.6745535714285714, 0.6910714285714286, ]
stochastic_hyperbolic_svm = [0.5482142857142858, 0.5732142857142857, 0.6022321428571429, 0.628125, 0.6196428571428572, 0.6366071428571428, 0.6142857142857143, 0.634375, 0.65, 0.6540178571428571, 0.6522321428571428, 0.6763392857142857, 0.6736607142857143, 0.66875, 0.6736607142857143, 0.6660714285714285, 0.6736607142857143, 0.6696428571428571, 0.6607142857142857, 0.6741071428571429, 0.7008928571428571, 0.6857142857142857, 0.6799107142857143, 0.6852678571428571, 0.6830357142857143, 0.6794642857142857, 0.690625, 0.6799107142857143, 0.6897321428571429, ]

pca_hyperbolic_svm = [0.6571428571428571, 0.6352678571428572, 0.6160714285714286, 0.5513392857142857, 0.6169642857142857, 0.6223214285714286, 0.5508928571428572, 0.6352678571428572, 0.6321428571428571, 0.634375, 0.6450892857142857, 0.6267857142857143, 0.6410714285714286, 0.646875, 0.6361607142857143, 0.6459821428571428, 0.6348214285714285, 0.6375, 0.6433035714285714, 0.6424107142857143, 0.6388392857142857, 0.6303571428571428, 0.6419642857142858, 0.6334821428571429, 0.6285714285714286, 0.6375, 0.6330357142857143, 0.6379464285714286, 0.6375, ]
pca_linear_svm = [0.6316964285714286, 0.6196428571428572, 0.5370535714285715, 0.35401785714285716, 0.603125, 0.6017857142857143, 0.60625, 0.6160714285714286, 0.6013392857142857, 0.6053571428571428, 0.621875, 0.6330357142857143, 0.6410714285714286, 0.63125, 0.6241071428571429, 0.6366071428571428, 0.6339285714285714, 0.6370535714285714, 0.6223214285714286, 0.6339285714285714, 0.6348214285714285, 0.6392857142857142, 0.6263392857142858, 0.6223214285714286, 0.6415178571428571, 0.6339285714285714, 0.6357142857142857, 0.6392857142857142, 0.6223214285714286, ]
pca_naive_bayse = [0.3665178571428571, 0.3879464285714286, 0.590625, 0.5785714285714286, 0.378125, 0.5794642857142858, 0.6040178571428572, 0.35625, 0.5785714285714286, 0.5785714285714286, 0.5803571428571429, 0.6236607142857142, 0.5790178571428571, 0.5763392857142857, 0.5816964285714286, 0.5763392857142857, 0.5741071428571428, 0.625, 0.5665178571428572, 0.5696428571428571, 0.5919642857142857, 0.63125, 0.5852678571428571, 0.5816964285714286, 0.5803571428571429, 0.5910714285714286, 0.5848214285714286, 0.5803571428571429, 0.5803571428571429, ]

list1 = new_range_ig_naive_bayse +  new_range_ig_linear_svm + new_range_ig_hyperbolic_svm
list2 = stochastic_naive_bayse + stochastic_linear_svm + stochastic_hyperbolic_svm
list3 = pca_hyperbolic_svm + pca_linear_svm + pca_naive_bayse

print('ig VS stochastic')
print(wilcoxon(list1, list2))
print('avg ig=', average(list1), ' avg stochastic=', average(list2))

print('ig VS PCA')
print(wilcoxon(list1, list3))
print('avg ig=', average(list1), ' avg PCA=', average(list3))

print('stochastic VS PCA')
print(wilcoxon(list2, list3))
print('avg stochastic=', average(list2), ' avg PCA=', average(list3))

Example #27
            else:
                grid_z[j][i] = 0
                grid_c[i + j * 16] = fill_value

    return grid_z, grid_c, grid_s


print(end='.', flush=True)
log = parse_file(open(sys.argv[1], encoding='windows-1252'))
print(end='.', flush=True)
points = get_points(log, logfct)
print(end='.', flush=True)
grid_z = fill_grid(np.digitize(points[0], rpm_bins), np.digitize(
    points[1], map_bins), points)
print(end='.', flush=True)
z, c, s = interpolate_grid(grid_z, average(points[2]))
print('!')

flip_z = np.flip(np.array(grid_z), 0)

if print_array:
    print(flip_z)

if plot_array:
    grid_x, grid_y = np.meshgrid(rpm_bins, map_bins)
    plt.scatter(grid_x, grid_y, c=c, s=s, alpha=0.25, cmap='viridis')
    plt.grid()
    plt.show()

output_file = open("output.csv", "w+")
for j in range(16):
Example #28
    def train(self,
              train_episodes=2000000,
              episodes_log_freq=10000,
              alpha=0.1,
              discount_factor=0.999):
        #perfBeforeTraining = self.runMultipleEpisodes(10000)
        epsilon = 1  # large epsilon => more random exploration
        max_epsilon = 1
        min_epsilon = 0.01
        decay = 1 / train_episodes

        #Training the agent
        #Creating lists to keep track of reward and epsilon values
        training_rewards = []
        epsilons = []
        average_rewards = []

        for episode in range(train_episodes):
            #Resetting the environment each time as per requirement
            state = self.env.reset()
            #Starting the tracker for the rewards (total for the episode = life time)
            total_episode_award = 0
            done = False
            while not done:
                #Choosing an action given the states based on a random number
                exp_exp_tradeoff = random.uniform(0, 1)

                ### STEP 2: SECOND option for choosing the initial action - exploit
                #If the random number is larger than epsilon: employing exploitation
                #and selecting best action
                if exp_exp_tradeoff > epsilon:
                    action = self.chooseAction(state)

                ### STEP 2: FIRST option for choosing the initial action - explore
                #Otherwise, employing exploration: choosing a random action
                else:
                    action = self.env.action_space.sample()
                    #assert len(action)==3

                ### STEPs 3 & 4: performing the action and getting the reward
                #Taking the action and getting the reward and outcome state
                new_state, reward, done, info = self.env.step(action)

                ### STEP 5: update the Q-table
                #Updating the Q-table using the Bellman equation
                #Q[state, action] = Q[state, action] + alpha * (reward + discount_factor * np.max(Q[new_state, :]) - Q[state, action])
                index = tuple(np.concatenate([state, action]))
                self.Q[index] = self.Q[index] + alpha * (
                    reward + discount_factor * np.max(self.Q[tuple(new_state)])
                    - self.Q[index])
                #Increasing our total reward and updating the state
                total_episode_award += reward
                state = new_state

            #Cutting down on exploration by reducing the epsilon
            #epsilon = min_epsilon + (max_epsilon - min_epsilon)*np.exp(-decay*episode)
            epsilon = epsilon - decay

            #Adding the total reward and reduced epsilon values
            training_rewards.append(total_episode_award)
            epsilons.append(epsilon)

            if (episode + 1) % episodes_log_freq == 0:
                latest = slice(-episodes_log_freq, None)
                avg = average(training_rewards[latest])
                print("Mean training score: " + str(avg))
                average_rewards.append(avg)
                #print ("Mean score from start: " + str(sum(training_rewards)/(episode+1)))
                #print ("Training score over time: " + str(sum(training_rewards)/train_episodes))

        #Visualizing results and total reward over all episodes
        #x = range(train_episodes)
        plt.plot(average_rewards)
        plt.xlabel('Episode')
        plt.ylabel('Training total reward')
        plt.title('Total rewards over all episodes in training')
        plt.show()

        #Visualizing the epsilons over all episodes
        plt.plot(epsilons)
        plt.xlabel('Episode')
        plt.ylabel('Epsilon')
        plt.title("Epsilon for episode")
        plt.show()
    ylabel('number of results')
    title(_title)
    show()

from matplotlib.pyplot import plot, show, legend, xlabel, ylabel, title
def createXYSpreadGraph(list1,list2,name1,name2,_title='average % correctness for required data set'):
    l1=plot(range(1,len(list1)*2+1,2),list1,'blue',label=name1)
    l2=plot(range(1,len(list2)*2+1,2),list2,'red',label=name2)
    xlabel('k')
    ylabel('%correct (average)')
    legend()
    title(_title)
    show()

ignoreAverages = [average(results) for results in (
    ignoreAttributes1NN, ignoreAttributes3NN, ignoreAttributes5NN,
    ignoreAttributes7NN, ignoreAttributes9NN, ignoreAttributes11NN,
    ignoreAttributes13NN, ignoreAttributes15NN, ignoreAttributes17NN,
    ignoreAttributes19NN, ignoreAttributes21NN, ignoreAttributes23NN,
    ignoreAttributes25NN)]
useAllAverages = []
useAllAverages.append(average(useAllAttributes1NN))
useAllAverages.append(average(useAllAttributes3NN))
Example #30
print('-------txt read/write-----------')
e=eye(2)
print(e)
savetxt("eye.txt",e)

print('-------csv-----------')
t,y=loadtxt('haha1.csv', delimiter=',', usecols=(6,7), unpack=True)
print(t)
print(y)
c=loadtxt('haha1.csv', delimiter=',', usecols=(6,7), unpack=False)


print('-------mean, max/min, sort-----------')
print(c)
print(average(t))
print(mean(t))
print(max(t))
print(min(t))

c=arange(5)
c[2]=77
c[3]=88
print(c)
print(median(c))
print(msort(c))
print('-------variance-----------')
print(var(c))
print('c:',c)
print('len:',len(c))
print('c.size:',c.size)
Example #31
            TN_in_TP += 1

    if (FP_in_TP != 0) & (FN_in_TP == 0) & (TN_in_TP == 0):
        the_errors = [err for err in total_error_list[l] if err >= 0]

        error_save_data.append(the_errors)

        if len(the_errors) == 0:
            continue
        else:
            column_error_min = min(the_errors)
            column_error_max = max(the_errors)

        Total_max_list.append(column_error_max)

        column_error_average = average(the_errors)
        Total_average_list.append(column_error_average)

        print(
            "NO.%d seed corresponding MT:     min_err %f     max_err %f     avg_err %f     FP %d"
            % (non_nan_columns_index_cnn_MT_label_seed[l], column_error_min,
               column_error_max, column_error_average, FP_in_TP))

    elif (FP_in_TP != 0) & (FN_in_TP != 0) & (TN_in_TP == 0):
        the_errors = [err for err in total_error_list[l] if err >= 0]

        error_save_data.append(the_errors)

        if len(the_errors) == 0:
            continue
        else:
Example #32
 def analytic_score_sentences(self, sentence_tuples):
     return {'ref-lev': average([levenshtein(h, r) for h, r in sentence_tuples])} 
Example #33
 def avg_degree(self):
     return average(self.degree().values())
Example #34
def compute_segment_center(p1, p2):
    return [average((p1[dimension], p2[dimension])) for dimension in range(len(p1))]
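
For instance (a quick sketch), the midpoint of a 2-D segment:

print(compute_segment_center([0.0, 0.0], [2.0, 4.0]))  # [1.0, 2.0]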
Example #35
        if name not in lmatched:
            lmatched.append(name)
    else:
        print(key + " was not matched by the face API")

print("the very famous category matched " + str(vcorrect) + " out of " + str(len(Very_famous)) + 
      " Pictures with an average confidence rating of " + str(average(vscores)))  

print("a total of " + str(len(vmatched)) + " People were matched out of " + str(len(Very_famous)/3))  


print("the medium famous category matched " + str(mcorrect) + " out of " + str(len(Medium_famous)) + 
      " Pictures with an average confidence rating of " + str(average(mscores)))  

print("a total of " + str(len(mmatched)) + " People were matched out of " + str(len(Medium_famous)/3))  


print("the low famous category matched " + str(lcorrect) + " out of " + str(len(Low_famous)) + 
      " Pictures with an average confidence rating of " + str(average(lscores)))   

print("a total of " + str(len(lmatched)) + " People were matched out of " + str(len(Low_famous)/3))
            
Example #36
def simulateAmount(amount, times=1):
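    # Monte-Carlo estimate: average the evaluation of `times` Gaussian draws,
    # then round to the nearest integer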
    evaluations = [evaluatePrintedAmount(generateGaussianSample(mu, sigma), amount) for _ in range(0, times)]

    return average(evaluations).round().astype(int)
Example #37
from scipy.stats.morestats import wilcoxon
from numpy.lib.function_base import average, median

#tree = [96.19047619047619, 96.28571428571429, 95.61904761904762, 96.0, 96.57142857142857, 96.57142857142857, 95.14285714285714, 96.0, 96.19047619047619, 95.9047619047619, 96.28571428571429, 95.61904761904762, 97.33333333333333, 94.85714285714286, 95.14285714285714, 94.28571428571429, 94.19047619047619, 96.38095238095238, 95.04761904761905, 94.85714285714286, 96.19047619047619, 96.38095238095238, 96.38095238095238, 96.47619047619048, 96.19047619047619, 94.57142857142857, 96.38095238095238, 96.47619047619048, 96.47619047619048, 94.95238095238095, 96.19047619047619, 95.14285714285714, 95.71428571428571, 94.85714285714286, 95.9047619047619, 96.66666666666667, 94.95238095238095, 94.57142857142857, 94.19047619047619, 94.19047619047619, 95.9047619047619, 95.9047619047619, 94.66666666666667, 96.0952380952381, 96.0, 95.9047619047619, 97.14285714285714, 96.19047619047619, 96.28571428571429, 96.19047619047619]
#C457NN = [96.0, 94.95238095238095, 94.28571428571429, 95.61904761904762, 95.23809523809524, 96.38095238095238, 95.23809523809524, 96.0, 95.04761904761905, 96.0952380952381, 96.19047619047619, 94.57142857142857, 96.47619047619048, 96.38095238095238, 95.42857142857143, 94.85714285714286, 94.57142857142857, 96.19047619047619, 94.76190476190476, 94.47619047619048, 94.95238095238095, 95.61904761904762, 96.19047619047619, 96.0, 95.04761904761905, 95.33333333333333, 96.28571428571429, 95.52380952380952, 96.47619047619048, 95.61904761904762, 95.80952380952381, 95.14285714285714, 96.0952380952381, 95.04761904761905, 95.33333333333333, 96.85714285714286, 95.23809523809524, 96.0952380952381, 93.52380952380952, 95.52380952380952, 95.71428571428571, 96.0, 94.57142857142857, 96.66666666666667, 96.0, 95.61904761904762, 96.38095238095238, 95.61904761904762, 96.0, 95.61904761904762]


tree = [96.18604651162791, 96.18604651162791, 96.37209302325581, 96.46511627906976, 96.74418604651163, 96.09302325581395, 95.90697674418605, 96.18604651162791, 96.27906976744185, 96.27906976744185, 96.09302325581395, 96.18604651162791, 96.65116279069767, 96.18604651162791, 95.81395348837209, 96.0, 96.55813953488372, 96.18604651162791, 95.72093023255815, 95.72093023255815, 96.0, 95.90697674418605, 96.09302325581395, 96.0, 96.46511627906976, 96.18604651162791, 96.09302325581395, 95.90697674418605, 95.81395348837209, 96.55813953488372, 96.0, 96.46511627906976, 96.0, 96.09302325581395, 96.18604651162791, 95.72093023255815, 96.27906976744185, 95.62790697674419, 94.79069767441861, 95.81395348837209, 96.09302325581395, 96.18604651162791, 96.37209302325581, 96.37209302325581, 96.27906976744185, 96.09302325581395, 96.46511627906976, 96.74418604651163, 96.0, 96.18604651162791]

C457NN = [94.13953488372093, 95.53488372093024, 96.46511627906976, 95.44186046511628, 95.72093023255815, 95.53488372093024, 95.06976744186046, 96.0, 94.88372093023256, 95.81395348837209, 94.97674418604652, 94.69767441860465, 96.37209302325581, 95.25581395348837, 94.79069767441861, 95.53488372093024, 96.46511627906976, 96.0, 95.90697674418605, 95.62790697674419, 95.81395348837209, 94.32558139534883, 95.16279069767442, 94.4186046511628, 94.97674418604652, 96.0, 95.81395348837209, 95.34883720930233, 95.72093023255815, 95.90697674418605, 95.53488372093024, 95.72093023255815, 95.25581395348837, 95.62790697674419, 96.55813953488372, 96.37209302325581, 96.09302325581395, 94.51162790697674, 95.16279069767442, 94.79069767441861, 95.25581395348837, 94.69767441860465, 96.46511627906976, 95.44186046511628, 95.81395348837209, 96.55813953488372, 95.25581395348837, 96.46511627906976, 94.97674418604652, 94.97674418604652]


print('average tree = ', average(tree))
print('average C4.5(7NN) = ', average(C457NN))
print('median tree = ', median(tree))
print('median C4.5(7NN) = ', median(C457NN))


print('wilcoxon test for J48 or C4.5(7NN):', wilcoxon(tree, C457NN))


Example #38
people.pop(31)
people.pop(30)
people.pop(0)

person = (random.choice(people))

questions = []
unique_questions = []
occurence_count = []

for i in range(0, 100):
    question = (random.choice(people))
    if question in questions:
        occurence_count.append(question)
    else:
        unique_questions.append(question)
    questions.append(question)

print(len(questions))
print(len(unique_questions))

ocount = []

for uquestion in unique_questions:
    count = questions.count(uquestion)
    print('This question ' + uquestion + ' appeared  ' + str(count) + ' times')
    ocount.append(count)
print("There was a total of " + str(len(occurence_count)) +
      " Same occurences of a question")
print(str(average(ocount)))
Example #39
def draw_pose(dwg, pose, src_size, inference_box, depthFrame, intrinsics, color='yellow', threshold=0.2):
    global timestamps
    global checkfpsNow
    global fps
    box_x, box_y, box_w, box_h = inference_box
    scale_x, scale_y = src_size[0] / box_w, src_size[1] / box_h
    xys = {}
    cv2.namedWindow('motion', cv2.WINDOW_AUTOSIZE)

#    print(pose)
#    print(dwg.shape)
#    print(len(depthFrame[0]))
    realXYZ={}
    oldPoints={}
    for label, keypoint in pose.keypoints.items():
        if keypoint.score < threshold : continue
        # Offset and scale to source coordinate space.
        #print(keypoint.yx[0],keypoint.yx[1])
        
        kp_y = int((keypoint.yx[0] - box_y) * scale_y)
        kp_x = int((keypoint.yx[1] - box_x) * scale_x)
        #print(kp_x,kp_y)
        scale=10
        kp_x=int(kp_x-(kp_x%scale))
        kp_y=int(kp_y-(kp_y%scale))
        xys[label] = (kp_x, kp_y)
        dwg=cv2.circle(dwg,(int(kp_x), int(kp_y)), 3,(0,0,255),5)
        realX,realY,realZ=rs.rs2_deproject_pixel_to_point(intrinsics,[kp_y,kp_x],depthFrame[int(kp_y)-1][int(kp_x)])
        realXYZ[label]=[realX,realY,realZ]

        #dwg=cv2.putText(dwg,"({0},{1},{2})".format(round(realY),round(realX),round(realZ)),(int(kp_x),int(kp_y) ),(cv2.FONT_HERSHEY_COMPLEX),0.6,(0,255,0),1,cv2.LINE_AA)
#        dwg=cv2.putText(dwg,"({0},{1},{2})".format((kp_x),(kp_y),str(depthFrame[int(kp_y)-1][int(kp_x)])),(int(kp_x),int(kp_y) ),(cv2.FONT_HERSHEY_COMPLEX),0.6,(0,255,0),1,cv2.LINE_AA)
    for a, b in EDGES:
        if a not in xys or b not in xys: continue
        ax, ay = xys[a]
        bx, by = xys[b]
        dwg=cv2.line(dwg,(ax, ay),(bx, by), (0,255,0),2)

    for A, B, C, D in shoulderAngles:
        if B not in xys or C not in xys or D not in xys: continue
        if "right hip" in xys: A = "right hip"
        elif "left hip" in xys: A = "left hip"
        else: continue
#        print(list(xys[A])[1],xys[B],xys[C])
#       print((depthFrame[list(xys[A])[1]-1] [list(xys[A])[0]]))
#        a,b,c,d=list(map(lambda p: ((np.asarray(xys[p]+tuple([depthFrame[list(xys[p])[1]-1][list(xys[p])[0]]])))),[A,B,C,D]))
        a,b,c,d=list(map(lambda p: np.asarray(realXYZ[p]),[A,B,C,D]))
#        print(a,b,c)
        ANGLES=projectedAngle(c-d,a,b,d)
        dwg=cv2.putText(dwg,str(ANGLES),(xys[D]),(cv2.FONT_HERSHEY_COMPLEX),0.6,(0,255,0),1,cv2.LINE_AA)
    
    for A,B,C in elbowAngles:
        if A not in xys or B not in xys or C not in xys: continue
#        print(list(xys[A])[1],xys[B],xys[C])
#        print((depthFrame[list(xys[A])[1]-1] [list(xys[A])[0]]))
#        realX,realY,realZ=rs.rs2_deproject_pixel_to_point(intrinsics,[kp_y,kp_x],depthFrame[int(kp_y)-1][int(kp_x)])
#        a,b,c=list(map(lambda p: ((np.asarray(xys[p]+tuple([depthFrame[list(xys[p])[1]-1][list(xys[p])[0]]])))),[A,B,C]))
        a,b,c=list(map(lambda p: np.asarray(realXYZ[p]),[A,B,C]))

#        a,b,c=list(map(lambda p: ((np.asarray(xys[p]+tuple([depthFrame[list(xys[p])[1]-1][list(xys[p])[0]]])))),[A,B,C]))
#        print(a,b,c)
        ANGLE = angle(b-a, c-b, True)
        if ANGLE != -1:  # angle() presumably signals failure with -1
            dwg = cv2.putText(dwg, str(ANGLE), (xys[B]), (cv2.FONT_HERSHEY_COMPLEX), 1, (0,255,0), 2, cv2.LINE_AA)
    
    if len(timestamps) > 30 and checkfpsNow > 5:
        checkfpsNow=0
        fps=1/average(timestamps)
    checkfpsNow+=1
    dwg=cv2.putText(dwg,str(round(fps))+' fps',(10,20),(cv2.FONT_HERSHEY_COMPLEX),1,(255,0,0),2,cv2.LINE_AA)

    cv2.imshow('motion', dwg)