Example #1
def main():
    image_raw = cv2.imread('../cheetah-640x480.jpg')
    Q1.convert_gray_bin(image_raw)
    Q2.dither_matrix()
    Q2.floyd_steinberg_dither('../cheetah-640x480.jpg')
    Q2.floyd_steinberg_without_error_diffusion('../cheetah-640x480.jpg')
    Q2.ordered_dithering('../cheetah-640x480.jpg')
    Q3.draw_histograms(image_raw, '../cheetah-640x480.jpg')
Example #2
File: Q2.py Project: sendurr/CSCE-206
def printstarx(n, y):
    # Repeat the star pattern from Q1.printstar(n) y times.
    answer = Q1.printstar(n) * y
    return answer
Example #3
 def testKnownCases(self):
     """Tests some known cases."""
     # Test the given example
     self.assertEqual(
         3, Q1.counting_islands(_map_grid("FTFT|TTFF|FFTF|FFTF")))
     # Test other map example
     self.assertEqual(
         2, Q1.counting_islands(_map_grid("FTFFF|FTTTT|TTFFF|FFTTF")))
     # Test other map example - ring island
     self.assertEqual(
         2, Q1.counting_islands(_map_grid("TTTTT|TFFFT|TFTFT|TFFFT|TTTTT")))
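These tests pin down the expected behaviour: islands are 4-connected groups of T cells, and a ring counts separately from the land it encloses. Below is a minimal flood-fill sketch consistent with these cases; the real Q1.counting_islands and _map_grid may differ, and the string parser here is an assumption.

def counting_islands(grid):
    # Count 4-connected groups of True cells via iterative flood fill.
    rows, cols = len(grid), len(grid[0])
    seen = set()

    def flood(r, c):
        stack = [(r, c)]
        while stack:
            r, c = stack.pop()
            if 0 <= r < rows and 0 <= c < cols and grid[r][c] and (r, c) not in seen:
                seen.add((r, c))
                stack.extend([(r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)])

    islands = 0
    for r in range(rows):
        for c in range(cols):
            if grid[r][c] and (r, c) not in seen:
                islands += 1
                flood(r, c)
    return islands

def _map_grid(s):
    # Hypothetical parser: "FTFT|TTFF" -> [[False, True, False, True], ...]
    return [[c == "T" for c in row] for row in s.split("|")]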
Example #4
 def testKnownCases(self):
     """Tests some known cases."""
     # Test the given example
     given_dictionary = ["ART", "RAT", "CAT", "CAR"]
     self.assertTrue(
         Q1.find_ordered_alphabet(given_dictionary) in
         [['T', 'A', 'R', 'C'], ['A', 'T', 'R', 'C']])
     test_dictionary1 = ["baa", "abcd", "abca", "cab", "cad"]
     self.assertTrue(
         Q1.find_ordered_alphabet(test_dictionary1) in
         [['b', 'd', 'a', 'c'], ['d', 'b', 'a', 'c']])
     test_dictionary2 = ["ab", "bd", "c", "d"]
     self.assertEqual(Q1.find_ordered_alphabet(test_dictionary2),
                      ['a', 'b', 'c', 'd'])
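For reference, a minimal find_ordered_alphabet() consistent with this test and with Examples #25 and #30: Kahn's topological sort over the first differing letters of adjacent words. This is a sketch, not necessarily the repo's Q1 implementation.

def find_ordered_alphabet(dictionary):
    if not dictionary:
        raise ValueError("empty dictionary")
    letters = {c for word in dictionary for c in word}
    edges = {c: set() for c in letters}      # c -> letters that come after c
    indegree = {c: 0 for c in letters}
    for w1, w2 in zip(dictionary, dictionary[1:]):
        for c1, c2 in zip(w1, w2):
            if c1 != c2:                     # first differing pair fixes the order
                if c2 not in edges[c1]:
                    edges[c1].add(c2)
                    indegree[c2] += 1
                break
    order = []
    ready = [c for c in letters if indegree[c] == 0]
    while ready:
        c = ready.pop()
        order.append(c)
        for nxt in edges[c]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                ready.append(nxt)
    if len(order) != len(letters):           # a cycle means the order is inconsistent
        raise ValueError("inconsistent dictionary order")
    return order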
Example #5
def record_experiment_data(fs, fnus):

    # print(fnus[0].Neq)
    m2 = 800000
    threshold = 400000

    temperature, humidity = dataset.import_data(fs)

    number = []
    decomposition = []
    extension_rate = []
    litter = []

    record_x = []
    record_y = []
    # training
    for i in range(5000):
        # push the current temperature/humidity reading into every unit
        for j in range(50):
            fnus[j].T_real = temperature[i]
            fnus[j].W_real = humidity[i]

        total_number, total_decomposition_rate, m2, d_number, flag, threshold = Q1.update_real_number(
            fnus, m2, threshold)

        number.append(total_number)
        decomposition.append(total_decomposition_rate)
        extension_rate.append(d_number)
        litter.append(m2)
        if flag == 1:
            record_x.append(i)
            record_y.append(threshold * 2)
        # plt.plot()
        # plt.show()
    return extension_rate, number, fnus, decomposition, litter, record_x, record_y, temperature, humidity
Example #6
    def simulate(self, num_simulated_cohorts, cohort_size, time_steps):
        """
        :param num_simulated_cohorts: number of cohorts to simulate
        :param cohort_size: population size of cohorts
        :param time_steps: simulation length
        :return:
        """

        # resample cohort IDs and mortality probabilities based on weights
        sampled_row_indices = np.random.choice(a=range(0, len(self._weights)),
                                               size=num_simulated_cohorts,
                                               replace=True,
                                               p=self._weights)

        # use the sampled indices to populate the list of cohort IDs and mortality probabilities
        resampled_IDs = []
        resampled_mortalityprobs = []
        for i in sampled_row_indices:
            resampled_IDs.append(self._cohortIDs[i])
            resampled_mortalityprobs.append(self._mortalityProbs[i])

        # simulate the desired number of cohorts
        self._multiCohorts = SurvivalCls.MultiCohort(
            ids=resampled_IDs,
            pop_sizes=[cohort_size] * num_simulated_cohorts,
            mortality_probs=resampled_mortalityprobs)

        # simulate all the cohorts
        self._multiCohorts.simulate(time_steps)
Example #7
def startSearching(fileName, mode):
    baseImage = Image.open(fileName)
    # The last 16 characters of the path are the image file name;
    # everything before them is the dataset directory.
    fileList = os.listdir(fileName[:-16])
    fileName = fileName[-16:]
    if mode == "Q1-ColorHistogram":
        rank = Q1.Q1_run(baseImage, fileList)

    elif mode == "Q2-ColorLayout":
        if os.path.exists("./offline/Q2_DCTData.csv"):
            rank = Q2.Q2_offline_run(fileName, fileList)
        else:
            rank = Q2.Q2_run(baseImage, fileList)

    elif mode == "Q3-SIFT Visual Words":
        if os.path.exists("./offline/Q3.csv"):
            rank = Q3.Q3_offline_run(fileName, fileList)
    elif mode == "Q4-Visual Words using stop words":
        if os.path.exists("./offline/Q3.csv"):
            rank = Q4.Q4_offline_run(fileName, fileList)

    for i in xrange(10):
        imgName = "./dataset/" + rank[i][1]
        image = Image.open(imgName)
        image = ImageTk.PhotoImage(
            image.resize((int(image.size[0] * 0.8), int(image.size[1] * 0.8)),
                         Image.ANTIALIAS))
        app.imgs[i].configure(image=image)
        app.imgs[i].image = image
        print "Rank " + str(
            i + 1) + " is number " + rank[i][1][7:-4] + ", distance is " + str(
                round(rank[i][0], 3))
Example #8
def find_knn(test_x, train_x, train_y, k):
    """

    :param test_x: a sample to test
    :param train_x: the data for training
    :param train_y: the training target
    :param k: the number of nearest neighbours
    :return: the prediction target
    """

    # compute the distances between train and test data points
    distances = Q1.distanceFunc(test_x, train_x)
    neg_distance = -distances
    # take top k element
    _, indices = tf.nn.top_k(neg_distance, k=k)

    # build a vector of length N2 (the number of test points) for the predictions
    shape = test_x.shape[0]
    prediction_y = tf.zeros([shape], tf.int32)

    # predict a label for each test point
    for i in range(shape):
        k_neighbors = tf.gather(train_y, indices[i, :])

        # take the majority label among the k neighbours
        values, _, counts = tf.unique_with_counts(
            tf.reshape(k_neighbors, shape=[-1]))
        _, max_count_idx = tf.nn.top_k(counts, k=1)
        prediction = tf.gather(values, max_count_idx)

        # scatter the prediction into position i of the dense result
        sparse_test_target = tf.SparseTensor([[i]], prediction, [shape])
        prediction_y = tf.add(prediction_y,
                              tf.sparse_tensor_to_dense(sparse_test_target))
    return prediction_y
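A minimal usage sketch for the function above, assuming TensorFlow 1.x (matching the tf.Session-era API used here) and that Q1.distanceFunc returns an [N_test, N_train] distance matrix; the shapes and data below are made up.

import numpy as np
import tensorflow as tf  # TF 1.x assumed

train_x = np.random.rand(100, 8).astype(np.float32)
train_y = np.random.randint(0, 3, size=100).astype(np.int32)
test_x = np.random.rand(20, 8).astype(np.float32)

pred_op = find_knn(test_x, train_x, train_y, k=5)  # builds the graph
with tf.Session() as sess:
    print(sess.run(pred_op))  # one predicted label per test point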
Example #9
File: Q1_test.py Project: ustya-k/codeu
 def setUp(self):
     self.binary_tree = Q1.BinaryTree(16)
     self.binary_tree.left = Q1.BinaryTree(9)
     self.binary_tree.left.left = Q1.BinaryTree(3)
     self.binary_tree.left.left.left = Q1.BinaryTree(1)
     self.binary_tree.left.left.right = Q1.BinaryTree(5)
     self.binary_tree.left.right = Q1.BinaryTree(14)
     self.binary_tree.right = Q1.BinaryTree(18)
     self.binary_tree.right.right = Q1.BinaryTree(19)
Example #10
    def testKnownCases(self):
        """Tests some known cases."""
        # Test the given example
        self.assertEqual({"CAR", "CARD", "CAT"},
                         Q1.word_search(_grid("AAR|TCD"), _dictionary(["CAR", "CARD", "CART", "CAT"])))

        # Check that matching is case-insensitive and that two words
        # starting from the same letter are found along different paths
        self.assertEqual({"LODY", "LDYO", "OYDL"},
                         Q1.word_search(_grid("lo|yd"), _dictionary(["LODY", "ldyo", "oydl"])))

        # Check if the word formed from all letters in a grid is found
        self.assertEqual({"VERYLONGWORD", "WOOL", "VOW"},
                         Q1.word_search(_grid("very|rool|dwgn"), _dictionary(["verylongword", "wool", "vow", "wrong"])))

        # Test for duplicates with different combinations of upper-case lower-case characters.
        self.assertEqual({"CAR", "CARD", "CAT"},
                         Q1.word_search(_grid("AaR|TcD"), _dictionary(["CAR", "CARD", "CART", "CAT", "caT"])))
Example #11
def _dictionary(words):
    """Create a dictionary with the given words.

        Args:
            words: a list of strings, the words to be added to the dictionary.

        Returns:
            an instance of Q1.Dictionary containing the given words.
    """
    return Q1.Dictionary(words)
Example #12
def clustering_spectral(A, k, option):
    """
    Fait le clustering spectral sur le graph definit par la matrice d'adjacence A
    NB : A est symetrique (graph non orienté)
    
    Input :     np.array([[]])  A : matrice d'adjacence
                int             k : nombre de classes
                int        option : 1 ou 2 
    """
    C = None
    if option == 1:
        
        eigval, eigvec = np.linalg.eigh(A)
        
        n = len(A)
        U = np.zeros((k,n))
        
        eigvec = np.transpose(eigvec)
        
        for i in range(k):
            U[i] = eigvec[-i-1]
        
        U = np.transpose(U)
        kmeans = skc.KMeans(n_clusters=k).fit(U)        
        C = kmeans.labels_
        graph = q1.graph_from_A(A)
    
        print A
        print C
        colors = []
        for i in range(0, max(C)+1):
            colors.append('%06X' % np.random.randint(0, 0xFFFFFF))
        for vertex in graph.vs():
            vertex["color"] = str('#') + colors[C[vertex.index]]          
        q1.graph_plot(graph,"Clustering_spectral option "+str(option))   

    if option == 2:
        # option 2 is not implemented; placeholder output only
        print '2'
    
    return C
Example #13
def quantityMesure(nom):
    # Sum p[u] * p[v] over consecutive distinct vertices on each line,
    # where p maps each vertex to its degree (from Q1.degre).
    temps_debut = time.time()
    p = Q1.degre(nom)
    somme = 0
    with open(nom, "r") as f:
        for ligne in f:
            buff = ligne.split()
            if buff != []:
                s = len(buff)
                for i in range(0, s - 1):
                    if buff[i] != buff[i + 1]:
                        somme += p[buff[i]] * p[buff[i + 1]]
    return somme, time.time() - temps_debut
Example #14
def cg(bc, mask, maxIter=10000, V0=None, thresh=1e-2):
    """
    this runs a conjugate gradient solver to solve Ax=b where A
    is the Laplacian operator interpreted as a matrix, and b is the contribution from the 
    boundary conditions.  Incidentally, one could add charge into the region by adding it
    to b (the right-hand side or rhs variable)
    """
    if V0 is None:
        V0 = bc.copy()
    st = time.time()
    r = Q1.get_rhs(bc, mask) - Q1.get_laplacian(V0, mask)
    p = r.copy()
    V = V0.copy()
    rtr = np.sum(r * r)
    for k in range(maxIter):
        Ap = Q1.get_laplacian(p, mask)
        alpha = rtr / np.sum(Ap * p)
        V += alpha * p
        r -= alpha * Ap
        rtr_new = np.sum(r * r)
        beta = rtr_new / rtr
        p = r + beta * p
        rtr = rtr_new
        if rtr < thresh:
            fi = time.time() - st
            print(
                f"The conjugate gradient converged in {fi}s after {k} iterations"
            )
            break
        elif k == maxIter - 1:
            fi = time.time() - st
            print(
                f"The maximum number of iterations was reached before CG converged."
                f" Please increase the maximum number of iterations; the potential may"
                f" be poorly resolved. This took {fi}s")
            break
    V[mask] = bc[mask]
    return V, k
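A minimal usage sketch under made-up boundary data. It assumes the Q1 module above is importable and that Q1.get_rhs and Q1.get_laplacian operate on 2-D arrays; the grid size and boundary values are hypothetical.

import numpy as np

n = 64
bc = np.zeros((n, n))
mask = np.zeros((n, n), dtype=bool)
mask[0, :] = mask[-1, :] = mask[:, 0] = mask[:, -1] = True  # fix the box edges
bc[0, :] = 1.0  # hold the top edge at potential 1

V, k = cg(bc, mask, maxIter=5000, thresh=1e-6)
print(V.shape, k)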
Example #15
def adjacence(nom):
    # Build the adjacency dictionary of the graph stored in file `nom`:
    # s[(u, v)] is 1 if the edge u-v exists, 0 otherwise.
    degres = Q1.degre(nom)
    s = dict()
    for (k, v) in degres.items():
        for (i, j) in degres.items():
            s[(k, i)] = 0
    with open(nom, "r") as f:
        for ligne in f:
            buff = ligne.split()
            if buff != []:
                s[(buff[0], buff[1])] = 1
                s[(buff[1], buff[0])] = 1
    return s
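A short usage sketch; it assumes the input file lists one edge per line as two whitespace-separated vertex names (e.g. "a b"), which is the format the split() above implies. The file name is hypothetical.

s = adjacence("graph.txt")
print(s[("a", "b")])  # 1 if the edge a-b exists, 0 otherwise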
Example #16
    def sample_posterior(self):
        """ sample the posterior distribution of the mortality probability """

        # find values of mortality probability at which the posterior should be evaluated
        self._mortalitySamples = np.random.uniform(low=POST_L,
                                                   high=POST_U,
                                                   size=POST_N)

        # create a multi cohort
        multiCohort = SurvivalCls.MultiCohort(
            ids=self._cohortIDs,
            pop_sizes=[POST_N] * POST_N,
            mortality_probs=self._mortalitySamples)

        # simulate the multi cohort
        multiCohort.simulate(TIME_STEPS)

        # calculate the likelihood of each simulated cohort
        for i in self._cohortIDs:

            # get the 5-year OS for this cohort
            survival = multiCohort.get_cohort_FIVEyear_OS(i)

            # construct weight utilizing study's k and n; and simulated five-year OS
            weight = stat.binom.pmf(k=STUDY_K, n=STUDY_N, p=survival)

            # store the weight
            self._weights.append(weight)

        # normalize the likelihood weights
        sum_weights = np.sum(self._weights)
        self._normalizedWeights = np.divide(self._weights, sum_weights)

        # re-sample mortality probability (with replacement) according to likelihood weights
        self._mortalityResamples = np.random.choice(a=self._mortalitySamples,
                                                    size=NUM_SIM_COHORTS,
                                                    replace=True,
                                                    p=self._normalizedWeights)

        # produce the list to report the results
        for i in range(0, len(self._mortalitySamples)):
            self._csvRows.append([
                self._cohortIDs[i], self._normalizedWeights[i],
                self._mortalitySamples[i]
            ])

        # write the calibration result into a csv file
        InOutSupport.write_csv('CalibrationResults.csv', self._csvRows)
Example #17
def main(n, k, cin, cout, option):
    pin = cin / n
    pout = cout / n

    B = np.ones((k, k)) * pout

    for i in range(k):
        B[i, i] = pin

    print B

    V = np.arange(n)
    C0 = np.ones(n)
    C0[:n / 2] = 0

    print C0
    # Stochastic block model
    A = q1.stochastic_block_model(V, C0, B)

    # Clustering
    C1 = clusp.clustering_spectral(A, k, option)
Example #18
    def solver(self, load_from_file=True):
        for acti_fn in ['relu', 'sigmoid', 'linear', 'tanh']:
            if not load_from_file:
                model = Q1.MyNeuralNetwork(5, [784, 256, 128, 64, 10], acti_fn,
                                           0.1, 'normal', 500, 100)
                model.fill_testing_data(self.X_test, self.y_test, verbose=1)
                print('-------' * 8)
                print('Beginning fit for activation function', acti_fn)
                model.fit(self.X_train, self.y_train)
                joblib.dump(model, 'models/saved_model_' + acti_fn)

            else:
                model = joblib.load('models/saved_model_' + acti_fn)

            err = model.get_errors_featres()
            self.__plot_errorVSepochs(err[0], err[1],
                                      'Activation function = ' + acti_fn)

            NNfeatures = TSNE(n_components=2).fit_transform(err[2])
            self.__plot_cluster(NNfeatures, self.y_test,
                                'Features using ' + acti_fn)

            print('Accuracy for', acti_fn, '= ',
                  model.score(self.X_test, self.y_test) * 100, '%')
Example #19
def plotMesure(nom):
    temps_debut = time.time()
    return Q1.p(nom), time.time() - temps_debut
Example #20
    Mz = N

    tic = time.clock()
    x, y, z, pts, hexes = hex_cube(0, 1, 0, 1, 0, 1, Mx, My, Mz)
    bc_nodes = set_bc_nodes_cube(pts)
    toc = time.clock()
    print "Mesh generation took %f s" % (toc - tic)

    # forcing term at mesh nodes
    f_pts = f(pts)
    # Dirichlet boundary conditions at *all* mesh nodes (most of these values are not used)
    u_bc_pts = u_bc(pts)

    # Set up the system and solve:
    tic = time.clock()
    E = Q1.Q13DEquallySpaced(Q1.Gauss2x2x2())
    A, b = poisson.poisson(pts,
                           hexes,
                           bc_nodes,
                           f_pts,
                           u_bc_pts,
                           E,
                           scaling=1.0)
    toc = time.clock()
    print "Matrix assembly took %f s" % (toc - tic)

    tic = time.clock()
    u = spsolve(A.tocsr(), b)
    toc = time.clock()
    print "Sparse solve took %f s" % (toc - tic)
Example #21
import Q1
import time
import sys

print("Please enter the size of the matrix you want to generate : ")
inp = input()
n = int(inp)
# n = 0
print("Random Sparse Matrix is generating .....")
matrix_1 = Q1.generate_sparse_matrix(n, 90)
matrix_2 = Q1.generate_sparse_matrix(n, 90)
print("Matrix 1 :")
print(matrix_1)

print("Matrix 2 :")
print(matrix_2)

print("Converting the Matrices to 'CRS' format......")
crs_1 = Q1.matrix_to_crs(matrix_1)
crs_2 = Q1.matrix_to_crs(matrix_2)

print("Random Dense Vector is Generating")
vector = Q1.generate_dense_vector(n, 90)

print("Matrix Vector Multiplication Sparse Algorithm Running....")
time_sparse_start = time.time()
result_vector_sparse = Q1.matrix_X_vector_sparse_algorithm(crs_1, vector)
time_sparse_end = time.time()

elapsed_time_sparse = (time_sparse_end - time_sparse_start) * 1000.0  #in ms
Example #22
 def test_words_anagram_1(self):
     self.assertEqual(Q1.are_words_anagram(" Aba...", "baa"), True)
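The test implies that case and non-letter characters are ignored. A minimal sketch consistent with it (not necessarily the repo's Q1 implementation):

def are_words_anagram(a, b):
    # Compare only the letters, case-insensitively.
    clean = lambda s: sorted(c.lower() for c in s if c.isalpha())
    return clean(a) == clean(b)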
Example #23
File: Q2.py Project: J-Larose/SchoolWork

def ImportTxtFile():
    lines = []
    # Prompt until a readable file name is given.
    while True:
        fileName = input("What is the ascii file name: ")
        try:
            # Add each line of the text file to the list,
            # stripping the trailing newline as we go.
            with open(fileName, "r") as file:
                for txtLine in file:
                    lines.append(txtLine.rstrip("\n"))
            break
        except IOError:
            print(
                "ERR: Something has gone wrong (that might not be a valid file name)"
            )
        except Exception:
            e = sys.exc_info()[0]
            print("ERR", e)

    return lines


lines = ImportTxtFile()
lines = Q1.InsertSort(lines)
Example #24
def test_sf():
    assert Q1.smallest_factor(15) == 3, "failed on positive odd integers"
    assert Q1.smallest_factor(6) == 2, "failed on integers less than 8"
    assert Q1.smallest_factor(32) == 2
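For reference, a minimal smallest_factor() consistent with these assertions, using trial division; this is a sketch, not necessarily the course's Q1 implementation.

def smallest_factor(n):
    # Return the smallest prime factor of n (n > 1).
    if n % 2 == 0:
        return 2
    f = 3
    while f * f <= n:
        if n % f == 0:
            return f
        f += 2
    return n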
Example #25
 def testEmptyCase(self):
     empty_dictionary = []
     with self.assertRaises(ValueError):
         Q1.find_ordered_alphabet(empty_dictionary)
Example #26
File: Q1_test.py Project: ustya-k/codeu
 def testNoneCases(self):
     self.assertEqual(None, Q1.get_ancestors(self.binary_tree, 13))
Example #27
File: main.py Project: shirazavishai/Trie
import Q1
import Q1d
import Q2

##Trie
print("Q1:")
trie = Q1.TrieNode(' ')
print("Load Words : ")
Q1.load_words_and_add(trie)
print(" %d words" % Q1.SUM)
Q1.print_all(trie)
print("")
Q1.print_all_reverse(trie)
print("done Q1")

#Trie using Patricia
print("\nQ1d:")
patricia = Q1d.Patricia('')
print("Add words: ")
Q1d.start_to_add(patricia, trie)
print("Number of nodes in patricia tree = ", Q1d.NODES_P)
print("Number of nodes in trie = ", Q1.NODES)
print("done Q1d")

Example #28
File: Q1_test.py Project: ustya-k/codeu
 def testBaseCases(self):
     self.assertEqual([], Q1.get_ancestors(self.binary_tree, 16))
     self.assertEqual([3, 9, 16], Q1.get_ancestors(self.binary_tree, 5))
     self.assertEqual([16], Q1.get_ancestors(self.binary_tree, 18))
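Together with Example #26, these tests fix the contract: ancestors are returned nearest-first, the root has [], and a missing value yields None. A minimal sketch consistent with that, assuming the tree built in setUp (Example #9) is a binary search tree and that nodes expose their key in a value attribute (the attribute name is a guess):

def get_ancestors(tree, value):
    # Walk the BST; collect ancestors nearest-first.
    path = []
    node = tree
    while node is not None:
        if value == node.value:  # assumes nodes store their key in .value
            return path
        path.insert(0, node.value)
        node = node.left if value < node.value else node.right
    return None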
Example #29
 def testCornerCases(self):
     self.assertTrue(Q1.if_permutation('', ''))
     self.assertTrue(Q1.if_permutation('abb', 'bba'))
     self.assertFalse(Q1.if_permutation('abb', 'bbba'))
Example #30
 def testWrongOrderCase(self):
     """Tests a case with wrong order of words in a dictionary."""
     # Test the given example
     wrong_dictionary = ["ART", "RAT", "CRT", "CAR"]
     with self.assertRaises(ValueError):
         Q1.find_ordered_alphabet(wrong_dictionary)
Example #31
 def testExampleCases(self):
     self.assertTrue(Q1.if_permutation('Listen', 'Silent'))
     self.assertTrue(Q1.if_permutation('Triangle', 'Integral'))
     self.assertFalse(Q1.if_permutation('Apple', 'Pabble'))
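These cases, together with Example #29, imply a case-insensitive permutation check. A one-line sketch consistent with them (not necessarily the repo's Q1 code):

def if_permutation(a, b):
    # Sort the lower-cased characters and compare.
    return sorted(a.lower()) == sorted(b.lower())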