Example #1
    def draw(self):
        """
        if sample_priors = True and random_sample = True:
           draw returns a random draw of a categorical distribution with parameters drawn from a Dirichlet distribution
           the hyperparameters on the Dirichlet are given by the bandit's metric with laplacian smoothing
        if sample_priors = False and random_sample = True:
            draw returns a random draw of a categorical distribution with parameters given by the bandit's metric
        if sample_priors = True and random_sample = False:
            draw returns argmax(random.dirichlet((x_0 + 1, ... , x_n_arms + 1))) where x_i is the ith value returned by
            the bandit's metric.
        if sample_priors = False and random_sample = False:
            become a purely greedy bandit with the selected arm given by argmax(metric)

        :return: The numerical index of the selected arm
        """
        temp = self._schedule_fn(self.total_draws)
        x = array(self._metric_fn()) * temp + 1

        if self.sample_priors:
            pvals = random.dirichlet(x)
        else:
            pvals = x / sum(x)

        if self.random_sample:
            return argmax(random.multinomial(1, pvals=pvals))
        else:
            return argmax(pvals)
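
A minimal standalone sketch of the four modes above, using plain NumPy; the metric values and the flat schedule below are made-up stand-ins for the bandit's _metric_fn and _schedule_fn:

import numpy as np

metric = np.array([3.0, 1.0, 6.0])        # stand-in for self._metric_fn()
x = metric * 1.0 + 1                      # flat schedule, Laplace smoothing

for sample_priors in (True, False):
    for random_sample in (True, False):
        pvals = np.random.dirichlet(x) if sample_priors else x / sum(x)
        if random_sample:
            arm = np.argmax(np.random.multinomial(1, pvals=pvals))
        else:
            arm = np.argmax(pvals)        # purely greedy when both flags are False
        print(sample_priors, random_sample, arm)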
Example #2
def bandpower(f, Pxx, fmin, fmax):
    """ integrate the power spectral density between fmin and fmax
        using the trapezoidal method
    """
    ind_min = scipy.argmax(f > fmin) - 1   # last index with f <= fmin (assumes fmin lies inside f's range)
    ind_max = scipy.argmax(f > fmax) - 1   # last index with f <= fmax
    return scipy.trapz(Pxx[ind_min: ind_max], f[ind_min: ind_max])
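
A hedged usage sketch, with f and Pxx as produced by scipy.signal.welch; the sampling rate and band limits below are only illustrative:

import numpy as np
from scipy import signal

fs = 256.0                                    # sampling rate in Hz
t = np.arange(0, 10, 1.0 / fs)
x = np.sin(2 * np.pi * 10 * t) + 0.5 * np.random.randn(t.size)   # 10 Hz tone plus noise
f, Pxx = signal.welch(x, fs=fs, nperseg=1024)
print(bandpower(f, Pxx, 8.0, 12.0))           # power concentrated in the 8-12 Hz band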
def plot_disc_policy():
    #First compute policy function...==========================================
    N = 500
    w = sp.linspace(0,100,N)
    w = w.reshape(N,1)
    u = lambda c: sp.sqrt(c)
    util_vec = u(w)
    alpha = 0.5
    alpha_util = u(alpha*w)
    alpha_util_grid = sp.repeat(alpha_util,N,1)
    
    m = 20
    v = 200
    f = discretelognorm(w,m,v)
    
    VEprime = sp.zeros((N,1))
    VUprime    = sp.zeros((N,N))
    EVUprime = sp.zeros((N,1))
    psiprime = sp.ones((N,1))
    gamma = 0.1
    beta = 0.9
    
    m = 15
    tol = 10**-9
    delta = 1+tol
    it = 0
    while (delta >= tol):
        it += 1
        
        psi = psiprime.copy()
        arg1 = sp.repeat(sp.transpose(VEprime),N,0)
        arg2 = sp.repeat(EVUprime,N,1)
        arg = sp.array([arg2,arg1])
        psiprime = sp.argmax(arg,axis = 0) 
        
        for j in sp.arange(0,m):
            VE = VEprime.copy()
            VU = VUprime.copy()
            EVU = EVUprime.copy()
            VEprime = util_vec + beta*((1-gamma)*VE + gamma*EVU)
            arg1 = sp.repeat(sp.transpose(VE),N,0)*psiprime
            arg2 = sp.repeat(EVU,N,1)*(1-psiprime)
            arg = arg1+arg2
            VUprime = alpha_util_grid + beta*arg
            EVUprime = sp.dot(VUprime,f)  
    
        
    
        delta = sp.linalg.norm(psiprime -psi) 

    wr_ind = sp.argmax(sp.diff(psiprime), axis = 1)
    wr = w[wr_ind]
    print w[250],wr[250]
        
    #Then plot=================================================================
    plt.plot(w,psiprime[250,:]) 
    plt.ylim([-.5,1.5])      
    plt.xlabel(r'$w\prime$')
    plt.yticks([0,1])
    plt.savefig('disc_policy.pdf')
	def testPercentErrorIsSame(self):
		NN.pat = zip(self.trn_d['input'], self.trn_d['target'])		
		pyb_ws = self.net.params.copy()
		nn = NN()
		nn.wi = pyb_ws[:nn.wi.size].reshape(NN.nh, NN.ni).T
		nn.wo = pyb_ws[nn.wi.size:].reshape(NN.no, NN.nh).T
		correct = 0
		wrong = 0
		argmax_cor = 0
		argmax_wng = 0
		all_aos = []
		for i, x in enumerate(self.trn_d['input']):
			nn.activate(x)
			out = self.net.activate(x)
			# print 'ga bp trg', nn.ao, out, self.trn_d['target'][i], '++++' if not (out - self.trn_d['target'][i]).any() else '-'
			all_aos.append(nn.ao.copy())
			if not (out - self.trn_d['target'][i]).any():
				correct += 1
			else:
				wrong += 1
			if argmax(out) == argmax(self.trn_d['target'][i]):
				argmax_cor += 1
			else:
				argmax_wng += 1
		print 'actual', wrong, 'wrong', correct, 'correct', float(wrong) / (wrong + correct) * 100
		print 'using argmax', argmax_wng, 'wrong', argmax_cor, 'correct', float(argmax_wng) / (argmax_wng + argmax_cor) * 100
		argmax_perc_err = float(argmax_wng) / (argmax_wng + argmax_cor) * 100
		res = nn.sumErrors()
		nn_perc_err = 100 - res[1]
		pb_nn_perc_err = percentError(self.trainer.testOnClassData(), self.trn_d['class'])
		self.assertAlmostEqual(nn_perc_err, pb_nn_perc_err)
		self.assertAlmostEqual(nn_perc_err, argmax_perc_err)
 def plot_REL_ERR_SU2(self,which_case):
      thermo1 = self.select[which_case][0]
      thermo2 = self.select[which_case][1]
      get_REL_ERR_SU2(self,which_case)
      
      print 'Median error SU2', sp.median(self.REL_ERR)
      print 'Mean error SU2', sp.mean(self.REL_ERR)
      print 'Max error SU2', max(self.REL_ERR)
      print 'Min error SU2', min(self.REL_ERR)
      x = getattr(self.SU2[which_case],thermo1)
      y = getattr(self.SU2[which_case],thermo2)
      # keep only points whose error is below 90% of the max (reconstructed from the commented-out line)
      trusted_values = sp.where(self.REL_ERR < 0.9*max(self.REL_ERR))
      self.REL_ERR = self.REL_ERR[trusted_values]
      x = x[trusted_values]
      y = y[trusted_values]
      scat=plt.scatter(x,y,c=self.REL_ERR, s=1)                
      plt.grid(which='both')
      scat.set_array(self.REL_ERR)        
      plt.colorbar(scat)
      plt.xlim((min(x)*0.95,max(x)*1.05));
      plt.ylim((min(y)*0.95,max(y)*1.05));
      print 'x argmax %i , x_val: %f ' %(sp.argmax(self.REL_ERR),x[sp.argmax(self.REL_ERR)])
      print 'y argmax %i , y_val: %f ' %(sp.argmax(self.REL_ERR),y[sp.argmax(self.REL_ERR)])
      return;
    def error(self, results, expected):

        err = 0
        for i in range(results.shape[1]):

            err += self.lsexp(results[:, i]) - sp.dot(expected[:, i], results[:, i])

        misclassified = sp.sum(sp.argmax(results, axis=0) != sp.argmax(expected, axis=0))

        return err, misclassified
Example #7
def Problem6Real():
    N = 500
    w = sp.linspace(0,100,N)
    w = w.reshape(N,1)
    u = lambda c: sp.sqrt(c)
    util_vec = u(w)
    alpha = 0.5
    alpha_util = u(alpha*w)
    alpha_util_grid = sp.repeat(alpha_util,N,1)
    
    m = 20
    v = 200
    f = discretelognorm(w,m,v)
    
    VEprime = sp.zeros((N,1))
    VUprime    = sp.zeros((N,N))
    EVUprime = sp.zeros((N,1))
    psiprime = sp.ones((N,1))
    gamma = 0.1
    beta = 0.9
    
    m = 15
    tol = 10**-9
    delta = 1+tol
    it = 0
    while (delta >= tol):
        it += 1
        
        psi = psiprime.copy()
        arg1 = sp.repeat(sp.transpose(VEprime),N,0)
        arg2 = sp.repeat(EVUprime,N,1)
        arg = sp.array([arg2,arg1])
        psiprime = sp.argmax(arg,axis = 0)    
        
        for j in sp.arange(0,m):
            VE = VEprime.copy()
            VU = VUprime.copy()
            EVU = EVUprime.copy()
            VEprime = util_vec + beta*((1-gamma)*VE + gamma*EVU)
            arg1 = sp.repeat(sp.transpose(VE),N,0)*psiprime
            arg2 = sp.repeat(EVU,N,1)*(1-psiprime)
            arg = arg1+arg2
            VUprime = alpha_util_grid + beta*arg
            EVUprime = sp.dot(VUprime,f)  
    
        
    
        delta = sp.linalg.norm(psiprime -psi)
        #print(delta)    
        
    wr_ind = sp.argmax(sp.diff(psiprime), axis = 1)
    wr = w[wr_ind]
    plt.plot(w,wr)
    plt.show()
    return wr
Example #8
File: mir.py Project: zangsir/pymir
def pitch(x, fs, pitchrange=[12,120], mode='corr'):
    if mode=='corr':
        corr = scipy.correlate(x, x, mode='full')[len(x)-1:]
        corr[:int(fs/midi2hz(pitchrange[1]))] = 0
        corr[int(fs/midi2hz(pitchrange[0])):] = 0
        indmax = scipy.argmax(corr)
    elif mode=='ceps':
        y = rceps(x)
        y[:int(fs/midi2hz(pitchrange[1]))] = 0
        y[int(fs/midi2hz(pitchrange[0])):] = 0
        indmax = scipy.argmax(y)
    return hz2midi(fs/indmax)
Example #9
def generate(hmm,observation_space,n_sim):
	A = hmm[0]
	B = hmm[1]
	pi = hmm[2]
	states = sp.zeros(n_sim).astype(int)  # integer indices, so states can index A and B
	observations = []
	states[0] = sp.argmax(sp.random.multinomial(1,pi))
	observations.append(observation_space[sp.argmax(sp.random.multinomial(1,B[states[0],:]))])
	for i in range(1,n_sim):
		states[i] = sp.argmax(sp.random.multinomial(1,A[states[i-1],:]))
		observations.append(observation_space[sp.argmax(sp.random.multinomial(1,B[states[i],:]))])
	return states,observations
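
A quick usage sketch with a made-up two-state HMM, passed as the (A, B, pi) tuple the function expects:

import scipy as sp

A = sp.array([[0.7, 0.3],
              [0.4, 0.6]])           # state transition matrix
B = sp.array([[0.9, 0.1],
              [0.2, 0.8]])           # per-state emission probabilities
pi = sp.array([0.5, 0.5])            # initial state distribution
states, observations = generate((A, B, pi), ['cold', 'hot'], 20)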
Example #10
def calcError(trainer, dataset=None):
    if dataset is None:
        dataset = trainer.ds
    dataset.reset()
    out = []
    targ = []
    for seq in dataset._provideSequences():
        trainer.module.reset()
        for input, target in seq:
            res = trainer.module.activate(input)
            out.append(argmax(res))
            targ.append(argmax(target))
    return percentError(out, targ) / 100
Example #11
def generateGaussianHMM(hmm,n_sim):
	A = hmm[0]
	means = hmm[1]
	covars = hmm[2]
	pi = hmm[3]
	states = sp.zeros(n_sim).astype(int)
	K = len(means[0,:])
	observations = sp.zeros((n_sim,K))
	states[0] = int(sp.argmax(sp.random.multinomial(1,pi)))
	observations[0,:] = sp.random.multivariate_normal(means[states[0],:],covars[states[0],:,:])
	for i in range(1,n_sim):
		states[i] = int(sp.argmax(sp.random.multinomial(1,A[states[i-1],:])))
		observations[i,:] = sp.random.multivariate_normal(means[states[i],:],covars[states[i],:,:])
	return states,observations
Example #12
def crosscorr_phase_angle(sig1, sig2, x, max_length=10000):
    """Return the cross correlation phase angle between 2 signals

    Parameters
    ----------
    sig1 : array
        signal of length L
    sig2 : array
        another signal of length L
    x : array
        time axis for the signals sig1 and sig2
    max_length : int, optional
        Maximum length for the signals, signals are resampled otherwise.
        Default is 10 000.
    """
    assert len(sig1) == len(sig2) == len(x), \
        "The signals don't have the same length."
    sig_length = len(sig1)
    # Resample if signal is too big thus slowing down correlation computation
    if sig_length > max_length:
        sig1, x = resample(sig1, max_length, x)
        sig2 = resample(sig2, max_length)
        sig_length = max_length
    corr = np.correlate(sig1, sig2, mode="same")
    xmean = sig_length/2
    return float(argmax(corr) - xmean)/sig_length*x[-1]  # *x[-1] to scale
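
A small self-check, assuming the function's bare argmax comes from NumPy; the recovered lag should be close to the 20 ms shift between the two sines, up to the sign convention of the correlation:

import numpy as np

x = np.linspace(0, 1, 1000)                   # 1 s time axis
sig1 = np.sin(2 * np.pi * 5 * x)              # 5 Hz sine
sig2 = np.sin(2 * np.pi * 5 * (x - 0.02))     # the same sine delayed by 20 ms
print(crosscorr_phase_angle(sig1, sig2, x))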
def tag_images_with_color_value(NUM_CLUSTERS = 4, INPUT_FOLDER = './data/covers/'):

    isbn = list()
    cover_color = list()

    files = os.listdir(INPUT_FOLDER)
    for eachFile in files:
        print eachFile
        im = Image.open(INPUT_FOLDER + eachFile)
        im = im.resize((50, 50))                          # optional, to reduce time
        ar = scipy.misc.fromimage(im)
        shape = ar.shape
        print len(shape)

        if len(shape) == 2:
            ar = ar.reshape(scipy.product(shape[:1]), shape[1])
        else:
            ar = ar.reshape(scipy.product(shape[:2]), shape[2])

        # finding clusters
        codes, dist = scipy.cluster.vq.kmeans(ar, NUM_CLUSTERS)
        # print 'cluster centres:\n', codes

        vecs, dist = scipy.cluster.vq.vq(ar, codes)         # assign codes
        counts, bins = scipy.histogram(vecs, len(codes))    # count occurrences

        index_max = scipy.argmax(counts)                    # find most frequent
        peak = codes[index_max]
        colour = ''.join(chr(int(c)) for c in peak).encode('hex')

        isbn.append(eachFile[:-4])
        cover_color.append(colour)

    result = zip(isbn, cover_color)
    return result
Example #14
def Problem3Real():
    beta = 0.9
    N = 1000
    u = lambda c: sp.sqrt(c)
    W = sp.linspace(0,1,N)
    X, Y = sp.meshgrid(W,W)
    Wdiff = sp.transpose(X-Y)
    index = Wdiff <0
    Wdiff[index] = 0
    util_grid = u(Wdiff)
    util_grid[index] = -10**10
    
    Vprime = sp.zeros((N,1))
    psi = sp.zeros((N,1))
    delta = 1.0
    tol = 10**-9
    it = 0
    max_iter = 500
    
    while (delta >= tol) and (it < max_iter):
        V = Vprime
        it += 1;
        #print(it)
        val = util_grid + beta*sp.transpose(V)
        Vprime = sp.amax(val, axis = 1)
        Vprime = Vprime.reshape((N,1))
        psi_ind = sp.argmax(val,axis = 1)
        psi    = W[psi_ind]
        delta = sp.dot(sp.transpose(Vprime - V),Vprime-V)
    
    return psi
Example #15
 def _box_cox_transform(self, verbose=False, method='standard'):
     """
     Performs the Box-Cox transformation, over different ranges, picking the optimal one w. respect to normality.
     """
     from scipy import stats
     a = sp.array(self.values)
     if method == 'standard':
         vals = (a - min(a)) + 0.1 * sp.var(a)
     else:
         vals = a
     sw_pvals = []
     lambdas = sp.arange(-2.0, 2.1, 0.1)
     for l in lambdas:
         if l == 0:
             vs = sp.log(vals)
         else:
             vs = ((vals ** l) - 1) / l
         r = stats.shapiro(vs)
         if sp.isfinite(r[0]):
             pval = r[1]
         else:
             pval = 0.0
         sw_pvals.append(pval)
     i = sp.argmax(sw_pvals)
     l = lambdas[i]
     if l == 0:
         vs = sp.log(vals)
     else:
         vs = ((vals ** l) - 1) / l
     self._perform_transform(vs,"box_cox")
     log.debug('optimal lambda was %0.1f' % l)
     return True
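
The same lambda search can be sanity-checked standalone; note that scipy.stats.boxcox picks lambda by maximum likelihood rather than by Shapiro-Wilk normality, so the two estimates need not agree exactly:

import scipy as sp
from scipy import stats

vals = sp.random.exponential(1.0, 500) + 0.1        # skewed, strictly positive sample
lambdas = sp.arange(-2.0, 2.1, 0.1)
sw_pvals = [stats.shapiro(sp.log(vals) if abs(l) < 1e-12 else ((vals ** l) - 1) / l)[1]
            for l in lambdas]
print('Shapiro-Wilk lambda: %0.1f' % lambdas[sp.argmax(sw_pvals)])
print('max-likelihood lambda: %0.2f' % stats.boxcox(vals)[1])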
Example #16
 def most_normal_transformation(self,trans_types=SUPPORTED_TRANSFORMATIONS,
             perform_trans=True, verbose=False):
     """
     Performs the transformation which results in most normal looking data, according to Shapiro-Wilk's test
     """
     from scipy import stats
     shapiro_pvals = []
     for trans_type in trans_types:
         if trans_type == 'most_normal':
             continue
         if trans_type != 'none':
             if not self.transform(trans_type=trans_type):
                 continue
         phen_vals = self.values
         #print 'sp.inf in phen_vals:', sp.inf in phen_vals
         if sp.inf in phen_vals:
             pval = 0.0
         else:
             r = stats.shapiro(phen_vals)
             if sp.isfinite(r[0]):
                 pval = r[1]
             else:
                 pval = 0.0
         shapiro_pvals.append(pval)
         if trans_type != 'none':
             self.revert_to_raw_values()
      best_i = sp.argmax(shapiro_pvals)
      trans_type = trans_types[best_i]
      shapiro_pval = shapiro_pvals[best_i]
     if perform_trans:
         self.transform(trans_type=trans_type)
     log.info("The most normal-looking transformation was %s, with a Shapiro-Wilk's p-value of %.2E" % \
             (trans_type, shapiro_pval))
     return trans_type, shapiro_pval
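
A minimal standalone version of the same idea, scoring a few candidate transforms of a skewed sample with Shapiro-Wilk; the transform set here is illustrative:

import scipy as sp
from scipy import stats

vals = sp.random.lognormal(0.0, 1.0, 500)     # right-skewed sample
transforms = {'none': lambda v: v, 'sqrt': sp.sqrt, 'log': sp.log}
pvals = [stats.shapiro(fn(vals))[1] for fn in transforms.values()]
print('most normal-looking:', list(transforms)[sp.argmax(pvals)])   # 'log' for lognormal data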
Example #17
def getDominantColor(img_url):
    if r.exists(img_url):
        cache_result = r.hmget(img_url, ['r', 'g', 'b'])
        return cache_result
        
    NUM_CLUSTERS = 5
    im = Image.open(StringIO.StringIO(urllib2.urlopen(img_url).read()))
    img_arr = scipy.misc.fromimage(im)
    img_shape = img_arr.shape
    
    if len(img_shape) > 2:
        img_arr = img_arr.reshape(scipy.product(img_shape[:2]), img_shape[2])
    
    codes, _ = scipy.cluster.vq.kmeans(img_arr, NUM_CLUSTERS)
    
    original_codes = codes
    for low, hi in [(60, 200), (35, 230), (10, 250)]:
        codes = scipy.array([code for code in codes if not (all([c < low for c in code]) or all([c > hi for c in code]))])
        if not len(codes):
            codes = original_codes
        else:
            break

    vecs, _ = scipy.cluster.vq.vq(img_arr, codes)
    counts, bins = scipy.histogram(vecs, len(codes))

    index_max = scipy.argmax(counts)
    peak = codes[index_max]
    color = [int(c) for c in peak[:3]]
    r.hmset(img_url, {'r':color[0], 'g':color[1], 'b':color[2]})
    #r.expire(img_url, 86400)
    return color
Example #18
def digshc(docs, alpha, threshold, epsilon, hr_min):
    # Worth noting that this takes in plaintext documents, _not_ vectors.
    shc = SimilarityHistogramClusterer(alpha, threshold, epsilon, hr_min)

    for doc in docs:
        shc.fit(doc)

    doc_clus_map = {}
    for idx, clus in enumerate(shc.formed_clusters):
        for doc_id in clus.doc_ids:
            doc_clus_map.setdefault(doc_id, [])
            doc_clus_map[doc_id].append(idx)

    labels = []
    for id in sorted(doc_clus_map):
        cluster_ids = doc_clus_map[id]
        if len(cluster_ids) == 1:
            best_cl_id = cluster_ids[0]
        else:
            clusters = [shc.get_cluster(cl_id) for cl_id in cluster_ids]
            sims = [shc.get_cluster_sim(cl, shc.get_doc(id)) for cl in clusters]
            max_i = argmax(sims)
            best_cl_id = clusters[max_i].id
        labels.append(best_cl_id)

    return labels
Example #19
def Problem1Real():
    beta = 0.9
    T = 10
    N = 100
    u = lambda c: sp.sqrt(c)
    W = sp.linspace(0,1,N)
    X, Y = sp.meshgrid(W,W)
    Wdiff = Y-X
    index = Wdiff < 0
    Wdiff[index] = 0
    util_grid = u(Wdiff)
    util_grid[index] = -10**10
    V = sp.zeros((N,T+2))
    psi = sp.zeros((N,T+1))

    for k in xrange(T,-1,-1):
        val = util_grid + beta*sp.tile(sp.transpose(V[:,k+1]),(N,1))
        vt = sp.amax(val, axis = 1)
        psi_ind = sp.argmax(val,axis = 1)
        V[:,k] = vt
        psi[:,k] = W[psi_ind]

    return V,psi
Example #20
def get_dominant_color(image_path):
    '''
    Parse image and return dominant color in image.

    @param image_path: Image path to parse.
    @return: Return dominant color, format as hexadecimal number. 
    '''
    # print 'reading image'
    im = Image.open(image_path)
    im = im.resize((150, 150))      # optional, to reduce time
    ar = scipy.misc.fromimage(im)
    shape = ar.shape
    ar = ar.reshape(scipy.product(shape[:2]), shape[2])
    
    # print 'finding clusters'
    NUM_CLUSTERS = 5
    codes, dist = scipy.cluster.vq.kmeans(ar, NUM_CLUSTERS)
    # print 'cluster centres:\n', codes
    
    vecs, dist = scipy.cluster.vq.vq(ar, codes)         # assign codes
    counts, bins = scipy.histogram(vecs, len(codes))    # count occurrences
    
    index_max = scipy.argmax(counts)                    # find most frequent
    peak = codes[index_max]
    colour = ''.join(chr(int(c)) for c in peak).encode('hex')
    # print 'most frequent is %s (#%s)' % (peak, colour)
    
    return "#%s" % (colour[0:6])
Example #21
def par_further(tc, depth):
    import scipy as SP

    dview = tc[:]
    dview.block = True
    depths = SP.array(dview.apply(further, *[depth]))
    return depths[SP.argmax(depths)]
 def generate_threshold_mesh(self, min_value=0.0, max_value=1.0e9):
     r"""
     Generates a mesh excluding all blocks below the min_value arg. Regions
     that are isolated by the thresholding are also automatically removed.
     """
     #
     # thresholding the data and then checking for isolated clusters
     self._field.threshold_data(min_value, max_value, repl=0.0)
     self._field.copy_data(self)
     #
     adj_matrix = self._field.create_adjacency_matrix()
     num_cs, cs_ids = csgraph.connected_components(csgraph=adj_matrix,
                                                   directed=False)
     # only saving the largest cluster
     if num_cs > 1:
         cs_count = sp.zeros(num_cs, dtype=int)
         for cs_num in cs_ids:
             cs_count[cs_num] += 1
         self.data_vector[sp.where(cs_ids != sp.argmax(cs_count))[0]] = 0.0
         self.data_map = sp.reshape(self.data_vector, (self.nz, self.nx))
     #
     self._field.data_map = self.data_map
     self._field.data_vector = sp.ravel(self.data_map)
     #
     # generating blocks and vertices
     mask = self.data_map > 0.0
     self._generate_masked_mesh(cell_mask=mask)
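
The keep-largest-cluster step can be exercised on its own with a small sparse adjacency matrix; the four-node graph below is made up:

import numpy as np
import scipy.sparse as sprs
from scipy.sparse import csgraph

# two components: nodes 0-1-2 form a chain, node 3 is isolated
adj = sprs.coo_matrix(([1, 1], ([0, 1], [1, 2])), shape=(4, 4))
num_cs, cs_ids = csgraph.connected_components(csgraph=adj, directed=False)
keep = cs_ids == np.argmax(np.bincount(cs_ids))    # mask of the largest component
print(num_cs, cs_ids, keep)                        # 2 [0 0 0 1] [ True  True  True False]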
Example #23
	def classify(self, xL, xR):
		a1L, a1R, a2L, a2LR, a2R, a3, z1Lb, z1LRb, z1Rb, z2b, xLb, xRb = self.forward_pass(xL, xR)
		if self.k == 2:
			classif = sp.sign(a3)
		else:
			classif = sp.argmax(a3, axis=0)
		return a3, classif
	def classify(self, xL, xR):

		x = sp.vstack([xL,xR, sp.ones(xR.shape[1])]).T

		tmp = sp.dot(x, self.w)

		return tmp, sp.argmax(tmp, axis=1)
def displayData(X, theta = None):
    """Display 2D data in a nice grid"""
    width = 20
    rows, cols = 10, 10
    out = sp.zeros((width * rows, width * cols))

    rand_indices = sp.random.permutation(5000)[0:rows * cols]

    counter = 0
    for y in range(0, rows):
        for x in range(0, cols):
            start_x = x * width
            start_y = y * width
            out[start_x:start_x+width, start_y:start_y+width] = X[rand_indices[counter]].reshape(width, width).T
            counter += 1

    img = sp.misc.toimage(out)
    figure = plt.figure()
    axes = figure.add_subplot(111)
    axes.imshow(img)

    if theta is not None:
        result_matrix = []
        X_biased = sp.c_[sp.ones(X.shape[0]), X]

        for idx in rand_indices:
            result = (sp.argmax(theta.T.dot(X_biased[idx])) + 1) % 10
            result_matrix.append(result)
        result_matrix = sp.array(result_matrix).reshape(rows, cols).transpose()
        print(result_matrix)

    plt.show()
def getPredominantColor(filename):
    im = Image.open(filename).convert('RGB')

    # Convert to numpy array
    ar = scipy.misc.fromimage(im)

    # Get dimensions
    shape = ar.shape

    # Convert to bidimensional array of width x height rows and 3 columns (RGB)
    ar = ar.reshape(scipy.product(shape[:2]), shape[2])

    # Find cluster centers and their distortions
    # codes contains the RGB value of the centers
    codes, dist = scipy.cluster.vq.kmeans(ar.astype(float), NUM_CLUSTERS)

    # Maps all the pixels in the image to their respective centers
    vecs, dist = scipy.cluster.vq.vq(ar, codes)

    # Counts the occurances of each color (NUM_CLUSTER different colors after the mapping)
    counts, bins = scipy.histogram(vecs, len(codes))

    # Find most frequent color
    index_max = scipy.argmax(counts)
    peak = codes[index_max]

    return peak.astype(int)
Example #27
def RREFscaled(mymat):
#    Pdb().set_trace()
    scalevect=scipy.amax(abs(mymat),1)
    scaledrows=[]
    for sf,row in zip(scalevect,mymat):
        row=row/sf
        scaledrows.append(row)
    scaledmat=scipy.vstack(scaledrows)
#    scaledmat=mymat
    nc=scipy.shape(scaledmat)[1]
    nr=scipy.shape(scaledmat)[0]
    for j in range(nr-1):
#        print('=====================')
#        print('j='+str(j))
        pivrow=scipy.argmax(abs(scaledmat[j:,j]))  # search all remaining rows for the pivot
        pivrow=pivrow+j
#        print('pivrow='+str(pivrow))
        if pivrow!=j:
            temprow=copy.copy(scaledmat[j,:])
            scaledmat[j,:]=scaledmat[pivrow,:]
            scaledmat[pivrow,:]=temprow
#        Pdb().set_trace()
        for i in range(j+1,nr):
#            print('i='+str(i))
            scaledmat[i,:]-=scaledmat[j,:]*(scaledmat[i,j]/scaledmat[j,j])
    return scaledmat, scalevect
Example #28
 def testKernelCoeffs(self):
     for scale in [0.35, 0.5, 0.75, 1]:
         for dim in [0,1,2,3,4]:
             dgK = mango.image.discrete_gaussian_kernel(sigma=scale, dim=dim, errtol=0.001)
             self.assertAlmostEqual(1.0, sp.sum(dgK), 8)
             mxElem = sp.argmax(dgK)
             self.assertTrue(sp.all(sp.array(dgK.shape)//2 == sp.unravel_index(mxElem, dgK.shape)))
Example #29
    def autofocus(self,step=5000):
        if self.slide.pos[2] >= 0:  step = -step
        self.slide.moveZ(-step/2)
        z_start = self.slide.pos[2]
        self.frames.fillBuffer()
        self.slide.displaceZ(step)
        z_frames = self.frames.getBuffer()

        #sample every kth plus its lth neighbor:  for k=10,l=2 sample frame 0,10,20 and 2,12,22
        k = 10
        l = 2
        sample_ind = [ind*k for ind in range(len(z_frames)//k)]
        sample_ind2 = [ind*k+l for ind in range(len(z_frames)//k)]
        f = [z_frames[ind] for ind in sample_ind]
        f2 = [z_frames[ind] for ind in sample_ind2]
        n = len(f)
        diffs = []
        for i in range(n-2):
            diffs.append(ImageChops.difference(f[i],f2[i]))
        motion = []
        for f in diffs:
            f = ImageChops.multiply(f,self.curr_mask)
            motion.append(ImageStat.Stat(f).sum[0])
        #g = Gnuplot.Gnuplot()
        #g.plot(motion)

        max_frame = scipy.argmax(motion)
        max_focus = (max_frame/float(n))*step + z_start
        self.slide.moveZ(max_focus)
        return max_focus
Example #30
def getDomIMAGEColor( imName ):
	# Reference:
	# 	http://stackoverflow.com/questions/3241929/
	# 	python-find-dominant-most-common-color-in-an-image

	# number of k-means clusters
	NUM_CLUSTERS = 4

	# Open target image
	im = imName
	im = im.resize((150, 150))      # optional, to reduce time
	ar = scipy.misc.fromimage(im)
	shape = ar.shape
	ar = ar.reshape(scipy.product(shape[:2]), shape[2])
	ar = ar.astype(float)

	# Find clusters
	codes, dist = scipy.cluster.vq.kmeans(ar, NUM_CLUSTERS)
	vecs, dist = scipy.cluster.vq.vq(ar, codes)         # assign codes
	counts, bins = scipy.histogram(vecs, len(codes))    # count occurrences

	# Find most frequent
	index_max = scipy.argmax(counts)                    
	peak = codes[index_max]
	color = ''.join(chr(int(c)) for c in peak).encode('hex')

	return (peak, color)
Example #31
def getObjectColor(img, box, show=False):
    # crop to region of interest
    sub_img = img[int(box[1]):int(box[3]), int(box[0]):int(box[2]), :]
    if show: plt.imshow(sub_img)
    # Run kmeans with 5 clusters to get color
    ar = np.asarray(sub_img)
    shape = ar.shape
    ar = ar.reshape(scipy.product(shape[:2]), shape[2]).astype(float)
    codes, dist = scipy.cluster.vq.kmeans(ar, 5)
    vecs, dist = scipy.cluster.vq.vq(ar, codes)
    counts, bins = scipy.histogram(vecs, len(codes))
    index_max = scipy.argmax(counts)
    peak = codes[index_max]
    # Get the hex version of the color
    color = binascii.hexlify(bytearray(int(c) for c in peak)).decode('ascii')
    requested_colour = tuple(int(color[i:i+2], 16) for i in (0, 2, 4))
    # Determine the name of the color
    actual_name, closest_name = get_colour_name(requested_colour)
    if actual_name is None:
        return closest_name
    return actual_name
def all_saved_news(folder='model'):
    import glob
    from string import digits
    # get just the most recent news articles file (assuming date label ordering)
    news = json.load(open(glob.glob(folder+'/news*.json')[-1],"r"))
    # collect text data from all articles
    articles, data = [], []
    for source in news.keys():
        for title, article in news[source].items():
            # remove numbers
            for d in digits: article['text'] = article['text'].replace(d,'')
            data.append(article['text'])
            predictions = [prediction['probability'] for prediction in article['prediction']]
            articles.append({
                'source':source,
                'title':title,
                'url':article['url'],
                'prediction':article['prediction'],
                'predictedLabel':article['prediction'][argmax(predictions)]['party']
            })
    return articles, data
Example #33
def mcmc(data, iteration):
    freq = list(data)
    presentstate = data.pop()
    for i in range(iteration):
        temp = itemfreq(np.array(freq))
        dic = dict(zip(temp[:, 0], temp[:, 1]))
        priorarray = prior(np.array(data) - presentstate)
        index = scipy.argmax(priorarray)
        nxtstate = data[index]
        acceptance = min(
            (1, dic[nxtstate] /
             dic[presentstate]))  #since we assumed symmetric prior
        cointoss = np.random.random_sample()
        if acceptance >= cointoss:
            data.append(presentstate)
            presentstate = nxtstate
            freq.append(nxtstate)
            data.remove(nxtstate)
        else:
            freq.append(presentstate)
    return freq
Example #34
def get_main_colour(filename):
    print('reading image')
    im = Image.open(f'{IMG_DIR}/{filename}')
    im = im.resize((100, 100))  # optional, to reduce time
    ar = np.asarray(im)
    shape = ar.shape
    ar = ar.reshape(scipy.product(shape[:2]), shape[2]).astype(float)

    print('finding clusters')
    codes, dist = scipy.cluster.vq.kmeans(ar, NUM_CLUSTERS)
    print('cluster centres:\n', codes)

    vecs, dist = scipy.cluster.vq.vq(ar, codes)  # assign codes
    counts, bins = scipy.histogram(vecs, len(codes))  # count occurrences

    index_max = scipy.argmax(counts)  # find most frequent
    peak = codes[index_max]  # colour in rgb
    colour = binascii.hexlify(bytearray(int(c) for c in peak)).decode(
        'ascii')  # hex colour

    return tuple(map(int, peak))
Example #35
def fitGaussian1D(vector):
    length = len(vector)

    center_guess = scipy.argmax(vector)
    height_guess = vector.max()
    noise_guess = vector.min()

    guess_params = [
        noise_guess, height_guess - noise_guess, center_guess, length / 6.0
    ]

    max_height = height_guess * 1.1
    #max_width = center_region*1.1
    #max_params = [0,max_height,0,0,max_width,max_width]#,360]
    #use_max = [False, True, False, False, True, True]#, True]

    fits = gaussfitter.onedgaussfit(scipy.arange(0, length),
                                    vector,
                                    params=guess_params)

    return fits[0], fits[1]
Example #36
def digbc(docs, threshold):
    # Worth noting that this takes in plaintext documents, _not_ vectors.
    dig = DocumentIndexGraphClusterer(threshold=threshold)

    for doc in docs:
        dig.index_document(doc)

    doc_clus_map = {}
    for idx, clus in enumerate(dig.formed_clusters):
        for doc_id in clus.doc_ids:
            doc_clus_map.setdefault(doc_id, [])
            doc_clus_map[doc_id].append(idx)

    labels = []
    for id in sorted(doc_clus_map):
        clusters = [dig.get_cluster(cl_id) for cl_id in doc_clus_map[id]]
        sims = [dig.get_cluster_sim(cl, dig.get_doc(id)) for cl in clusters]
        max_i = argmax(sims)
        labels.append(clusters[max_i].id)

    return labels
Example #37
def GetMajorColors(path):
    NUM_CLUSTERS = 10

    im = Image.open(path)
    im = im.resize((150, 150))  # optional, to reduce time
    ar = np.asarray(im)
    shape = ar.shape
    ar = ar.reshape(scipy.product(shape[:2]), shape[2]).astype(float)

    # finding clusters
    codes, dist = scipy.cluster.vq.kmeans(ar, NUM_CLUSTERS)

    vecs, dist = scipy.cluster.vq.vq(ar, codes)  # assign codes
    counts, bins = scipy.histogram(vecs, len(codes))  # count occurrences

    index_max = scipy.argmax(counts)  # find most frequent
    peak = codes[index_max]  # RGB color
    colour = binascii.hexlify(bytearray(int(c) for c in peak)).decode(
        'ascii')  # Hexa color

    return colour
Example #38
def dominantColor(filename):
    NUM_CLUSTERS = 3
    #print ('reading image')
    im = PIL.Image.fromarray(filename)
    #im = PIL.Image.open(filename)
    im = im.resize((25, 25))  # optional, to reduce time
    ar = np.asarray(im)
    shape = ar.shape
    ar = ar.reshape(scipy.product(shape[:2]), shape[2]).astype(float)

    #print ('finding clusters')
    codes, dist = scipy.cluster.vq.kmeans(ar, NUM_CLUSTERS)
    #print ('cluster centres:\n', codes)

    vecs, dist = scipy.cluster.vq.vq(ar, codes)  # assign codes
    counts, bins = scipy.histogram(vecs, len(codes))  # count occurrences

    index_max = scipy.argmax(counts)  # find most frequent
    peak = codes[index_max]
    #print('max_val:\n', peak)
    return peak
def background_color(img, num_clusters=3):
    print('reading image')
    im = Image.fromarray(img).convert('LA')
    # im = Image.open(path).convert('LA')
    im = im.resize((150, 150))      # optional, to reduce time
    ar = scipy.misc.fromimage(im)
    shape = ar.shape
    ar = ar.reshape(scipy.product(shape[:2]), shape[2])

    print('finding clusters')
    codes, dist = scipy.cluster.vq.kmeans(ar.astype('double'), num_clusters)
    print('cluster centres:\n', codes)

    vecs, dist = scipy.cluster.vq.vq(ar, codes)         # assign codes
    counts, bins = scipy.histogram(vecs, len(codes))    # count occurrences

    result = []
    index_max = scipy.argmax(counts)                    # find most frequent
    peak = codes[index_max]
    result.append(peak[0])
    return result
Example #40
    def assign_cluster(self, document):
        good_clusters = []
        best_similarities = []

        # calculate similarities and add to similar clusters
        for cluster in self.formed_clusters:
            sim_to_cluster = self.get_cluster_sim(cluster, document)
            # print("%.4f" % sim_to_cluster)
            if sim_to_cluster > self.threshold:
                good_clusters.append(cluster)
                best_similarities.append(sim_to_cluster)

        # if no similar cluster found, create new
        if not good_clusters:
            self.create_cluster(document)
        else:
            if self.hard:
                max_i = argmax(best_similarities)
                good_clusters = [good_clusters[max_i]]
            for cluster in good_clusters:
                self.add_doc_to_cluster(document, cluster)
Example #41
 def box_cox_transform(self,
                       values,
                       lambda_range=(-2.0, 2.0),
                       lambda_increment=0.1,
                       verbose=False,
                       method='standard'):
     """
     Performs the Box-Cox transformation, over different ranges, picking the optimal one w. respect to normality.
     """
     from scipy import stats
     a = sp.array(values)
     if method == 'standard':
         vals = (a - min(a)) + 0.1 * sp.std(a)
     else:
         vals = a
     sw_pvals = []
     lambdas = sp.arange(lambda_range[0],
                         lambda_range[1] + lambda_increment,
                         lambda_increment)
     for l in lambdas:
         if l == 0:
             vs = sp.log(vals)
         else:
             vs = ((vals**l) - 1) / l
         r = stats.shapiro(vs)
         if sp.isfinite(r[0]):
             pval = r[1]
         else:
             pval = 0.0
         sw_pvals.append(pval)
     # log.info(sw_pvals)
     i = sp.argmax(sw_pvals)
     l = lambdas[i]
     if l == 0:
         vs = sp.log(vals)
     else:
         vs = ((vals**l) - 1) / l
      # self._perform_transform(vs, "box-cox")
      sys.stdout.write('optimal lambda was %0.1f\n' % l)
      return vs
Example #42
def getLPDataFWHM(lprof):
    '''
    Calculate the FWHM of the line profile.
    
    @param lprof: the line profile object. 
    @type lprof: LPDataReader()
    
    @return: the fwhm of the line
    @rtype: float
    
    '''

    flux = lprof.getFlux()
    vel = lprof.getVelocity()
    vlsr = lprof.getVlsr()
    maxval = max(flux)
    i_mid = argmax(flux)
    flux1 = flux[:i_mid]
    flux2 = flux[i_mid:]
    v1 = vel[argmin(abs(flux1 - maxval / 2.))]
    v2 = vel[argmin(abs(flux2 - maxval / 2.)) + i_mid]
    return v2 - v1
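
The half-maximum crossing logic can be sanity-checked on a synthetic Gaussian line, whose FWHM is analytically 2*sqrt(2*ln 2)*sigma, about 2.355*sigma:

from numpy import abs, argmax, argmin, exp, linspace, log, sqrt

vel = linspace(-50.0, 50.0, 2001)            # velocity grid
sigma = 8.0
flux = exp(-vel ** 2 / (2 * sigma ** 2))     # unit-height Gaussian line profile

maxval = flux.max()
i_mid = argmax(flux)
v1 = vel[argmin(abs(flux[:i_mid] - maxval / 2.))]
v2 = vel[argmin(abs(flux[i_mid:] - maxval / 2.)) + i_mid]
print(v2 - v1, 2 * sqrt(2 * log(2)) * sigma)   # both close to 18.84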
Example #43
def test_sim1():
	p = 0.6
	payoutBarrier = 1.0
	beta = 0.99
	nPeriods = 1000
	nRuns = 1000
	M_grid = scipy.arange(21.0)
	V_array = scipy.zeros((len(M_grid), len(M_grid)))
	
	for (iM, M) in enumerate(M_grid):
		#print("M=%f" % M)
		b_grid = M_grid[M_grid <= M]
		for (ib, b) in enumerate(b_grid):
			V_simulated_list = []
			for j in range(nRuns):
				firm = Firm(M, beta, b)
				(survived, age, V) = simulateFirm(firm, p, nPeriods)
				V_simulated_list.append(V)
			EV = scipy.mean(V_simulated_list)
			V_array[iM, ib] = EV

	for (iM, M) in enumerate(M_grid):
		V_of_b = V_array[iM,:]
		print(V_of_b)
		optimal_ib = scipy.argmax(V_of_b)
		optimal_b = M_grid[optimal_ib]
		print("for M=%f, optimal b is %f" % (M, optimal_b))
		
	fig = plt.figure()
	ax = Axes3D(fig)
	vals = V_array.flatten()
	[xlist, ylist] = zip(*itertools.product(M_grid, b_grid))	
	ax.scatter(xlist, ylist, vals)
	ax.set_xlabel('M')
	ax.set_ylabel('b')
	ax.set_zlabel('EV')
Example #44
def get_dominant_hexcolor(image: Image) -> str:
    resized_image = image.resize((200, 200))
    pixel_array = np.asarray(resized_image)
    pixel_array_shape = pixel_array.shape
    reshaped_pixel_array = pixel_array.reshape(
        scipy.product(pixel_array_shape[:2]),
        pixel_array_shape[2]).astype(float)

    centroids, _ = scipy.cluster.vq.kmeans(reshaped_pixel_array,
                                           3,
                                           iter=20,
                                           thresh=1e-5)

    codes, _ = scipy.cluster.vq.vq(reshaped_pixel_array, centroids)
    color_counts, _ = scipy.histogram(codes, len(centroids))

    highest_index = scipy.argmax(color_counts)
    peak = centroids[highest_index]
    hex_color = "#" + binascii.hexlify(bytearray(
        int(c) for c in peak)).decode('ascii')

    return hex_color
Example #45
 def box_cox_transform(self, pid, lambda_range=(-2.0, 2.0), lambda_increment=0.1, verbose=False, method='standard'):
     """
     Performs the Box-Cox transformation, over different ranges, picking the optimal one w. respect to normality.
     """
     from scipy import stats
     a = sp.array(self.phen_dict[pid]['values'])
     if method == 'standard':
         vals = (a - min(a)) + 0.1 * sp.std(a)
     else:
         vals = a
     sw_pvals = []
     lambdas = sp.arange(lambda_range[0], lambda_range[1] + lambda_increment, lambda_increment)
     for l in lambdas:
         if l == 0:
             vs = sp.log(vals)
         else:
             vs = ((vals ** l) - 1) / l
         r = stats.shapiro(vs)
         if sp.isfinite(r[0]):
             pval = r[1]
         else:
             pval = 0.0
         sw_pvals.append(pval)
     print sw_pvals
     i = sp.argmax(sw_pvals)
     l = lambdas[i]
     if l == 0:
         vs = sp.log(vals)
     else:
         vs = ((vals ** l) - 1) / l
     if not self.phen_dict[pid]['transformation']:
         self.phen_dict[pid]['raw_values'] = self.phen_dict[pid]['values']
         self.phen_dict[pid]['transformation'] = 'box-cox'
     else:
         self.phen_dict[pid]['transformation'] = 'box-cox(' + self.phen_dict[pid]['transformation'] + ')'
     self.phen_dict[pid]['values'] = vs.tolist()
     if verbose:
         print 'optimal lambda was %0.1f' % l
     return True
Example #46
 def most_normal_transformation(
         self,
         pid,
         trans_types=['none', 'sqrt', 'log', 'sqr', 'exp', 'arcsin_sqrt'],
         perform_trans=True,
         verbose=False):
     """
     Performs the transformation which results in most normal looking data, according to Shapiro-Wilk's test
     """
     #raw_values = self.phen_dict[pid]['values']
     from scipy import stats
     shapiro_pvals = []
     for trans_type in trans_types:
         if trans_type != 'none':
             if not self.transform(pid, trans_type=trans_type):
                 continue
         phen_vals = self.get_values(pid)
         #print 'sp.inf in phen_vals:', sp.inf in phen_vals
         if sp.inf in phen_vals:
             pval = 0.0
         else:
             r = stats.shapiro(phen_vals)
             if sp.isfinite(r[0]):
                 pval = r[1]
             else:
                 pval = 0.0
         shapiro_pvals.append(pval)
         #self.phen_dict[pid]['values'] = raw_values
         if trans_type != 'none':
             self.revert_to_raw_values(pid)
      best_i = sp.argmax(shapiro_pvals)
      trans_type = trans_types[best_i]
      shapiro_pval = shapiro_pvals[best_i]
     if perform_trans:
         self.transform(pid, trans_type=trans_type)
     if verbose:
         print "The most normal-looking transformation was %s, with a Shapiro-Wilk's p-value of %0.6f" % \
             (trans_type, shapiro_pval)
     return trans_type, shapiro_pval
def main(npImage):
    #print('reading image')
    #im = Image.open('image.jpg')
    im = Image.fromarray(npImage)
    im = im.resize((150, 150))  # optional, to reduce time
    ar = np.asarray(im)
    shape = ar.shape
    ar = ar.reshape(scipy.product(shape[:2]), shape[2]).astype(float)

    #print('finding clusters')
    codes, dist = scipy.cluster.vq.kmeans(ar, NUM_CLUSTERS)
    #print('cluster centres:\n', codes)

    vecs, dist = scipy.cluster.vq.vq(ar, codes)  # assign codes
    counts, bins = scipy.histogram(vecs, len(codes))  # count occurrences

    index_max = scipy.argmax(counts)  # find most frequent
    peak = codes[index_max]
    # colour = binascii.hexlify(bytearray(int(c) for c in peak)).decode('ascii')
    # print('most frequent is %s (#%s)' % (peak, colour))
    peak = [peak[2], peak[1], peak[0]]
    return peak
Example #48
 def oneGeneration(self):
     if self.generation > 0:
         self.calculateAverageFitness()
     # evaluate fitness
     self.fitnesses = []
     for indiv in self.currentpop:
         self.fitnesses.append(self.targetfun(indiv))
     
     # determine the best values
     best = argmax(array(self.fitnesses))
     self.bestfitness = self.fitnesses[best]
     self.bestx = self.currentpop[best]
     
     self.allgenerations.append((self.currentpop, self.fitnesses))
     
     if self.fitnessSmoothing:
         self._smoothFitnesses()    
     
     # selection
     tmp = zip(self.fitnesses, self.currentpop)
     tmp.sort(key = lambda x: x[0])            
     tmp2 = list(reversed(tmp))[:self.selectionSize()]
     parents, self.parentFitnesses = map(lambda x: x[1], tmp2), map(lambda x: x[0], tmp2)
     
     self.currentpop = self.crossOver(parents, self.popsize)
     
     # add one random offspring
     #self.currentpop[-1] = randn(self.xdim)
     #self.crossovervectors[-1] = [0]*self.xdim
     
     for child in self.currentpop:
         self.mutate(child)
     
     if self.generation > 0:
         self.updateLinkageMatrix()
     if self.verbose:# and self.generation % 10 == 0:
         # TODO: more extensive output
         print self.rawlm
Example #49
def run_model1_qa(objects_filename, flickr_cache):

    #load a location file
    loc_info = load_location_file(objects_filename)

    #create a likelihood map
    l_map = model1_qanswer(flickr_cache, loc_info)

    olists, igains, p_ts, p_fs = [], [], [], []

    for k in range(5):
        olist, igain, p_t, p_f = l_map.maximal_information_gain()

        olists.append(olist)
        igains.append(igain)
        p_ts.append(p_t)
        p_fs.append(p_f)

        for i in range(len(igain)):
            print "obj:", olist[i], " I:", igain[i], " P(t):", p_t[
                i], " P(f):", p_f[i]

        if (len(igain) == 0):
            j = -1
        else:
            j = argmax(igain)

        if (not j == -1):
            print "adding allowed object:", olist[j]
            l_map.add_allowed_object(olist[j])
            print "best=", olist[j]

    res = {}
    res['object_lists'] = olists
    res['information_gain'] = igains
    res['prob_true'] = p_ts
    res['prob_false'] = p_fs
    cPickle.dump(res, open('m1_qanswer.pck', 'w'))
Example #50
def plot_eigvect(vect, labels=None, bottom=0, num_label=5, label_offset=0.15):
    """
    Plot a given eigenvector.

    If a list of labels is passed in, the largest (in magnitude) num_label bars
      will be labeled on the plot. label_offset controls how much the labels
      are shifted from the top of the bars for clarity.
    bottom controls where the bar plot is centered along the y axis. This is 
      useful for plotting several e'vectors on the same axes.
    """
    # The 0.4 centers the bars on their numbers, accounting for the default
    #  bar width of 0.8
    vect = scipy.real(vect)
    max_index = scipy.argmax(abs(vect))
    if vect[max_index] < 0:
        vect = -vect
    bar(scipy.arange(len(vect)) - 0.4,
        vect / scipy.linalg.norm(vect),
        bottom=bottom)
    a = list(axis())
    a[0:2] = [-.03 * len(vect) - 0.4, (len(vect) - 1) * 1.03 + 0.4]

    if labels is not None:
        mags = zip(abs(vect), range(len(vect)), vect)
        mags.sort()
        mags.reverse()
        for mag, index, val in mags[:num_label]:
            name = labels[index]
            text(index,
                 val + scipy.sign(val) * label_offset,
                 name,
                 horizontalalignment='center',
                 verticalalignment='center')

        a[2] -= 0.1
        a[3] += 0.1

    axis(a)
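
A usage sketch; the bare bar/text/axis calls in the snippet suggest a pylab-style namespace, assumed explicitly here, and the matrix and labels are made up:

import scipy
import scipy.linalg
from pylab import show

m = scipy.array([[2.0, 1.0, 0.0],
                 [1.0, 3.0, 1.0],
                 [0.0, 1.0, 2.0]])
vals, vects = scipy.linalg.eigh(m)               # eigenvalues in ascending order
plot_eigvect(vects[:, -1], labels=['a', 'b', 'c'])   # plot the dominant eigenvector
show()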
Example #51
def Problem3Real():
    beta = 0.95
    N = 1000
    u = lambda c: sp.sqrt(c)
    
    W = sp.linspace(0,1,N)
    W = W.reshape(N,1)
    X, Y = sp.meshgrid(W,W)
    Wdiff = Y-X
    index = Wdiff <0
    Wdiff[index] = 0
    util_grid = u(Wdiff)
    
    V = sp.zeros((N,1))
    
    z = 0
    r = 15
    delta =1
    
    while (delta > 10**-9):
        z += 1
        #print(z)
        
        #Update Policy Function    
        arg = util_grid + beta*sp.transpose(V)
        arg[index] = -10**10
        psi_ind = sp.argmax(arg,axis = 1)
        
      
        V_prev = V
        #Iterate on Value Function
        for j in sp.arange(0,r):
            V = u(W-W[psi_ind]) + beta*V[psi_ind]
    
    
        delta = sp.dot(sp.transpose(V_prev - V),V_prev-V)

    return W[psi_ind]
Example #52
def Problem2Real():
    beta = 0.95
    N = 1000
    u = lambda c: sp.sqrt(c)
    psi_ind = sp.arange(0,N)

    W = sp.linspace(0,1,N)
    X, Y = sp.meshgrid(W,W)
    Wdiff = Y-X
    index = Wdiff <0
    Wdiff[index] = 0
    util_grid = u(Wdiff)

    I = sp.sparse.identity(N)
    delta = 1
    z = 0
    while (delta > 10**-9):
        z = z+1
        #print(z)
        psi_prev = psi_ind.copy()
    
        rows = sp.arange(0,N)
        columns = psi_ind
        data = sp.ones(N)
        Q = sp.sparse.coo_matrix((data,(rows,columns)),shape = (N,N))
        Q = Q.tocsr()
   
   #Solve for Value Function
        V = spsolve(I-beta*Q,u(W-W[psi_ind]))

    #Find Policy Function    
        arg = util_grid + beta*V
        arg[index] = -10**10
        psi_ind = sp.argmax(arg,axis = 1)
        delta = sp.amax(sp.absolute(W[psi_ind]-W[psi_prev]))


    return W[psi_ind]
def Problem1Real():
    beta = 0.9
    T = 10
    N = 100
    u = lambda c: sp.sqrt(c)
    W = sp.linspace(0, 1, N)
    X, Y = sp.meshgrid(W, W)
    Wdiff = Y - X
    index = Wdiff < 0
    Wdiff[index] = 0
    util_grid = u(Wdiff)
    util_grid[index] = -10**10
    V = sp.zeros((N, T + 2))
    psi = sp.zeros((N, T + 1))

    for k in xrange(T, -1, -1):
        val = util_grid + beta * sp.tile(sp.transpose(V[:, k + 1]), (N, 1))
        vt = sp.amax(val, axis=1)
        psi_ind = sp.argmax(val, axis=1)
        V[:, k] = vt
        psi[:, k] = W[psi_ind]

    return V, psi
Example #54
def createPattern(image):
    colorArray = []
    ar = np.asarray(image)
    shape = ar.shape
    ar = ar.reshape(scipy.product(shape[:2]), shape[2]).astype(float)

    codes, dist = scipy.cluster.vq.kmeans(ar, 5)  # cluster the 5 dominant colors
    print("codes: ", codes, " distance: ", dist)
    vecs, dist = scipy.cluster.vq.vq(ar, codes)
    print("vectors: ", vecs, " distance: ", dist)
    counts, bins = scipy.histogram(vecs, len(codes))
    print("counts: ", counts, " bins: ", bins)
    index_max = scipy.argmax(
        counts)  # find the index of the most frequently used color
    print("index_max: ", index_max)
    peak = codes[index_max]
    print(peak)

    for i in range(0, 5):
        colorArray.append(codes[i].astype("uint8").tolist())
        colorArray.append(peak.astype("uint8").tolist())  # the most dominant color
        print("colorArray")
    return colorArray
Example #55
def getDomColor(imgFileName):
    # Reference:
    # 	http://stackoverflow.com/questions/3241929/
    # 	python-find-dominant-most-common-color-in-an-image

    # number of k-means clusters
    NUM_CLUSTERS = 4

    # Open target image
    im = Image.open(imgFileName)
    im = im.resize((150, 150))  # optional, to reduce time
    ar = scipy.misc.fromimage(im)
    shape = ar.shape
    ar = ar.reshape(scipy.product(shape[:2]), shape[2])
    ar = ar.astype(float)

    # Find clusters
    codes, dist = scipy.cluster.vq.kmeans(ar, NUM_CLUSTERS)
    # print 'cluster centres:\n', codes

    vecs, dist = scipy.cluster.vq.vq(ar, codes)  # assign codes
    counts, bins = scipy.histogram(vecs, len(codes))  # count occurrences

    # Find most frequent
    index_max = scipy.argmax(counts)
    peak = codes[index_max]
    color = ''.join(chr(int(c)) for c in peak).encode('hex')
    #print 'most frequent is %s (#%s)' % (peak, color)

    # Extra Bonus coolbeans: save image using only the N most common colors
    # c = ar.copy()
    # for i, code in enumerate(codes):
    #     c[scipy.r_[scipy.where(vecs==i)],:] = code
    # scipy.misc.imsave(imgFileName[:-4] + "CLUS" + ".jpg", c.reshape(*shape))
    # print 'saved clustered image'

    return (peak, color)
def PLU(A):
    # throw warning flag when the number is too small
    # (close to 0)
    ok = 1
    small = 1e-12

    n = scipy.shape(A)[0]
    U = copy.copy(A)
    L = scipy.identity(n)
    P = scipy.identity(n)
    for j in range(1, n):
        print(j, " operation")
        print("This is U")
        print(U)
        print("This is L")
        print(L)
        print("This is P")
        print(P)
        print()
        s = scipy.argmax(abs(U[j - 1:n, j - 1])) + j - 1
        # argmax returns the offset of the pivot within the subcolumn
        if s != j - 1:
            U = swap(U, s, j - 1, n)
            P = swap(P, s, j - 1, n)
            if j > 1:
                L = swap(L, s, j - 1, j - 1)

        for i in range(j + 1, n + 1):
            if abs(U[j - 1, j - 1]) < small:
                print("Near-zero pivot!")
                ok = 0
                break
            L[i - 1, j - 1] = U[i - 1, j - 1] / U[j - 1, j - 1]
            for k in range(j, n + 1):
                U[i - 1,
                  k - 1] = U[i - 1, k - 1] - L[i - 1, j - 1] * U[j - 1, k - 1]
    return L, U, P, ok
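
Both PLU versions in this listing call a swap helper that is not shown; a plausible sketch (exchange two rows across the first n columns, returning a new matrix) plus a check that P.A equals L.U on a matrix that forces pivoting:

import copy
import scipy

def swap(mat, r1, r2, n):
    # assumed behavior: exchange rows r1 and r2 in the first n columns
    mat = copy.copy(mat)
    mat[[r1, r2], :n] = mat[[r2, r1], :n]
    return mat

A = scipy.array([[0.0, 2.0, 1.0],
                 [1.0, 1.0, 0.0],
                 [2.0, 0.0, 1.0]])
L, U, P, ok = PLU(A)
print(ok, scipy.allclose(scipy.dot(P, A), scipy.dot(L, U)))   # expect 1 True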
Example #57
def parse_plink_snps(genotype_file, snp_indices):
    plinkf = plinkfile.PlinkFile(genotype_file)
    samples = plinkf.get_samples()
    num_individs = len(samples)
    num_snps = len(snp_indices)
    raw_snps = sp.empty((num_snps, num_individs), dtype='int8')
    # If these indices are not in order then we place them in the right place while parsing SNPs.
    snp_order = sp.argsort(snp_indices)
    ordered_snp_indices = list(snp_indices[snp_order])
    ordered_snp_indices.reverse()
    # Iterating over file to load SNPs
    snp_i = 0
    next_i = ordered_snp_indices.pop()
    line_i = 0
    max_i = ordered_snp_indices[0]
    while line_i <= max_i: 
        if line_i < next_i:
            next(plinkf)
        elif line_i == next_i:
            line = next(plinkf)
            snp = sp.array(line, dtype='int8')
            bin_counts = line.allele_counts()
            if bin_counts[-1] > 0:
                mode_v = sp.argmax(bin_counts[:2])
                snp[snp == 3] = mode_v
            s_i = snp_order[snp_i]
            raw_snps[s_i] = snp
            if line_i < max_i:
                next_i = ordered_snp_indices.pop()
            snp_i += 1
        line_i += 1
    plinkf.close()
    assert snp_i == len(raw_snps), 'Parsing SNPs from plink file failed.'
    num_indivs = len(raw_snps[0])
    freqs = sp.sum(raw_snps, 1, dtype='float32') / (2 * float(num_indivs))
    return raw_snps, freqs
Example #58
def PLU(A):
    ok = 1
    small = 1e-12
    n = scipy.shape(A)[0]
    U = copy.copy(A)
    L = scipy.identity(n)
    P = scipy.identity(n)
    for j in range(1, n):
        s = scipy.argmax(abs(U[j - 1:n, j - 1])) + j - 1
        if s != j - 1:
            U = swap(U, s, j - 1, n)
            P = swap(P, s, j - 1, n)
            if j > 1:
                L = swap(L, s, j - 1, j - 1)
        for i in range(j + 1, n + 1):
            if abs(U[j - 1, j - 1]) < small:
                print("Near-zero pivot!")
                ok = 0
                break
            L[i - 1, j - 1] = U[i - 1, j - 1] / U[j - 1, j - 1]
            for k in range(j, n + 1):
                U[i - 1,
                  k - 1] = U[i - 1, k - 1] - L[i - 1, j - 1] * U[j - 1, k - 1]
    return L, U, P, ok
Example #59
def get_main_color(url):
    NUM_CLUSTERS = 5

    # print('reading image')
    response = requests.get(url)
    im = Image.open(BytesIO(response.content))
    im = im.resize((150, 150))  # optional, to reduce time
    ar = np.asarray(im)
    shape = ar.shape
    ar = ar.reshape(scipy.product(shape[:2]), shape[2]).astype(float)

    # print('finding clusters')
    codes, dist = scipy.cluster.vq.kmeans(ar, NUM_CLUSTERS)
    # print('cluster centres:\n', codes)

    vecs, dist = scipy.cluster.vq.vq(ar, codes)  # assign codes
    counts, bins = scipy.histogram(vecs, len(codes))  # count occurrences

    index_max = scipy.argmax(counts)  # find most frequent
    peak = codes[index_max]
    # print(peak)
    colour = binascii.hexlify(bytearray(int(c) for c in peak)).decode('ascii')
    # print(colour)
    return peak, colour
Example #60
def find_dominant_color(imageurl: str, local=False):
    try:
        NUM_CLUSTERS = 10
        if (local):
            im = Image.open(imageurl)
        else:
            im = Image.open(requests.get(imageurl, stream=True).raw)
        im = im.resize((25, 25))
        ar = np.asarray(im)
        shape = ar.shape
        ar = ar.reshape(scipy.product(shape[:2]), shape[2]).astype(float)
        codes, dist = scipy.cluster.vq.kmeans(ar, NUM_CLUSTERS)
        vecs, dist = scipy.cluster.vq.vq(ar, codes)
        counts, bins = scipy.histogram(vecs, len(codes))
        index_max = scipy.argmax(counts)
        peak = codes[index_max]
        colour = binascii.hexlify(bytearray(int(c)
                                            for c in peak)).decode('ascii')
        try:
            return int(hex(int(colour, 16))[:8], 0)
        except:
            return 0xffffff
    except IndexError:
        return 0xffffff