Example #1
  def predict(
      self, x=None, input_fn=None, axis=None, batch_size=None, outputs=None,
      as_iterable=False):
    """Returns predictions for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      axis: Axis on which to argmax (for classification).
            Last axis is used by default.
      batch_size: Override default batch size.
      outputs: list of `str`, name of the output to predict.
        If `None`, returns all.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted classes or regression values (or an iterable of
      predictions if as_iterable is True).
    """
    probabilities = self.predict_proba(
        x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs,
        as_iterable=as_iterable)
    if self.params.regression:
      return probabilities
    else:
      if as_iterable:
        return (np.argmax(p, axis=0) for p in probabilities)
      else:
        return np.argmax(probabilities, axis=1)
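A quick aside on the axis handling above: each per-example probability vector in the iterable case is 1-D, so argmax runs over axis 0, while the stacked batch is 2-D and needs axis=1. A minimal standalone sketch with toy values (nothing here comes from the estimator itself):

import numpy as np

probs_batch = np.array([[0.1, 0.9],
                        [0.8, 0.2]])
print(np.argmax(probs_batch, axis=1))               # [1 0] -- one class per row
print([np.argmax(p, axis=0) for p in probs_batch])  # same result, row by row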
Example #2
def viterbi_decode(score, transition_params):
  """Decode the highest scoring sequence of tags outside of TensorFlow.

  This should only be used at test time.

  Args:
    score: A [seq_len, num_tags] matrix of unary potentials.
    transition_params: A [num_tags, num_tags] matrix of binary potentials.

  Returns:
    viterbi: A [seq_len] list of integers containing the highest scoring tag
        indices.
    viterbi_score: A float containing the score for the Viterbi sequence.
  """
  trellis = np.zeros_like(score)
  backpointers = np.zeros_like(score, dtype=np.int32)
  trellis[0] = score[0]

  for t in range(1, score.shape[0]):
    v = np.expand_dims(trellis[t - 1], 1) + transition_params
    trellis[t] = score[t] + np.max(v, 0)
    backpointers[t] = np.argmax(v, 0)

  viterbi = [np.argmax(trellis[-1])]
  for bp in reversed(backpointers[1:]):
    viterbi.append(bp[viterbi[-1]])
  viterbi.reverse()

  viterbi_score = np.max(trellis[-1])
  return viterbi, viterbi_score
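A minimal usage sketch with made-up potentials (three steps, two tags), assuming viterbi_decode as defined above, just to show the calling convention:

import numpy as np

score = np.array([[1.0, 0.2],
                  [0.3, 1.5],
                  [0.9, 0.1]])
transition_params = np.array([[ 0.5, -0.5],
                              [-0.5,  0.5]])

viterbi, viterbi_score = viterbi_decode(score, transition_params)
print(list(map(int, viterbi)))  # [0, 0, 0] -- staying on tag 0 wins for these values
print(viterbi_score)            # 3.2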
Example #3
	def test_ae(self):

		data = []
		for i in range(8):
			zeros = numpy.zeros(8)
			zeros[i] = 1
			data.append(zeros)

		ls = MS.GradientDescent(lr = 0.1)
		cost = MC.MeanSquaredError()

		i = ML.Input(8, name = 'inp')
		h = ML.Hidden(3, activation = MA.ReLU(), name = "hid")
		o = ML.Regression(8, activation = MA.ReLU(), learningScenario = ls, costObject = cost, name = "out" )

		ae = i > h > o

		miniBatchSize = 2
		for e in range(2000):
			for i in range(0, len(data), miniBatchSize):
				ae.train(o, inp = data[i:i+miniBatchSize], targets = data[i:i+miniBatchSize] )

		res = ae.propagate(o, inp = data)["outputs"]
		for i in range(len(res)):
			self.assertEqual( numpy.argmax(data[i]), numpy.argmax(res[i]))
Example #4
	def predict(self, X):
		"""
		Predict class labels for X.

		Parameters
		----------
		X : {array-like, sparse matrix}, shape = [n_samples, n_features]
			Matrix of training samples.

		Returns
		-------
		maj_vote : array-like, shape = [n_samples]
			Predicted class labels.

		"""
		if self.vote == 'probability':
			maj_vote = np.argmax(self.predict_proba(X), axis=1)
		else:  # 'classlabel' vote

			#  Collect results from clf.predict calls
			predictions = np.asarray([clf.predict(X)
									for clf in self.classifiers_]).T

			maj_vote = np.apply_along_axis(
				lambda x: np.argmax(np.bincount(x, weights=self.weights)),
				axis=1, arr=predictions)
		maj_vote = self.lablenc_.inverse_transform(maj_vote)
		return maj_vote
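The np.bincount trick in the 'classlabel' branch is worth seeing in isolation: each row holds one vote per classifier, and the weighted bin counts pick the winner. A standalone sketch with invented votes and weights:

import numpy as np

predictions = np.array([[0, 1, 1],
                        [2, 2, 0]])   # rows: samples, columns: classifiers
weights = [0.2, 0.5, 0.3]
maj = np.apply_along_axis(
    lambda x: np.argmax(np.bincount(x, weights=weights)),
    axis=1, arr=predictions)
print(maj)  # [1 2]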
Example #5
        def choose_action(planner_type=1):
            """ Select action based on various action selection methods, depending on the planner_type parameter 

            Parameters
            ----------
            planner_type:
                            1 .. planner that chooses random action
                            2 .. greedy planner
                            3 .. randomized planner
                            4 .. naive reward matrix planner (decide optimally if all rewards are known at given state and explore unexplored otherwise)

            """
            if (planner_type == 1):
                return random.choice(self.actions)
            elif (planner_type == 2):
                return self.actions[np.argmax(self.q[self.state_index(self.state)])]
            elif (planner_type == 3):
                return self.actions[np.argmax(self.q[self.state_index(self.state)])] if random.random() > 0.1 else random.choice(self.actions)
            elif (planner_type == 4):
                if (np.count_nonzero(self.rewards[self.state_index(self.state)]) == len(self.actions)):   # case where all actions have been explored (assumes zero reward does not occur)
                    #print 'I have learned all rewards in this state'
                    return self.actions[np.argmax(self.rewards[self.state_index(self.state)])]
                else: # case where actions still need to be explored
                    for i in range(len(self.actions)):  # identify the first unexplored action and try it
                        if (self.rewards[self.state_index(self.state),i] == 0):
                            return self.actions[i]
Example #6
def moments(data, circle, rotate, vheight, estimator=median, **kwargs):
    """Returns (height, amplitude, x, y, width_x, width_y, rotation angle)
    the gaussian parameters of a 2D distribution by calculating its
    moments.  Depending on the input parameters, will only output
    a subset of the above.
    """
    total = np.abs(data).sum()
    Y, X = np.indices(data.shape)  # python convention: reverse x,y np.indices
    y = np.argmax((X*np.abs(data)).sum(axis=1)/total)
    x = np.argmax((Y*np.abs(data)).sum(axis=0)/total)
    col = data[int(y), :]
    # FIRST moment, not second!
    width_x = np.sqrt(np.abs((np.arange(col.size)-y)*col).sum() / np.abs(col).sum())
    row = data[:, int(x)]
    width_y = np.sqrt(np.abs((np.arange(row.size)-x)*row).sum() / np.abs(row).sum())
    width = (width_x + width_y) / 2.
    height = estimator(data.ravel())
    amplitude = data.max()-height
    mylist = [amplitude, x, y]
    if (np.isnan(width_y) or np.isnan(width_x) or np.isnan(height) or np.isnan(amplitude)):
        raise ValueError("something is nan")
    if vheight:
        mylist = [height] + mylist
    if not circle:
        mylist = mylist + [width_x, width_y]
        if rotate:
            mylist = mylist + [0.]  # rotation "moment" is just zero...
            # also, circles don't rotate.
    else:
        mylist = mylist + [width]
    return mylist
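To see what comes back, here is a hedged usage sketch on a synthetic 2-D Gaussian; np.median is passed explicitly, since the default estimator is whatever `median` is bound to in the source module:

import numpy as np

yy, xx = np.indices((40, 40))
data = 5.0 * np.exp(-((xx - 12.0) ** 2 + (yy - 20.0) ** 2) / (2 * 3.0 ** 2))

params = moments(data, circle=False, rotate=False, vheight=True, estimator=np.median)
print(params)  # [height, amplitude, x, y, width_x, width_y]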
Example #7
File: atari.py Project: blazer82/ai
	def get_batch(self, model, batch_size):
		len_memory = len(self.memory)
		num_actions = 6
		encouraged_actions = np.zeros(num_actions, dtype=int)
		predicted_actions = np.zeros(num_actions, dtype=int)
		inputs = np.zeros((min(len_memory, batch_size), 4, 80, 74))
		targets = np.zeros((inputs.shape[0], num_actions))
		q_list = np.zeros(inputs.shape[0])
		for i, idx in enumerate(np.random.randint(0, len_memory, size=inputs.shape[0])):
			input_t, action_t, reward_t, input_tp1 = self.memory[idx][0]
			terminal = self.memory[idx][1]

			inputs[i] = input_t

			targets[i] = model.predict(input_t.reshape(1, 4, 80, 74))[0]
			q_next = np.max(model.predict(input_tp1.reshape(1, 4, 80, 74))[0])

			q_list[i] = np.max(targets[i])
			predicted_actions[np.argmax(targets[i])] += 1

			targets[i, action_t] = (1. - terminal) * self.discount * q_next + reward_t

			if reward_t > 0. or terminal:
				print("Action %d rewarded with %f (sample #%d)" % (action_t, targets[i, action_t], idx))

			encouraged_actions[np.argmax(targets[i])] += 1

		return inputs, targets, encouraged_actions, predicted_actions, np.average(q_list)
Example #8
    def get_new_query_point(self, ucb=False):
        """
        Computes a new point at which to evaluate the function, based on the
        sets M and G.

        Parameters
        ----------
        ucb: bool
            If True the safe-ucb criteria is used instead.

        Returns
        -------
        x: np.array
            The next parameters that should be evaluated.
        """
        if not np.any(self.S):
            raise EnvironmentError('There are no safe points to evaluate.')

        if ucb:
            max_id = np.argmax(self.Q[self.S, 1])
            x = self.inputs[self.S, :][max_id, :]
        else:
            # Get lower and upper bounds
            l = self.Q[:, ::2]
            u = self.Q[:, 1::2]

            MG = np.logical_or(self.M, self.G)
            value = np.max((u[MG] - l[MG]) / self.scaling, axis=1)
            x = self.inputs[MG, :][np.argmax(value), :]

        if self.num_contexts:
            return x[:-self.num_contexts]
        else:
            return x
Example #9
def KTCheckOverValidLoop(cc):
	#
	# Check-over-Valid Loop CONDITION
	#
	if(cc.mCVI + cc.kCB <= 2500):
		#
		# Check-over-Valid Loop BODY
		#
		
		#                  Socket,  Rq#, B,      first,         last,                 sizeIn, sizeOut, x128+s0, x128+s1, maxT, maxR, minS, maxS
		X, Y = KNFetchImgs(cc.sock, 1,   cc.kCB, 22500+cc.mCVI, 22500+cc.mCVI+cc.kCB, 256,    192,     1,       1,       0,    0,    1.0,  1.0)
		YEst   = cc.model.predict({"input":X})["output"]
		
		yDiff  = np.argmax(Y, axis=1) != np.argmax(YEst, axis=1)
		
		cc.mCVI      += cc.kCB
		cc.mCVErrCnt += int(np.sum(yDiff))
		
		sys.stdout.write("\rChecking... {:5d} valid set errors on {:5d} checked ({:7.3f}%)".format(
		                 cc.mCVErrCnt, cc.mCVI, 100.0*float(cc.mCVErrCnt)/cc.mCVI))
		sys.stdout.flush()
		
		return cc.invoke(KTCheckOverValidLoop, snap=cc.shouldCVSnap)
	else:
		#
		# Check-over-Valid Loop EPILOGUE
		#
		
		cc.log({"validErr":float(cc.mCVErrCnt)/cc.mCVI})
		sys.stdout.write("\n")
		sys.stdout.flush()
		return cc.invoke(KTEpochLoopEnd, snap=False)
Example #10
def allclose_with_out(x, y, atol=0.0, rtol=1.0e-5):
    # run the np.allclose on x and y
    # if it fails print some stats
    # before returning
    ac = np.allclose(x, y, rtol=rtol, atol=atol)
    if not ac:
        dd = np.abs(x - y)
        neon_logger.display('abs errors: %e [%e, %e] Abs Thresh = %e'
                            % (np.median(dd), np.min(dd), np.max(dd), atol))
        amax = np.argmax(dd)

        if np.isscalar(x):
            neon_logger.display('worst case: %e %e' % (x, y.flat[amax]))
        elif np.isscalar(y):
            neon_logger.display('worst case: %e %e' % (x.flat[amax], y))
        else:
            neon_logger.display('worst case: %e %e' % (x.flat[amax], y.flat[amax]))

        dd = np.abs(dd - atol) / np.abs(y)
        neon_logger.display('rel errors: %e [%e, %e] Rel Thresh = %e'
                            % (np.median(dd), np.min(dd), np.max(dd), rtol))
        amax = np.argmax(dd)
        if np.isscalar(x):
            neon_logger.display('worst case: %e %e' % (x, y.flat[amax]))
        elif np.isscalar(y):
            neon_logger.display('worst case: %e %e' % (x.flat[amax], y))
        else:
            neon_logger.display('worst case: %e %e' % (x.flat[amax], y.flat[amax]))
    return ac
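Stripped of the neon_logger reporting, the core pattern is np.argmax over the flattened absolute difference to locate the worst-matching element; a self-contained sketch:

import numpy as np

x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 2.5, 3.0])
dd = np.abs(x - y)
amax = np.argmax(dd)
print(amax, x.flat[amax], y.flat[amax])  # 1 2.0 2.5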
Example #11
    def predict(self, X):
        """ Predict class labels for X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        maj : array-like, shape = [n_samples]
            Predicted class labels.
        """
        if self.voting == "soft":
            maj = np.argmax(self.predict_proba(X), axis=1)

        else:  # 'hard' voting
            predictions = self._predict(X)
            maj = np.apply_along_axis(
                lambda x: np.argmax(np.bincount(x, weights=self.weights)), axis=1, arr=predictions
            )

        maj = self.le_.inverse_transform(maj)

        return maj
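For the "soft" branch, averaging each classifier's probabilities and taking the argmax per row does the work; a toy sketch with two invented classifiers:

import numpy as np

p1 = np.array([[0.6, 0.4], [0.3, 0.7]])
p2 = np.array([[0.2, 0.8], [0.4, 0.6]])
avg = np.mean([p1, p2], axis=0)
print(np.argmax(avg, axis=1))  # [1 1]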
Example #12
def spike_latency(signal, threshold, fs):
    """Find the latency of the first spike over threshold

    :param signal: Spike trace recording (vector)
    :type signal: numpy array
    :param threshold: Threshold value to determine spikes
    :type threshold: float
    :returns: float -- Time of peak of first spike, or np.nan if no values over threshold

    This is the same as the first value returned from calc_spike_times
    """
    over, = np.where(signal > threshold)
    segments, = np.where(np.diff(over) > 1)

    if len(over) > 1:
        if len(segments) == 0:
            # only one continuous segment over threshold
            idx = over[0] + np.argmax(signal[over[0]:over[-1]])
            latency = float(idx) / fs
        elif segments[0] == 0:
            # first point in singleton
            latency = float(over[0]) / fs
        else:
            idx = over[0] + np.argmax(signal[over[0]:over[segments[0]]])
            latency = float(idx) / fs
    elif len(over) > 0:
        latency = float(over[0]) / fs
    else:
        latency = np.nan

    return latency
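A small synthetic trace shows the expected behaviour (sampling rate and spike shape are made up):

import numpy as np

fs = 1000.0
signal = np.zeros(100)
signal[30:35] = [0.2, 0.6, 1.0, 0.6, 0.2]  # one synthetic spike

print(spike_latency(signal, threshold=0.5, fs=fs))  # 0.032 -- time of the spike peak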
Example #13
def test_decision_function_shape():
    # check that decision_function_shape='ovr' gives
    # correct shape and is consistent with predict

    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(iris.data, iris.target)
    dec = clf.decision_function(iris.data)
    assert_equal(dec.shape, (len(iris.data), 3))
    assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))

    # with five classes:
    X, y = make_blobs(n_samples=80, centers=5, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(X_train, y_train)
    dec = clf.decision_function(X_test)
    assert_equal(dec.shape, (len(X_test), 5))
    assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))

    # check the shape of decision_function with decision_function_shape='ovo'
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(X_train, y_train)
    dec = clf.decision_function(X_train)
    assert_equal(dec.shape, (len(X_train), 10))

    # check deprecation warning
    clf = svm.SVC(kernel='linear', C=0.1).fit(X_train, y_train)
    msg = "change the shape of the decision function"
    dec = assert_warns_message(ChangedBehaviorWarning, msg,
                               clf.decision_function, X_train)
    assert_equal(dec.shape, (len(X_train), 10))
Example #14
 def predict(self, data):
     if len(self.classes) == 2:
         return 1 * (self.predict_proba(data) > 0.5)
     else:  # multiclass: pick the highest-probability column
         return np.argmax(self.predict_proba(data), axis=1)
Example #15
def compare_subcarrier_location(alpha, M, K, overlap, oversampling_factor):
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    goofy_ordering = False
    taps = gfdm_filter_taps('rrc', alpha, M, K, oversampling_factor)
    A0 = gfdm_modulation_matrix(taps, M, K, oversampling_factor, group_by_subcarrier=goofy_ordering)
    n = np.arange(M * K * oversampling_factor, dtype=complex)
    colors = iter(cm.rainbow(np.linspace(0, 1, K)))

    for k in range(K):
        color = next(colors)
        f = np.exp(1j * 2 * np.pi * (float(k) / (K * oversampling_factor)) * n)
        F = abs(np.fft.fft(f))
        fm = np.argmax(F) / M
        plt.plot(F, '-.', label=k, color=color)

        data = get_zero_f_data(k, K, M)

        x0 = gfdm_gr_modulator(data, 'rrc', alpha, M, K, overlap, compat_mode=goofy_ordering) * (2. / K)
        f0 = 1. * np.argmax(abs(np.fft.fft(x0))) / M
        plt.plot(abs(np.fft.fft(x0)), label='FFT' + str(k), color=color)

        xA = A0.dot(get_data_matrix(data, K, group_by_subcarrier=goofy_ordering).flatten()) * (1. / K)
        fA = np.argmax(abs(np.fft.fft(xA))) / M
        plt.plot(abs(np.fft.fft(xA)), '-', label='matrix' + str(k), color=color)
        print(fm, fA, f0)
    plt.legend()
    plt.show()
Example #16
    def accuracy(self, data, convert=False):
        """Return the number of inputs in ``data`` for which the neural
        network outputs the correct result. The neural network's
        output is assumed to be the index of whichever neuron in the
        final layer has the highest activation.  

        The flag ``convert`` should be set to False if the data set is
        validation or test data (the usual case), and to True if the
        data set is the training data. The need for this flag arises
        due to differences in the way the results ``y`` are
        represented in the different data sets.  In particular, it
        flags whether we need to convert between the different
        representations.  It may seem strange to use different
        representations for the different data sets.  Why not use the
        same representation for all three data sets?  It's done for
        efficiency reasons -- the program usually evaluates the cost
        on the training data and the accuracy on other data sets.
        These are different types of computations, and using different
        representations speeds things up.  More details on the
        representations can be found in
        mnist_loader.load_data_wrapper.

        """
        if convert:
            results = [(np.argmax(self.feedforward(x)), np.argmax(y)) 
                       for (x, y) in data]
        else:
            results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in data]
        return sum(int(x == y) for (x, y) in results)
Example #17
 def ApproxCharacteristicMatrix(self, B, c):  
     if B <= 3:
         print("Parameter B should be greater than 3!")
         return None

     if c < 1:
         print("Parameter c should be at least 1!")
         return None
     
     Ixy = float('-inf')
     for y in range(2, B // 2 + 1):
         x = B // y
         I = ApproxMaxMI(self.D, x, y, c * x)
         IPerp = ApproxMaxMI(self.DPerp,x,y,c*x)
         maxI_index  = np.argmax(I)
         maxIPerp_index = np.argmax(IPerp)
         
         if I[maxI_index] > IPerp[maxIPerp_index]:
             tempMaxI = I[maxI_index]
             max_x = maxI_index
         else:
             tempMaxI = IPerp[maxIPerp_index] 
             max_x = maxIPerp_index 
             
         if tempMaxI > Ixy:
             Ixy = tempMaxI
             Mxy = Ixy/np.log(min(max_x,y))
                 
     return Mxy        
Example #18
def sample_action(rng, action_probs, optimal_action, sample_gt_prob,
                  type='sample', combine_type='one_or_other'):
  optimal_action_ = optimal_action/np.sum(optimal_action+0., 1, keepdims=True)
  action_probs_ = action_probs/np.sum(action_probs+0.001, 1, keepdims=True)
  batch_size = action_probs_.shape[0]

  action = np.zeros((batch_size), dtype=np.int32)
  action_sample_wt = np.zeros((batch_size), dtype=np.float32)
  if combine_type == 'add':
    sample_gt_prob_ = np.minimum(np.maximum(sample_gt_prob, 0.), 1.)

  for i in range(batch_size):
    if combine_type == 'one_or_other':
      sample_gt = rng.rand() < sample_gt_prob
      if sample_gt: distr_ = optimal_action_[i,:]*1.
      else: distr_ = action_probs_[i,:]*1.
    elif combine_type == 'add':
      distr_ = optimal_action_[i,:]*sample_gt_prob_ + \
          (1.-sample_gt_prob_)*action_probs_[i,:]
      distr_ = distr_ / np.sum(distr_)

    if type == 'sample':
      action[i] = np.argmax(rng.multinomial(1, distr_, size=1))
    elif type == 'argmax':
      action[i] = np.argmax(distr_)
    action_sample_wt[i] = action_probs_[i, action[i]] / distr_[action[i]]
  return action, action_sample_wt
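The rng.multinomial(1, distr) call in the 'sample' branch draws a one-hot vector, and np.argmax turns it back into an action index; in isolation:

import numpy as np

rng = np.random.RandomState(0)
distr = np.array([0.2, 0.5, 0.3])
draw = rng.multinomial(1, distr)  # one-hot, e.g. [0 1 0]
print(np.argmax(draw))            # index of the sampled action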
Example #19
def crearSecuencia(Dg, Et, Sm, Op, total):
    secuencia = np.zeros( (total, 3, 28,28,1) )
    resultado = np.zeros((total, 1))
    operandos = np.zeros((total, 3))
    k = 0
    for k in range(total): 
        i = randrange(0, Op.shape[0]) 
        j1 = randrange(0, 55000)
        j2 = randrange(0, 55000)
        digito1 = np.reshape(Dg[j1,:],[28,28])
        operador = Sm[i,:,:]
        digito2 = np.reshape(Dg[j2,:],[28,28])
        secuencia[k,0,:,:,0] = digito1
        secuencia[k,1,:,:,0] = operador
        secuencia[k,2,:,:,0] = digito2
        x1 = np.argmax(Et[j1])
        x2 = np.argmax(Et[j2])
        y =  np.argmax(Op[i])
        
        if (y == 0 ):
            resultado[k]= x1 + x2 
        if (y == 1):
            resultado[k]= x1 - x2 

        operandos[k,0] = x1
        operandos[k,1] = y
        operandos[k,2] = x2
        
        #print (x1,Et[j1],x2,Et[j2],y,Op[i],resultado[k])    
        
    return secuencia, resultado, operandos
Example #20
  def accuracy(self, x, t):
    y = self.predict(x)

    y = np.argmax(y, axis=1)
    t = np.argmax(t, axis=1)

    return np.sum(y==t)/float(t.shape[0])
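The same argmax-and-compare idiom works on any pair of score matrix and one-hot labels; standalone, with toy values:

import numpy as np

y_pred = np.array([[0.1, 0.8, 0.1],
                   [0.7, 0.2, 0.1]])
t = np.array([[0, 1, 0],
              [0, 0, 1]])
acc = np.sum(np.argmax(y_pred, axis=1) == np.argmax(t, axis=1)) / float(t.shape[0])
print(acc)  # 0.5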
Example #21
def generate_f_score_gate(
        neg_sample,
        pos_sample,
        chan,
        beta=1,
        theta=2,
        high=True):
    """
    given a negative and a positive sample, calculate the 'optimal' threshold gate
    position from an approximate F-score calculation
    """

    neg_hist, bins = numpy.histogram(neg_sample[:, chan], 1000, density=True)
    pos_hist, bins = numpy.histogram(pos_sample[:, chan], bins, density=True)

    xs = (bins[1:] + bins[:-1]) / 2.0

    x0 = numpy.argmax(neg_hist)

    dfa = diff_pseudo_f1(neg_hist[x0:], pos_hist[x0:], beta=beta, theta=theta)

    f_cutoff = xs[x0 + numpy.argmax(dfa)]

    if high:
        return ThresholdGate(f_cutoff, chan, 'g')
    else:
        return ThresholdGate(f_cutoff, chan, 'l')
Example #22
    def _check_image_inversion(self):
        """Check the image for proper inversion, i.e. that pixel value increases with dose.

        Notes
        -----
        Inversion is checked by the following:
        - Summing the image along both horizontal and vertical directions.
        - If the maximum point of both horizontal and vertical is in the middle 1/3, the image is assumed to be correct.
        - Otherwise, invert the image.
        """

        # sum the image along each axis
        x_sum = np.sum(self.image.array, 0)
        y_sum = np.sum(self.image.array, 1)

        # determine the point of max value for each sum profile
        xmaxind = np.argmax(x_sum)
        ymaxind = np.argmax(y_sum)

        # If that maximum point isn't near the center (central 1/3), invert image.
        center_in_central_third = ((xmaxind > len(x_sum) / 3 and xmaxind < len(x_sum) * 2 / 3) and
                                   (ymaxind > len(y_sum) / 3 and ymaxind < len(y_sum) * 2 / 3))

        if not center_in_central_third:
            self.image.invert()
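The central-third test is easy to check on a synthetic array (no image class assumed):

import numpy as np

arr = np.zeros((9, 9))
arr[4, 4] = 10.0  # bright spot dead centre
x_sum, y_sum = arr.sum(0), arr.sum(1)
xmaxind, ymaxind = np.argmax(x_sum), np.argmax(y_sum)
centered = (len(x_sum) / 3 < xmaxind < len(x_sum) * 2 / 3 and
            len(y_sum) / 3 < ymaxind < len(y_sum) * 2 / 3)
print(centered)  # True -- no inversion needed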
Example #23
 def _findUSpace(self):
     """Find independent U components with respect to invariant
     rotations.
     """
     n = len(self.invariants)
     R6zall = numpy.tile(-numpy.identity(6, dtype=float), (n, 1))
     R6zall_iter = numpy.split(R6zall, n, axis=0)
     i6kl = ((0, (0, 0)), (1, (1, 1)), (2, (2, 2)),
             (3, (0, 1)), (4, (0, 2)), (5, (1, 2)))
     for op, R6z in zip(self.invariants, R6zall_iter):
         R = op.R
         for j, Ucj in enumerate(self.Ucomponents):
             Ucj2 = numpy.dot(R, numpy.dot(Ucj, R.T))
             for i, kl in i6kl:
                 R6z[i,j] += Ucj2[kl]
     Usp6 = nullSpace(R6zall)
     # normalize Usp6 by its maximum component
     mxcols = numpy.argmax(numpy.fabs(Usp6), axis=1)
     mxrows = numpy.arange(len(mxcols))
     Usp6 /= Usp6[mxrows,mxcols].reshape(-1, 1)
     Usp6 = numpy.around(Usp6, 2)
     # normalize again after rounding to get correct signs
     mxcols = numpy.argmax(numpy.fabs(Usp6), axis=1)
     Usp6 /= Usp6[mxrows,mxcols].reshape(-1, 1)
     self.Uspace = numpy.tensordot(Usp6, self.Ucomponents, axes=(1, 0))
     self.Uisotropy = (len(self.Uspace) == 1)
     return
Example #24
    def action_callback(self, state):
        '''
        Implement this function to learn things and take actions.
        Return 0 if you don't want to jump and 1 if you do.
        '''

        # You might do some learning here based on the current state and the last state.

        # You'll need to select and action and return it.
        # Return 0 to swing and 1 to jump.

        new_state  = state
        self.flag += 1
        if self.last_state is None:
            self.flag = 1
            return 0
        if self.flag == 2:
            self.gravity = state['monkey']['vel']
        #if self.epsilon < random.random():
        index = self.find_index(state)
        old_action = self.Q[self.find_index(self.last_state)][self.last_action]
        #print old_action
        # Q-learning update for the (last_state, last_action) pair:
        # np.max gives the best next-action value, np.argmax picks the greedy action.
        self.Q[self.find_index(self.last_state)][self.last_action] = \
            old_action + self.alpha * (self.last_reward + self.gamma * np.max(self.Q[index]) - old_action)
        self.last_action = np.argmax(self.Q[index])
        # else:
        #     self.last_action = random.randrange(0, 2)
        self.last_state  = new_state
        #print [self.Q[index][0], self.Q[index][1]]
        self.time += 0.1
        self.epsilon = 1 / self.time
        return self.last_action
Example #25
 def forwardProp(self,node,correct, guess):
     cost = total = 0.0
     if node.isLeaf == True:
         node.fprop = True
         node.hActs1 = self.L[:, node.word]
         node.probs = softmax(self.Ws.dot(node.hActs1)+self.bs)
         p = node.probs*make_onehot(node.label, len(self.bs))
         cost = -np.log(np.sum(p))
         correct.append(node.label)
         guess.append(np.argmax(node.probs))
         return cost, 1
         
     c1,t1 = self.forwardProp(node.left,correct,guess)
     c2,t2 = self.forwardProp(node.right,correct,guess)
     if node.left.fprop and node.right.fprop:
         node.fprop = True
         h = np.hstack([node.left.hActs1, node.right.hActs1])
         tmp = np.zeros(len(node.left.hActs1))
         for i in range(len(tmp)):
             tmp[i] = h.dot(self.V[i]).dot(h)
         node.hActs1 = self.ReLU(self.W.dot(h) + self.b + tmp)
         node.probs = softmax(self.Ws.dot(node.hActs1)+self.bs)
         p = node.probs*make_onehot(node.label,len(self.bs))
         cost = -np.log(np.sum(p))
         correct.append(node.label)
         guess.append(np.argmax(node.probs))
         
     cost += c1
     cost += c2
     total += t1
     total += t2
     return cost, total + 1
Example #26
 def get_next_list_tensor(self, inc= None):
     '''Returns the next batch in a list, where each element of the list
     corresponds to a single sequence. 
     '''
     batch= list()
     # define the increment of the cursor between calls
     if inc is None:
         inc= self.window_size
         
     for b in range(self.batch_size): # each element of the batch has a cursor
         # confirm that the current window stays in the page
         while (self.cursor[b]+self.window_size)>(self.cumlength[self.cursor_page[b]]):
             #self.cursor[b] = (self.cursor[b] + self.window_size )%self.length
             self.cursor[b] = (self.cursor[b] + inc )%self.length
             self.cursor_page[b] = np.argmax(self.cursor[b]<self.cumlength)
         
         # get window for current cursor
         start_idx= self.cursor[b] - (self.cumlength[self.cursor_page[b]] - self.page_len[self.cursor_page[b]])
         batch.append( self.data[ self.cursor_page[b] ][start_idx:(start_idx+self.window_size)] )
         
         # update cursor
         #self.cursor[b] = (self.cursor[b] + self.window_size)%self.length
         self.cursor[b] = (self.cursor[b] + inc)%self.length
         self.cursor_page[b] = np.argmax(self.cursor[b]<self.cumlength)
         
     return batch
Example #27
    def predict(self, X):
        """Predict class for X.

        The predicted class of an input sample is computed as the majority
        prediction of the trees in the forest.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes.
        """
        n_samples = len(X)
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)

        else:
            predictions = np.zeros((n_samples, self.n_outputs_))

            for k in range(self.n_outputs_):
                predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
                                                                    axis=1),
                                                          axis=0)

            return predictions
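The classes_.take(np.argmax(...)) idiom maps probability columns back to class labels; a minimal standalone version:

import numpy as np

classes_ = np.array(['cat', 'dog'])
proba = np.array([[0.3, 0.7],
                  [0.9, 0.1]])
print(classes_.take(np.argmax(proba, axis=1), axis=0))  # ['dog' 'cat']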
Example #28
    def _best_path(self, unlabeled_sequence):
        T = len(unlabeled_sequence)
        N = len(self._states)
        self._create_cache()
        self._update_cache(unlabeled_sequence)
        P, O, X, S = self._cache

        V = np.zeros((T, N), np.float32)
        B = -np.ones((T, N), int)

        V[0] = P + O[:, S[unlabeled_sequence[0]]]
        for t in range(1, T):
            for j in range(N):
                vs = V[t-1, :] + X[:, j]
                best = np.argmax(vs)
                V[t, j] = vs[best] + O[j, S[unlabeled_sequence[t]]]
                B[t, j] = best

        current = np.argmax(V[T-1,:])
        sequence = [current]
        for t in range(T-1, 0, -1):
            last = B[t, current]
            sequence.append(last)
            current = last

        sequence.reverse()
        return list(map(self._states.__getitem__, sequence))
Example #29
 def minimize_energy(S_init=None, R_init=None):
     en = 0.
     curen = -np.inf
     i=0
     en_tot = -np.inf
     curS = S_init
     curR = R_init
         
     while (np.abs(en - curen) > eps):    
         i+=1
         if curR is not None:
             # optimize topics given regions
             ans = np.zeros((segment_num, topics_num))        
             for v, l in struct2.items():
                 ans[v,:] -= binary_prescale * w2[:,curR[l]].sum(axis=1).flatten()
                     
             curS = np.argmax(ans, axis=1).flatten()        
             
         # optimize regions given topics
         # unary energy
         ans = alpha *alpha_prescale *  np.log(pR)
         # binary energy
         for v, l in struct1.items():
             ans[v,:] -= binary_prescale * w2[curS[l],:].sum(axis=0).flatten()
         curR = np.argmax(ans, axis=1).flatten()
         
         curen, en = np.sum(np.max(ans, axis=1)), curen        
         en_un = alpha_prescale * alpha * np.log(pR)
         en_un = en_un[range(len(curR)),curR].sum()
     
         if curen > en_tot:
             S = curS
             R = curR
             en_tot = curen    
     return R,S, en_tot
Example #30
    def test_dev(self, X_dev, y_dev):
        if len(y_dev[0]) > 1:
            pc_sentiment = np.zeros(len(X_dev))
            for i in np.arange(len(X_dev)):
                pc_sentiment[i] = np.argmax(self.predict(np.asarray(X_dev[i], dtype=np.float32)
                                                         # ,np.ones((len(X_dev[i]),self.input_dim),dtype=np.float32)
                                                         ))

            correct = 0.0
            for i in np.arange(len(X_dev)):
                if pc_sentiment[i] == np.argmax(y_dev[i]):
                    correct += 1
        else:
            correct = 0.0
            pc_sentiment = np.zeros(len(X_dev))
            for i in np.arange(len(X_dev)):
                pred = self.predict(np.asarray(X_dev[i], dtype=np.float32))[0]
                # print(str(pred)+" "+str(y_dev[i][0]))

                pc_sentiment[i] = (np.floor(pred * 3.0) + 1.00) / 3.00
                # ,np.ones((len(X_dev[i]),self.input_dim),dtype=np.float32)

            for i in np.arange(len(X_dev)):
                if pc_sentiment[i] == y_dev[i][0]:
                    correct += 1

        accuracy = correct / len(X_dev)

        print(accuracy)
Example #31
    k = 0
    for input, target in tqdm.tqdm(loaders["test"]):
        input = input.cuda(non_blocking=True)
        ##TODO: is this needed?
        # if args.method == 'Dropout':
        #    model.apply(train_dropout)
        torch.manual_seed(i)

        if args.method == "KFACLaplace":
            output = model.net(input)
        else:
            output = model(input)

        with torch.no_grad():
            predictions[k:k + input.size()[0]] += (F.softmax(
                output, dim=1).cpu().numpy())
        targets[k:(k + target.size(0))] = target.numpy()
        k += input.size()[0]

    print("Accuracy:", np.mean(np.argmax(predictions, axis=1) == targets))
    #nll is sum over entire dataset
    print("NLL:", nll(predictions / (i + 1), targets))
predictions /= args.N

entropies = -np.sum(np.log(predictions + eps) * predictions, axis=1)
np.savez(args.save_path,
         entropies=entropies,
         predictions=predictions,
         targets=targets)
Example #32
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.7))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.7))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)
    return model


if __name__ == "__main__":
    im = cv2.resize(cv2.imread('pizza.jpg'), (224, 224)).astype(np.float32)
    im[:, :, 0] -= 103.939
    im[:, :, 1] -= 116.779
    im[:, :, 2] -= 123.68

    im = im.transpose((1, 0, 2))
    im = np.expand_dims(im, axis=0)

    # Test pretrained model

    model = VGG_16('vgg16_weights_tf_dim_ordering_tf_kernels.h5')
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy')
    out = model.predict(im)
    print(np.argmax(out))
Example #33
[nt, nj, ni] = Vnemo.shape

if nt != 12:
    print('ERROR (prepare_movies.py): we expect 12 monthly records in NEMO grid_T file!')
    sys.exit(0)

if cvar == 'sss' or cvar == 'sst':
    if nj != nj0 or ni != ni0:
        print('ERROR (prepare_movies.py): NEMO file and clim do not agree in shape!')
        print('       clim => ' + str(ni0) + ', ' + str(nj0) + ', ' + str(nk0) + ' (' + vdic['F_T_CLIM_3D_12'] + ')')
        print('       NEMO => ' + str(ni) + ', ' + str(nj) + ', ' + str(nk))
        sys.exit(0)
    # Creating 1D long. and lat.:
    ji_lat0 = nmp.argmax(xlat[nj - 1])
    #lolo
    vlon = nmp.zeros(ni)
    vlon[:] = xlon[20, :]
    vlat = nmp.zeros(nj)
    vlat[:] = xlat[:, ji_lat0]

if cvar == 'ice':
    # Extrapolating sea values on continents:
    bt.drown(Vnemo[:, :, :], imask, k_ew=2, nb_max_inc=10, nb_smooth=10)

for jt in range(nt):

    cm = "%02d" % (jt + 1)
    cdate = cy + cm
Example #34
def decision_max(predictions):
    return np.argmax(predictions, axis=1)
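Usage is a one-liner; with toy prediction scores:

import numpy as np

predictions = np.array([[0.2, 0.5, 0.3],
                        [0.9, 0.05, 0.05]])
print(decision_max(predictions))  # [1 0]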
Example #35
        loss = cross_entropy(pred, y)

    gradients = g.gradient(loss, [W, b])
    optimizer.apply_gradients(zip(gradients, [W, b]))


for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):
    run_optimization(batch_x, batch_y)

    if step % display_step == 0:
        pred = logistic_regression(batch_x)
        loss = cross_entropy(pred, batch_y)
        acc = accuracy(pred, batch_y)
        print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc))

pred = logistic_regression(x_test)
print("Test Accuracy: %f" % accuracy(pred, y_test))

import matplotlib.pyplot as plt

# Predict 5 images from validation set.
n_images = 5
test_images = x_test[:n_images]
predictions = logistic_regression(test_images)

# Display image and model prediction.
for i in range(n_images):
    plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray')
    plt.show()
    print("Model prediction: %i" % np.argmax(predictions.numpy()[i]))
Example #36
def mc_control_importance_sampling(env,
                                   num_episodes,
                                   behavior_policy,
                                   discount_factor=1.0):
    """
    Monte Carlo off-policy control using weighted importance sampling.
    Finds an optimal greedy policy.
    
    Args:
        env: OpenAI gym environment.
        num_episodes: Number of episodes to sample.
        behavior_policy: The behavior to follow while generating episodes.
            A function that given an observation returns a vector of probabilities for each action.
        discount_factor: Gamma discount factor.
    
    Returns:
        A tuple (Q, policy).
        Q is a dictionary mapping state -> action values.
        policy is a function that takes an observation as an argument and returns
        action probabilities. This is the optimal greedy policy.
    """

    # The final action-value function.
    # A dictionary that maps state -> action values
    Q = defaultdict(lambda: np.zeros(env.action_space.n))
    # The cumulative denominator of the weighted importance sampling formula
    # (across all episodes)
    C = defaultdict(lambda: np.zeros(env.action_space.n))

    Advantage_Function = defaultdict(lambda: np.zeros(env.action_space.n))

    # The greedy policy we want to learn
    target_policy = create_greedy_policy(Advantage_Function)

    for i_episode in range(1, num_episodes + 1):

        # Generate an episode.
        # An episode is an array of (state, action, reward) tuples
        episode = []
        state = env.reset()
        for t in range(100):
            # Sample an action from our policy
            probs = behavior_policy(state)
            action = np.random.choice(np.arange(len(probs)), p=probs)
            next_state, reward, done, _ = env.step(action)
            episode.append((state, action, reward))
            if done:
                break
            state = next_state

        # Sum of discounted returns
        G = 0.0

        # The importance sampling ratio (the weights of the returns)
        W = 1.0
        # For each step in the episode, backwards
        for t in range(len(episode))[::-1]:
            Value_Baseline = 0.0

            state, action, reward = episode[t]
            # Update the total reward since step t
            G = discount_factor * G + reward

            # Update weighted importance sampling formula denominator
            C[state][action] += W
            # Update the action-value function using the incremental update formula (5.7)
            # This also improves our target policy which holds a reference to Q

            #computing advantage function

            for state, action_values in Q.items():
                action_value = np.max(action_values)
                Value_Baseline = Value_Baseline + action_value

            Advantage_Function[state][action] = Q[state][action]

            Advantage_Function[state][action] += (W / C[state][action]) * (
                G - Advantage_Function[state][action])

            # If the action taken by the behavior policy is not the action
            # taken by the target policy the probability will be 0 and we can break
            if action != np.argmax(target_policy(state)):
                break
            W = W * 1. / behavior_policy(state)[action]

    return Q, target_policy, Advantage_Function
Example #37
 def policy_fn(state):
     A = np.zeros_like(Advantage_Function[state], dtype=float)
     best_action = np.argmax(Advantage_Function[state])
     A[best_action] = 1.0
     return A
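The one-hot greedy construction is the same np.argmax pattern; standalone, with an invented action-value row:

import numpy as np

q_row = np.array([0.1, 0.9, 0.4])
A = np.zeros_like(q_row, dtype=float)
A[np.argmax(q_row)] = 1.0
print(A)  # [0. 1. 0.] -- all probability on the greedy action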
Example #38
def get_first_high(data, threshold):
    if np.any(data>threshold):
        return np.argmax(data>threshold)
    else:
        return -1
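Two quick calls show both branches (values made up):

import numpy as np

data = np.array([0.1, 0.4, 0.9, 0.2])
print(get_first_high(data, threshold=0.5))  # 2 -- first index over the threshold
print(get_first_high(data, threshold=2.0))  # -1 -- nothing crosses it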
Example #39
def GA_binary(citys):
	n = len(citys)
	population = 50
	gen_t = 100*n
	p = 0.01
	# citys = [City() for i in range(n)]
	peoples = []

	_list = [i for i in range(n)]

	min_dis = 99999999
	answer = ''
	for _ in range(population):
		code = np.array([[0]*n]*n)
		random.shuffle(_list)
		for i in range(len(code)):
			code[i][_list[i]] = 1  # initial
		peoples.append(code)

	for _ in range(gen_t):
		p_list = []
		min_list = []
		peoples = sorted(peoples, key=lambda x: all_dis_binary(x, citys))

		for i in peoples[0]:
			min_list.append(np.argmax(i))
		new_dis = all_dis(min_list, citys)
		if min_dis > new_dis:
			min_dis = new_dis
			answer = min_list.__str__()
		if _ % 100 == 0:
			print('distance:{}, GA answer:{}'.format(min_dis, answer))

		_sum = 0

		for i in peoples:
			_dis = all_dis_binary(i, citys)
			p_list.append(_dis)
			_sum += _dis
		for i in range(len(p_list)):
			p_list[i] /= _sum

		index_list = np.random.choice([i for i in range(population)], size=2, p=p_list)
		father = peoples[index_list[0]]
		mom = peoples[index_list[1]]
		son = []
		mating_index = random.randint(0, n-1)
		for i in father[:mating_index]:
			son.append(i)
		for i in range(0, len(mom)):
			flag = True
			for j in son:
				_code = np.argmax(j)
				if np.argmax(mom[i]) == _code:
					flag = False
					break
			if flag:
				son.append(mom[i])

		i_idx = -1  # genic mutation
		j_idx = -1
		if random.random() < p:
			while i_idx == j_idx:
				i_idx = random.randint(0, n-1)
				j_idx = random.randint(0, n-1)
			son[i_idx], son[j_idx] = son[j_idx], son[i_idx]  # swap

		son = np.array(son)

		peoples.pop(-1)
		peoples.append(son)

	return min_dis
Example #40
                          **pars)

    # Set up likelihood
    lik = Likelihood(p.get('params'),
                     d.data_vector,
                     d.covar,
                     th,
                     debug=p.get('mcmc')['debug'])

    # Set up sampler
    sam = Sampler(lik.lnprob, lik.p0, lik.p_free_names,
                  p.get_sampler_prefix(v['name']), p.get('mcmc'))

    # Read chains and best-fit
    sam.get_chain()
    sam.update_p0(sam.chain[np.argmax(sam.probs)])

    # Compute galaxy bias
    zarr = np.linspace(zmean - sigz, zmean + sigz, 10)
    bgchain = np.array([
        hm_bias(cosmo, 1. / (1 + zarr), d.tracers[0][0].profile,
                **(lik.build_kwargs(p0))) for p0 in sam.chain[::100]
    ])
    bychain = np.array([
        hm_bias(cosmo, 1. / (1 + zarr), d.tracers[1][1].profile,
                **(lik.build_kwargs(p0))) for p0 in sam.chain[::100]
    ])

    bgmin, bg, bgmax = np.percentile(bgchain, [16, 50, 84])
    bymin, by, bymax = np.percentile(bychain, [16, 50, 84])
Example #41
    def learning(self, batch_size):
        '''
            Learning process
        '''
        explore_rate = self.get_explore_rate(0)
        step_index =0
        loss_list = list()
        reward_list = list()
        for epi in range(self.episode):
            state_old = self.environment.reset()
            temp_loss_list = list()
            total_reward = 0
            for t in range(self.max_t):
                #self.environment.render()
                if random.random() < explore_rate:
                    action = self.environment.action_space.sample()
                else:
                    temp = self.mlp.predict(state_old[np.newaxis, :])  # Q-values for the current state
                    action = np.argmax(temp[0])

                state, reward, done, _ = self.environment.step(action)
                total_reward += reward

                if self.reward_func is not None:
                    reward = self.reward_func(self.environment, state, state_old, done)
                


                self.memory.append((state_old, action, reward, state, done))
                state_old = state
                if len(self.memory) > batch_size:
                    if step_index % self.C_replace == 0 or epi == self.episode - 1:
                        self.mlp.update_paramters()
                    
                    step_index = step_index + 1

                    sample_data = random.sample(self.memory, batch_size)
                    totrain = list()
                    next_states = [data[3] for data in sample_data]
                    pred_reward = self.mlp.get_target(next_states)

                    for b_index in range(batch_size):
                        temp_state, temp_action, temp_reward, temp_next_state, temp_done = sample_data[b_index]
                        max_next_q = max(pred_reward[b_index])  # value of the best next action
                        #print(max_next_q)
                        

                        if temp_done:
                            yj = temp_reward
                        else:
                            yj = temp_reward + self.discount_factor * max_next_q

                        totrain.append([temp_state, temp_action, yj])
                    
                    ##update
                    states = [k[0] for k in totrain]
                    actions = [k[1] for k in totrain]
                    rewards = [k[2] for k in totrain]

                    loss = self.mlp.update(states, actions, rewards)
                    temp_loss_list.append(loss)

                    if len(self.memory) > self.max_memory:
                        self.memory = self.memory[1:]
                
                if done or t >= self.max_t - 1:
                    reward_list.append(total_reward)

                    if len(temp_loss_list) > 0:
                        loss_list.append(sum(temp_loss_list)/len(temp_loss_list))

                    print("Episode %d finished %d" % (epi, t))
                    break

            explore_rate = self.get_explore_rate(epi)
        
        if self.model_name is not None:
            self.mlp.saveModel(self.model_name)
        
        #plot the figure
        fig = plt.figure()
        plt.plot(range(len(loss_list)), loss_list)
        plt.xlabel("Episode")
        plt.ylabel("Average Loss")
        plt.savefig("loss_Idqn.png")
        plt.close('all')

        fig = plt.figure()
        plt.plot(range(self.episode), reward_list)
        plt.xlabel("Episode")
        plt.ylabel("Total reward")
        plt.savefig("reward_Idqn.png")
        plt.close('all')
Example #42
    ])

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(train_images, train_labels, epochs=5)

    test_loss, test_acc = model.evaluate(test_images, test_labels)

    print('Test accuracy:', test_acc)

    predictions = model.predict(test_images)

    print(predictions[0])
    print(np.argmax(predictions[0]))

    # Plot the first X test images, their predicted label, and the true label
    # Color correct predictions in blue, incorrect predictions in red
    num_rows = 5
    num_cols = 3
    num_images = num_rows * num_cols
    plt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))

    for i in range(num_images):
        plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)
        plot_image(i, predictions, test_labels, test_images)
        plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)
        plot_value_array(i, predictions, test_labels)
        # plt.show()
Example #43
            step += 1
            
        print ("Optimization Finished!!!")
        i_test = str(i)
        np.savetxt("./Prob_MRF/"+add_data+"dual_outcome/"+"loss_acc/"+i_test+"rec_loss.txt", rec_loss, '%.6f')
        np.savetxt("./Prob_MRF/"+add_data+"dual_outcome/"+"loss_acc/"+i_test+"rec_train_acc.txt", rec_train_acc, '%.6f')
        np.savetxt("./Prob_MRF/"+add_data+"dual_outcome/"+"loss_acc/"+i_test+"rec_test_acc.txt", rec_test_acc, '%.6f')
        

        probability = sess.run(pred, feed_dict={x: Prob_MRF_val, pixel_coordinate:Prob_MRF_val_coordinate, \
                                                y: Prob_MRF_val_labels, keep_prob: 1.})
        probability_softmax = sess.run(pred1, feed_dict={x: Prob_MRF_val, pixel_coordinate:Prob_MRF_val_coordinate, \
                                                y: Prob_MRF_val_labels, keep_prob: 1.})
        np.savetxt("./Prob_MRF/"+add_data+"dual_outcome/"+"probability/"+i_test+"probability.txt", probability, '%.3f')
        np.savetxt("./Prob_MRF/"+add_data+"dual_outcome/"+"probability/"+i_test+"probability_softmax.txt", probability_softmax, '%.3f')
        labels_pred = np.argmax(probability, 1) 
        np.savetxt("./Prob_MRF/"+add_data+"dual_outcome/"+"probability/"+i_test+"labels_pred.txt", labels_pred, '%d')

        start_time=time.time()
        accuracy_np = sess.run(accuracy, feed_dict={x: Prob_MRF_val, pixel_coordinate:Prob_MRF_val_coordinate, \
                                                y: Prob_MRF_val_labels, keep_prob: 1.})
        duration = time.time()-start_time
        print ("Testing Accuracy:"+"{:.6f}".format(accuracy_np)+ ", Test_time=" + "{:.6f}".format(duration))

        Testing_Accuarcy[0,0] = accuracy_np.astype(np.float32)
        Testing_Accuarcy[0,1] = duration
        np.savetxt("./Prob_MRF/"+add_data+"dual_outcome/"+"Testing_Accuarcy/"+i_test+"Testing_Accuarcy.txt", Testing_Accuarcy, '%.6f')
    

Example #44
def train(epochs, seed):
    # parameter
    batch_size = 64
    num_class = 55
    save_path = base_name + "_seed" + str(seed)
    model_path = "_"

    # Load data
    X_train, y_train, X_test = load_data()

    # CV
    ids_train_split, ids_valid_split = train_test_split(np.arange(X_train.shape[0]),
                                                        random_state=42, test_size=0.05,
                                                        stratify=y_train)


    # data process
    X_train_cv = X_train[ids_train_split]
    y_train_cv = y_train[ids_train_split]
    X_holdout = X_train[ids_valid_split]
    Y_holdout = y_train[ids_valid_split]
    # print(X_train_cv.head())


    # define file path and get callbacks
    weight_path = "model/" + save_path + '.hdf5'
    callbacks = get_callbacks(weight_path, patience=16)
    gen_val = train_generator(X_holdout, Y_holdout, "input/train", batch_size, shuffle=False)
    model = get_model(num_class)

    model.load_weights(filepath="model/train_180201_2_Dense_4th_training_ts1653.hdf5")


    # Getting Training Score
    # score = model.evaluate_generator(generator=gen_trn_eval,
    #                                  steps=np.ceil(X_train.shape[0]/batch_size))
    # print('Train loss:', score[0])
    # print('Train accuracy:', score[1])

    # Getting Valid Score
    score = model.evaluate_generator(generator=gen_val,
                                     steps=np.ceil(X_holdout.shape[0]/batch_size))
    print('Valid loss:', score[0])
    print('Valid accuracy:', score[1])

    # Getting Test prediction
    gen_tst_pred = test_generator(X_test, "input/test", batch_size, shuffle=False,
                                   flip=False, flop=False)
    pred_test1 = model.predict_generator(generator=gen_tst_pred,
                                        steps=np.ceil(X_test.shape[0]/batch_size))
    # gen_tst_pred2 = test_generator(X_test, "input/test", batch_size, shuffle=False,
    #                                flip=True, flop=False)
    # pred_test2 = model.predict_generator(generator=gen_tst_pred2,
    #                                     steps=np.ceil(X_test.shape[0]/batch_size))
    # gen_tst_pred3 = test_generator(X_test, "input/test", batch_size, shuffle=False,
    #                                flip=True, flop=True)
    # pred_test3 = model.predict_generator(generator=gen_tst_pred3,
    #                                     steps=np.ceil(X_test.shape[0]/batch_size))
    # gen_tst_pred4 = test_generator(X_test, "input/test", batch_size, shuffle=False,
    #                                flip=False, flop=False)
    # pred_test4 = model.predict_generator(generator=gen_tst_pred4,
    #                                     steps=np.ceil(X_test.shape[0]/batch_size))
    # pred_test = (pred_test1 + pred_test2 + pred_test3 + pred_test4) / 4
    submission = pd.DataFrame({'id': X_test, 'predict': np.argmax(pred_test1, axis=1)})
    submit_path = "output/submission" + save_path + "testaugx1_val_loss" + str(score[0]) + "_val_acc" + str(score[1]) + ".tsv"
    submission.to_csv(submit_path, index=False, header=False, sep='\t')
Example #45
def test_model(path):
    print(path)
    sgan = SGAN()
    sgan.load_weights()

    m = mmappickle.mmapdict(path, readonly=True)
    all_preds = None
    all_tests = None
    print(m.keys())
    for key in list(m.keys())[:10]:
        print(key)
        d = m[key]
        crop = d['crop']
        cell = d['cell']
        print(d.keys())

        windows = sliding_windows((400, 400), (34, 34), 6)[:1]
        patches = [crop[w[0]:w[2], w[1]:w[3]] for w in windows]
        cell_patches = [cell[w[0]:w[2], w[1]:w[3]] for w in windows]
        y_test = np.array([is_nuclei(n) for n in cell_patches])
        try:
            y_proba = sgan.predict_proba(prepare_patches(patches))
            print(y_proba)
            y_proba = y_proba[1][:, :-1]
            print(y_proba)
            y_pred = np.argmax(y_proba, axis=1)
            print(y_pred)
        except Exception as e:
            print("Erro")
            continue
        else:
            pass
        return
        #     print(np.argwhere(y_pred == 1))
        #     return
        nuclei_picks = nms(windows, y_proba[:, 1], 0.1, 0.3)
        print(nuclei_picks)
        #     print("Nuclei picks")
        non_nuclei_picks = nms(windows, y_proba[:, 0], 0.1, 0.3)
        #     print(nuclei_picks)
        #     print("Non nuclei picks")
        #     print(non_nuclei_picks)
        picks = np.concatenate((nuclei_picks, non_nuclei_picks))
        print(picks)
        #     print(picks)
        y_test = y_test[picks]
        y_pred = y_pred[picks]

        if all_preds is None:
            all_preds = y_pred
            all_tests = y_test
        else:
            all_preds = np.concatenate((all_preds, y_pred))
            all_tests = np.concatenate((all_tests, y_test))

    print('\nOverall accuracy: %f%% \n' %
          (accuracy_score(all_preds, all_tests) * 100))
    print('\nAveP: %f%% \n' %
          (average_precision_score(all_preds, all_tests) * 100))

    # Calculating and ploting a Classification Report
    class_names = ['Non-nunclei', 'Nuclei']
    print('Classification report:\n %s\n' % (classification_report(
        all_preds, all_tests, target_names=class_names)))
Example #46
def faces(path):

    # path of the folder where all images are stored
    Face_path = path + "Faces"

    # path of the pickle file
    path = os.path.dirname(os.path.abspath("__file__")) + '/'
    embeddingsPath = path + 'Encoding_Data/'
    #    faceDbPath = path + 'FacesDB/'

    # threshold value
    thresh = 0.1
    cnt = 0

    # declaring an array to store names of all detected faces
    person_array = []

    # loading the pickle file
    data = [d for d in os.listdir(embeddingsPath) if '.DS_Store' not in d][0]
    file = 'FaceEncodingsModel.pkl'
    with open(embeddingsPath + file, 'rb') as f:
        data = pickle.load(f)

    # assigning model and the classes. (no. of classes = no. of trained faces)
    model = data
    person = model.classes_

    # fetching images one by one from different folders
    for folder in os.listdir(Face_path):
        for image in os.listdir(Face_path + '/' + folder):

            #            print(Face_path + '/' + folder + '/' + image)

            try:
                # loading the image
                frame = face_recognition.load_image_file(Face_path + '/' +
                                                         folder + '/' + image)

                # determining the face encodings of the image
                faceEncodings = face_recognition.face_encodings(frame)[0]

#                print(faceEncodings.shape)

            except Exception as e:
                print('error')
                print(e)
                continue

            # calculating the probability of match with every other face and finding max probability
            prob = model.predict_proba(np.array(faceEncodings).reshape(1,
                                                                       -1))[0]
            # finding the index with max probability
            index = np.argmax(prob)

            # comparing the probability with the threshold
            if np.max(prob) > float(thresh):
                #                text = str(person[index] + str(np.round_(np.max(prob),2)))

                remove_digits = str.maketrans('', '', digits)
                name = person[index].translate(remove_digits)

                # if name is not in array, then adding it in the array
                if name not in person_array:
                    person_array.append(name)

                # Saving the recognized faces in a folder - RecognizedFaces
                # (a separate variable keeps the base `path` intact for later iterations)
                recognized_dir = os.path.dirname(os.path.abspath(
                    "__file__")) + '/' + 'RecognizedFaces'
                if not os.path.exists(recognized_dir):
                    os.mkdir(recognized_dir)
                cv2.imwrite(recognized_dir + '/' + str(person[index]) + '.jpg',
                            frame)
            else:
                # saving the unknown faces in a folder - Unknown
                if not os.path.exists(path + 'Unknown'):
                    os.mkdir(path + 'Unknown')

                path_for_Unknown = path + 'Unknown' + '/' + str(cnt) + '.jpg'
                cv2.imwrite(path_for_Unknown, frame)
                cnt += 1

    print(person_array)
    return person_array
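A hypothetical call, assuming the argument is a base directory containing a `Faces/` folder with one subfolder of images per person (the layout the loops above expect):

recognized_people = faces('/data/face_db/')  # hypothetical path
print(recognized_people)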
Example #47
0
for idx in xrange(num_samples):
    train_set[idx, 0, :, :] = train_img[idx, :, :]

for idx in xrange(test_samples):
    test_set[idx, 0, :, :] = test_img[idx, :, :]

train_lab = np_utils.to_categorical(train_lab, 3)

print "Build Model"

model = Sequential()
# input_shape=(1, 32, 32) assumes channels_first ordering (the default for the
# old Theano backend); with TensorFlow's channels_last default, pass
# data_format='channels_first' or reshape the data to (32, 32, 1)
model.add(Conv2D(32, (3, 3), input_shape=(1, 32, 32)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(3))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# NOTE: validation_data reuses the training set here, so the validation metrics
# will simply track the training metrics
model.fit(train_set, train_lab, validation_data=(train_set, train_lab), batch_size=20, epochs=10, verbose=1)

out2 = model.predict(test_set)
classes = np.argmax(out2, axis=1)
print classes

df = pd.DataFrame(classes)
df.to_csv('example.csv')
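Since `to_categorical(train_lab, 3)` produces mutually exclusive one-hot labels, the conventional pairing is a softmax output with categorical cross-entropy rather than sigmoid with binary cross-entropy; a sketch of that variant of the final layers (not the author's original choice):

model.add(Dense(3))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])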
Example #48
0
			epoch_haplo[:, :, epoch] = haplo_matrix
			epoch_MEC.append(MEC(SNVmatrix, haplo_matrix))

	# calibration
	try:
		GAE_hap = epoch_haplo[:, :, np.argmin(np.array(epoch_MEC))]
		index = []
		for i in range(SNVmatrix.shape[0]):
			dis = np.zeros((GAE_hap.shape[0]))
			for j in range(GAE_hap.shape[0]):
				dis[j] = hamming_distance(SNVmatrix[i, :], GAE_hap[j, :])
			index.append(np.argmin(dis))

		new_haplo = np.zeros(GAE_hap.shape)
		for i in range(GAE_hap.shape[0]):
			new_haplo[i, :] = np.argmax(ACGT_count(SNVmatrix[np.array(index) == i, :]), axis = 1) + 1

		MEC_experiment.append(MEC(SNVmatrix, new_haplo))
		haplo_experiment[:, :, iteration] = new_haplo
	except Exception:
		MEC_experiment.append(MEC(SNVmatrix, epoch_haplo[:, :, np.argmin(np.array(epoch_MEC))]))
		haplo_experiment[:, :, iteration] = epoch_haplo[:, :, np.argmin(np.array(epoch_MEC))]

	# # record MEC results
	# with open(zone_name + '_MEC_result_{}_Strains.txt'.format(ploidy), 'a') as f:
	# 	f.write('Experiment' + str(iteration + 1) + ' : ' + str(MEC_experiment[-1])  + '\n')

	end_time = time.time()
	time_record.append(end_time - start_time)
	print('Checking Rank: {} - '.format(ploidy) + 'Experiment: {}/{} - ETA: {}s '.format(iteration + 1, num_exp, int(np.mean(time_record) * (num_exp - (iteration + 1)))), end = '\r')
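`MEC`, `ACGT_count` and `hamming_distance` are project-specific helpers; a minimal sketch of `hamming_distance` consistent with its use on SNV-matrix rows here, assuming 0 encodes an unobserved position:

import numpy as np

def hamming_distance(read, haplo):
    # count mismatches only at positions the read actually covers
    covered = read != 0
    return int(np.sum(read[covered] != haplo[covered]))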
Example #49
0
    f1 = f(t[k], V[:, k])
    V[:, k + 1] = V[:, k] + dt * f(t[k] + dt / 2, V[:, k] + (dt / 2) * f1)
    
A1 = V.copy()[0].reshape(1, n)

# Plot voltage

axs[0].plot(t, V[0])
axs[0].set_title("RK2 Approximation")
axs[0].set_xlabel("t")
axs[0].set_ylabel("V")
axs[0].grid()

# b)

A2 = t[np.argmax(A1[0][0:21])]

# c)

A3 = t[np.argmax(A1[0][:101])]

# d) 

# firing rate

A4 = 1/(A3-A2)

# e)

# RK4
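The snippet breaks off at part e); a standard fourth-order Runge-Kutta update for the same system, written with the snippet's own names (f, t, V, dt), would look like:

for k in range(len(t) - 1):
    # classical RK4: four slope evaluations per step
    f1 = f(t[k], V[:, k])
    f2 = f(t[k] + dt / 2, V[:, k] + (dt / 2) * f1)
    f3 = f(t[k] + dt / 2, V[:, k] + (dt / 2) * f2)
    f4 = f(t[k] + dt, V[:, k] + dt * f3)
    V[:, k + 1] = V[:, k] + (dt / 6) * (f1 + 2 * f2 + 2 * f3 + f4)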
Example #50
0
nbImages1Row = 10
nbImages1Col = 10
img100 = np.zeros((imgH * nbImages1Col, imgW * nbImages1Row))

for i in range(10):
    for j in range(10):
        # if rnd is Python's random module, randint is inclusive at both ends,
        # so 4999 avoids indexing past the 5000 available images
        tmp = np.reshape(img[rnd.randint(0, 4999), :], (20, 20))
        tmp = np.transpose(tmp)
        img100[i * imgH:i * imgH + 20, j * imgW:j * imgW + 20] = tmp

cv2.imwrite("img100.tif", img100)

# Insert column of 1's for the input data for prediction / LR calculation
imgRev = np.insert(img, 0, 1, axis=1)

thetaTp = np.transpose(theta)
resY = np.matmul(imgRev, thetaTp)
prediction = np.argmax(
    resY, axis=1
) + 1  # +1 to correct indexing due to speciality of the example. See docs ex3.pdf

nb_correct = 0
#print(label,label.shape,prediction,prediction.shape,nb_correct)
for i in range(len(prediction)):
    if (label[i] == prediction[i]):
        nb_correct += 1
tAcc = (100.0 * nb_correct) / (len(prediction))
print("Predicted examples: {:d}".format(len(prediction)))
print("Expected Training Accuracy: 94.90% Measured: {:0.2f}% approx".format(
    tAcc))
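The counting loop above can be collapsed into a single vectorized comparison; an equivalent sketch (ravel guards against a column-shaped `label`):

tAcc = 100.0 * np.mean(np.asarray(label).ravel()[:len(prediction)] == prediction)
print("Measured: {:0.2f}% approx".format(tAcc))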
Example #51
0
import numpy as np
import keras
from scipy import misc  # NOTE: scipy.misc.imread was removed in SciPy 1.2; imageio.imread is the modern replacement


# Load the Model 
model = keras.models.load_model('1_Backdooring/model_mod.h5')

# Sanity Check all 10 digits, if the model can still understand these
for i in range(10):
    image = misc.imread('1_Backdooring/testimages/' + str(i) + '.png')
    processedImage = np.zeros([1, 28, 28, 1])
    for yy in range(28):
        for xx in range(28):
            processedImage[0][xx][yy][0] = float(image[xx][yy]) / 255
                
    shownDigit = np.argmax(model.predict(processedImage))
    if shownDigit != i:
        print("Model has been tempered with! Exiting!")
        exit()
        

# Load the Image File
image = misc.imread('1_Backdooring/backdoor.png')
processedImage = np.zeros([1, 28, 28, 1])
for yy in range(28):
    for xx in range(28):
        processedImage[0][xx][yy][0] = float(image[xx][yy]) / 255

# Run the Model and check what Digit was shown
shownDigit = np.argmax(model.predict(processedImage))
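The per-pixel loops above can be replaced by one vectorized expression; a sketch equivalent to the normalization performed here:

processedImage = (image.astype(np.float32) / 255.0).reshape(1, 28, 28, 1)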
Example #52
0
import tensorflow
import numpy as np
import math

from keras.models import Sequential,Model,load_model
from keras.layers import InputLayer,Input 
from keras.layers import Reshape,MaxPooling2D
from keras.layers import Conv2D,Dense,Flatten
from keras.optimizers import Adam



# NOTE: tensorflow.examples.tutorials.mnist ships only with TensorFlow 1.x
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets('data/MNIST/', one_hot=True)

data.test.cls = np.argmax(data.test.labels, axis=1)

img_size=28
img_shape=(img_size,img_size)
img_shape_full=(img_size,img_size,1)
img_size_flat=img_size*img_size

num_channels=1

num_classes=10

def seq():

	model=Sequential()

	model.add(InputLayer(input_shape=(img_size_flat,)))
Example #53
0
def qubit_iteration(params, fd_lower=8.9, fd_upper=9.25, display=False):

    threshold = 0.001
    eps = params.eps
    eps_array = np.array([eps])
    multi_results = multi_sweep(eps_array, fd_lower, fd_upper, params,
                                threshold)

    labels = params.labels

    collected_data_re = None
    collected_data_im = None
    collected_data_abs = None
    results_list = []
    for sweep in multi_results.values():
        for i, fd in enumerate(sweep.fd_points):
            transmission = sweep.transmissions[i]
            p = sweep.params[i]
            coordinates_re = [[fd], [p.eps], [p.Ej], [p.fc], [p.g], [p.kappa],
                              [p.kappa_phi], [p.gamma], [p.gamma_phi], [p.Ec],
                              [p.n_t], [p.n_c]]
            coordinates_im = [[fd], [p.eps], [p.Ej], [p.fc], [p.g], [p.kappa],
                              [p.kappa_phi], [p.gamma], [p.gamma_phi], [p.Ec],
                              [p.n_t], [p.n_c]]
            coordinates_abs = [[fd], [p.eps], [p.Ej], [p.fc], [p.g], [p.kappa],
                               [p.kappa_phi], [p.gamma], [p.gamma_phi], [p.Ec],
                               [p.n_t], [p.n_c]]
            point = np.array([transmission])
            abs_point = np.array([np.abs(transmission)])

            for j in range(len(coordinates_re) - 1):
                point = point[np.newaxis]
                abs_point = abs_point[np.newaxis]

            hilbert_dict = OrderedDict()
            hilbert_dict['t_levels'] = p.t_levels
            hilbert_dict['c_levels'] = p.c_levels
            packaged_point_re = xr.DataArray(point,
                                             coords=coordinates_re,
                                             dims=labels,
                                             attrs=hilbert_dict)
            packaged_point_im = xr.DataArray(point,
                                             coords=coordinates_im,
                                             dims=labels,
                                             attrs=hilbert_dict)
            packaged_point_abs = xr.DataArray(abs_point,
                                              coords=coordinates_abs,
                                              dims=labels,
                                              attrs=hilbert_dict)
            packaged_point_re = packaged_point_re.real
            packaged_point_im = packaged_point_im.imag

            if collected_data_re is not None:
                collected_data_re = collected_data_re.combine_first(
                    packaged_point_re)
            else:
                collected_data_re = packaged_point_re

            if collected_data_im is not None:
                collected_data_im = collected_data_im.combine_first(
                    packaged_point_im)
            else:
                collected_data_im = packaged_point_im

            if collected_data_abs is not None:
                collected_data_abs = collected_data_abs.combine_first(
                    packaged_point_abs)
            else:
                collected_data_abs = packaged_point_abs

    a_abs = collected_data_abs.squeeze()

    max_indices = local_maxima(a_abs.values[()])
    maxima = a_abs.values[max_indices]
    indices_order = np.argsort(maxima)

    max_idx = np.argmax(a_abs).values[()]
    A_est = a_abs[max_idx]
    f_r_est = a_abs.f_d[max_idx]
    popt, pcov = lorentzian_fit(a_abs.f_d.values[()], a_abs.values[()])
    f_r = popt[1]

    two_peaks = False
    split = None
    if len(max_indices) >= 2:
        two_peaks = True
        max_indices = max_indices[indices_order[-2:]]

        f_01 = a_abs.f_d[max_indices[1]].values[()]
        f_12 = a_abs.f_d[max_indices[0]].values[()]
        split = f_12 - f_r

    if display:
        fig, axes = plt.subplots(1, 1)
        a_abs.plot(ax=axes)
        plt.show()
        """ 
        fig, axes = plt.subplots(1, 1)
        xlim = axes.get_xlim()
        ylim = axes.get_ylim()
        xloc = xlim[0] + 0.1*(xlim[1]-xlim[0])
        yloc = ylim[1] - 0.1*(ylim[1]-ylim[0])

        collected_data_abs.plot(ax=axes)
        axes.plot(a_abs.f_d, lorentzian_func(a_abs.f_d, *popt), 'g--')
        print "Resonance frequency = " + str(popt[1]) + " GHz"
        print "Q factor = " + str(Q_factor)
        plt.title(str(p.t_levels) + str(' ') + str(p.c_levels))

        props = dict(boxstyle='round', facecolor='wheat', alpha=1)
        if two_peaks == True:
            textstr = '$f_{01}$ = ' + str(f_01) + 'GHz\n' + r'$\alpha$ = ' + str(1000*split) + 'MHz\n$Q$ = ' + str(Q_factor) + '\n$FWHM$ = ' + str(1000*params.kappa) + 'MHz'
        else:
            #textstr = 'fail'
            textstr = '$f_{01}$ = ' + str(f_r_est.values[()]) + 'GHz\n$Q$ = ' + str(
                Q_factor) + '\n$FWHM$ = ' + str(1000 * params.kappa) + 'MHz'

        #textstr = '$f_{01}$ = ' + str(f_01) + 'GHz\n' + r'$\alpha$ = ' + str(split) + 'GHz'
        label = axes.text(xloc, yloc, textstr, fontsize=14, verticalalignment='top', bbox=props)

    plt.show()

    collected_dataset = xr.Dataset({'a_re': collected_data_re,
                                    'a_im': collected_data_im,
                                    'a_abs': collected_data_abs})

    time = datetime.now()
    cwd = os.getcwd()
    time_string = time.strftime('%Y-%m-%d--%H-%M-%S')

    directory = cwd + '/eps=' + str(eps) + 'GHz' + '/' + time_string
    if not os.path.exists(directory):
        os.makedirs(directory)
        collected_dataset.to_netcdf(directory+'/spectrum.nc')

    """

    #new_fq = params.fq + 9.19324 - f_r_est.values[()]
    # new_chi = (2*params.chi - split - 0.20356)/2
    #new_chi = -0.20356 * params.chi / split

    return f_r, split
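`local_maxima` is defined elsewhere in this project; a minimal sketch consistent with how it is used here, returning indices of interior points that exceed both neighbours:

import numpy as np

def local_maxima(a):
    # indices i with a[i-1] < a[i] > a[i+1]; endpoints are ignored in this sketch
    return np.where((a[1:-1] > a[:-2]) & (a[1:-1] > a[2:]))[0] + 1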
Example #54
0
    if t % 10 == 0:
        print('iteration %d / %d: loss %f' % (t, iterations, loss))
        print('Learning rate -', 60 * count / (time.time() - start), 'epochs per minute')
    dy_pred = 1. / batch_size * 2.0 * (y_pred - y)
    dw2 = h.T.dot(dy_pred) + reg * w2
    db2 = dy_pred.sum(axis=0)
    dh = dy_pred.dot(w2.T)
    dw1 = x.T.dot(dh * h * (1 - h)) + reg * w1
    db1 = (dh * h * (1 - h)).sum(axis=0)
    w1 -= lr * dw1
    w2 -= lr * dw2
    b1 -= lr * db1
    b2 -= lr * db2
    lr *= lr_decay

print('iteration %d / %d: loss %f' % (t, iterations, loss))
print('Learning rate -', 60 * count / (time.time() - start), 'epochs per minute')
batch_size = y_pred.shape[0]
K = y_pred.shape[1]
# The test-time forward pass must mirror training: sigmoid hidden layer, then the
# second affine layer (the original applied only the first layer to x_test)
h_test = 1.0 / (1.0 + np.exp(-(x_test.dot(w1) + b1)))
y_pred_test = h_test.dot(w2) + b2
batch_size_test = y_pred_test.shape[0]
K_test = y_pred_test.shape[1]
train_acc = 1.0 - (1.0 / (batch_size * K)) * (np.abs(
    np.argmax(y_train, axis=1) - np.argmax(y_pred, axis=1))).sum()
print('train acc =', train_acc)
test_acc = 1.0 - (1.0 / (batch_size_test * K_test)) * (np.abs(
    np.argmax(y_test, axis=1) - np.argmax(y_pred_test, axis=1))).sum()
print('test acc =', test_acc)
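The expression above penalizes predictions by the numeric distance between class indices; the conventional exact-match accuracy would be a sketch like:

train_acc = np.mean(np.argmax(y_train, axis=1) == np.argmax(y_pred, axis=1))
test_acc = np.mean(np.argmax(y_test, axis=1) == np.argmax(y_pred_test, axis=1))
print('train acc =', train_acc, '| test acc =', test_acc)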

x_axis=np.arange(len(loss_history))
plt.plot(x_axis,loss_history)

plt.show()
Example #55
0
def test_model(tmpFeatures):
    loaded_graph = tf.Graph()

    with tf.Session(graph=loaded_graph) as sess:
        loader = tf.train.import_meta_graph(save_model_path + '.meta')
        loader.restore(sess, save_model_path)

        # Get accuracy in batches for memory limitations
        test_batch_acc_total = 0
        test_batch_count = 0
        test_batch_softmax = np.zeros([10000, 10])
        test_batch_entropy = np.zeros(10000)

        loaded_x = loaded_graph.get_tensor_by_name('input_x:0')
        loaded_y = loaded_graph.get_tensor_by_name('output_y:0')
        loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
        # loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
        first_pass = loaded_graph.get_tensor_by_name('first_pass:0')

        for train_feature_batch, train_label_batch in batch_features_labels(
                tmpFeatures, test_labels, batch_size):
            softmax_total = np.zeros([batch_size, 10])
            for i in range(num_mc):
                softmax_total += sess.run(tf.nn.softmax(loaded_logits),
                                          feed_dict={
                                              loaded_x: train_feature_batch,
                                              loaded_y: train_label_batch,
                                              first_pass: True
                                          })

            # Softmax
            softmax_total = softmax_total / float(num_mc)
            prediction = np.argmax(softmax_total, axis=1)
            trues = np.argmax(train_label_batch, axis=1)
            test_batch_softmax[test_batch_count *
                               batch_size:(test_batch_count + 1) *
                               batch_size] = softmax_total

            # Entropy (per-sample predictive entropy over the class distribution;
            # axis=1 keeps one value per image rather than one scalar per batch)
            log_softmax_total = np.log(softmax_total + 1e-30)
            test_batch_entropy[test_batch_count *
                               batch_size:(test_batch_count + 1) *
                               batch_size] = -np.sum(
                                   softmax_total * log_softmax_total, axis=1)

            # Accuracy
            current_accuracy = np.sum(prediction == trues) / float(batch_size)
            test_batch_acc_total += current_accuracy

            # Count
            test_batch_count += 1

        print('Testing Accuracy: {}\n'.format(test_batch_acc_total /
                                              test_batch_count))
        np.save('accuracy_ARM', test_batch_acc_total / test_batch_count)
        np.save('softmax_ARM', test_batch_softmax)
        np.save('entropy_ARM', test_batch_entropy)

        # Print Random Samples
        random_test_features, random_test_labels = tuple(
            zip(*random.sample(list(zip(test_features, test_labels)),
                               n_samples)))

        tmpTestFeatures = []

        for feature in random_test_features:
            tmpFeature = skimage.transform.resize(feature, (224, 224),
                                                  mode='constant')
            tmpTestFeatures.append(tmpFeature)

        random_test_predictions = sess.run(tf.nn.softmax(loaded_logits),
                                           feed_dict={
                                               loaded_x: tmpTestFeatures,
                                               loaded_y: random_test_labels,
                                               first_pass: True
                                           })

        display_image_predictions(random_test_features, random_test_labels,
                                  random_test_predictions)
Example #56
0
def cavity_iteration(params, fd_lower=10.47, fd_upper=10.51, display=False):

    threshold = 0.0005

    eps = params.eps
    eps_array = np.array([eps])

    multi_results = multi_sweep(eps_array, fd_lower, fd_upper, params,
                                threshold)

    labels = params.labels

    collected_data_re = None
    collected_data_im = None
    collected_data_abs = None
    results_list = []
    for sweep in multi_results.values():
        for i, fd in enumerate(sweep.fd_points):
            transmission = sweep.transmissions[i]
            p = sweep.params[i]
            coordinates_re = [[fd], [p.eps], [p.Ej], [p.fc], [p.g], [p.kappa],
                              [p.kappa_phi], [p.gamma], [p.gamma_phi], [p.Ec],
                              [p.n_t], [p.n_c]]
            coordinates_im = [[fd], [p.eps], [p.Ej], [p.fc], [p.g], [p.kappa],
                              [p.kappa_phi], [p.gamma], [p.gamma_phi], [p.Ec],
                              [p.n_t], [p.n_c]]
            coordinates_abs = [[fd], [p.eps], [p.Ej], [p.fc], [p.g], [p.kappa],
                               [p.kappa_phi], [p.gamma], [p.gamma_phi], [p.Ec],
                               [p.n_t], [p.n_c]]
            point = np.array([transmission])
            abs_point = np.array([np.abs(transmission)])

            for j in range(len(coordinates_re) - 1):
                point = point[np.newaxis]
                abs_point = abs_point[np.newaxis]

            hilbert_dict = OrderedDict()
            hilbert_dict['t_levels'] = p.t_levels
            hilbert_dict['c_levels'] = p.c_levels
            packaged_point_re = xr.DataArray(point,
                                             coords=coordinates_re,
                                             dims=labels,
                                             attrs=hilbert_dict)
            packaged_point_im = xr.DataArray(point,
                                             coords=coordinates_im,
                                             dims=labels,
                                             attrs=hilbert_dict)
            packaged_point_abs = xr.DataArray(abs_point,
                                              coords=coordinates_abs,
                                              dims=labels,
                                              attrs=hilbert_dict)
            packaged_point_re = packaged_point_re.real
            packaged_point_im = packaged_point_im.imag

            if collected_data_re is not None:
                collected_data_re = collected_data_re.combine_first(
                    packaged_point_re)
            else:
                collected_data_re = packaged_point_re

            if collected_data_im is not None:
                collected_data_im = collected_data_im.combine_first(
                    packaged_point_im)
            else:
                collected_data_im = packaged_point_im

            if collected_data_abs is not None:
                collected_data_abs = collected_data_abs.combine_first(
                    packaged_point_abs)
            else:
                collected_data_abs = packaged_point_abs

    a_abs = collected_data_abs.squeeze()

    max_indices = local_maxima(a_abs.values[()])
    maxima = a_abs.values[max_indices]
    indices_order = np.argsort(maxima)

    two_peaks = False
    if len(max_indices) == 2:
        two_peaks = True

        max_indices = max_indices[indices_order[-2:]]

        f_r = a_abs.f_d[max_indices[1]].values[()]
        f_r_2 = a_abs.f_d[max_indices[0]].values[()]
        split = f_r - f_r_2

        ratio = a_abs[max_indices[1]] / a_abs[max_indices[0]]
        ratio = ratio.values[()]

    max_idx = np.argmax(a_abs).values[()]
    A_est = a_abs[max_idx]
    f_r_est = a_abs.f_d[max_idx]
    #popt, pcov = curve_fit(lorentzian_func, a_abs.f_d, a_abs.values, p0=[A_est, f_r_est, 0.001])
    popt, pcov = lorentzian_fit(a_abs.f_d.values[()], a_abs.values[()])
    Q_factor = popt[2]

    if display:
        fig, axes = plt.subplots(1, 1)
        a_abs.plot(ax=axes)
        plt.show()
    """
    print "Resonance frequency = " + str(popt[1]) + " GHz"
    print "Q factor = " + str(Q_factor)

    fig, axes = plt.subplots(1,1)
    collected_data_abs.plot(ax=axes)
    axes.plot(a_abs.f_d, lorentzian_func(a_abs.f_d, *popt), 'g--')

    plt.title(str(p.t_levels) + str(' ') + str(p.c_levels))

    props = dict(boxstyle='round', facecolor='wheat', alpha=1)
    if two_peaks == True:
        textstr = 'f_r = ' + str(popt[1]) + 'GHz\n$Q$ = ' + str(Q_factor) + '\n$\chi$ = ' + str(
            split * 1000) + 'MHz\n$\kappa$ = ' + str(1000 * params.kappa) + 'MHz\nRatio = ' + str(ratio)
    else:
        textstr = 'f_r = ' + str(popt[1]) + 'GHz\n$Q$ = ' + str(Q_factor) + '\n$\kappa$ = ' + str(1000 * params.kappa) + 'MHz'

    label = axes.text(a_abs.f_d[0], popt[0], textstr, fontsize=14, verticalalignment='top', bbox=props)

    #collected_dataset = xr.Dataset({'a_re': collected_data_re,
    #                                'a_im': collected_data_im,
    #                                'a_abs': collected_data_abs})

    #time = datetime.now()
    #cwd = os.getcwd()
    #time_string = time.strftime('%Y-%m-%d--%H-%M-%S')

    #directory = cwd + '/eps=' + str(eps) + 'GHz' + '/' + time_string
    #if not os.path.exists(directory):
    #    os.makedirs(directory)
    #    collected_dataset.to_netcdf(directory+'/spectrum.nc')

    plt.show()

    """

    #fc_new = params.fc + 10.49602 - popt[1]
    #g_new = params.g * np.sqrt(23.8 * 1000 / split) / 1000
    #kappa_new = Q_factor * params.kappa / 8700

    return popt[1], split, Q_factor
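`lorentzian_fit` and `lorentzian_func` are defined elsewhere; given that the code reads the resonance frequency from popt[1] and the Q factor from popt[2], a plausible minimal sketch (the exact lineshape parameterization is an assumption):

import numpy as np
from scipy.optimize import curve_fit

def lorentzian_func(f, A, f_r, Q):
    # magnitude response of a resonance with amplitude A, centre f_r and quality factor Q
    return A / np.sqrt(1 + 4 * Q**2 * (f / f_r - 1)**2)

def lorentzian_fit(f, a):
    A_est = np.max(a)
    f_r_est = f[np.argmax(a)]
    popt, pcov = curve_fit(lorentzian_func, f, a, p0=[A_est, f_r_est, 1e4])  # 1e4 is a rough Q guess
    return popt, pcov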
Example #57
0
def LS_peak_to_period(omegas, P_LS):
    # find the highest peak in the LS periodogram and return the corresponding period
    max_freq = omegas[np.argmax(P_LS)]
    return 2 * np.pi / max_freq
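A usage sketch with SciPy's Lomb-Scargle implementation, on a hypothetical unevenly sampled sinusoid of period 2.5:

import numpy as np
from scipy.signal import lombscargle

t = np.sort(np.random.uniform(0, 50, 300))   # uneven sample times
y = np.sin(2 * np.pi * t / 2.5)

omegas = np.linspace(0.1, 10, 5000)          # angular frequency grid
P_LS = lombscargle(t, y - y.mean(), omegas)
print(LS_peak_to_period(omegas, P_LS))       # approximately 2.5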
Example #58
0
def predict(self, input_data):
    X = input_data['X']
    X = self.stats.transform(X)
    predictions = np.argmax(self.base_model.predict(X), axis=-1)
    predictions = np.reshape(predictions, [len(predictions), -1])
    return {'predictions': predictions}
Example #59
0
def predict(self, X_test, is_weighted=False):
    proba_pred = self.predict_proba(X_test, is_weighted)
    return np.argmax(proba_pred, axis=-1)
Example #60
0
    sess = tf.Session()
    sess.run(init_op)

    batch_size = 1000
    no_of_batches = int(len(train_word_vecList) / batch_size)
    epoch = 100
    for i in range(epoch):
        ptr = 0
        for j in range(no_of_batches):
            inp, out = train_word_vecList[ptr:ptr+batch_size], train_output_vecList[ptr:ptr+batch_size]
            ptr+=batch_size
            sess.run(minimize,{data: inp, target: out})

    # NOTE: y_p takes the argmax of the `target` placeholder, so y_pred merely
    # reproduces the fed labels; for a meaningful confusion matrix the argmax
    # should be taken over the model's output tensor instead
    y_p = tf.argmax(target, 1)
    y_pred = sess.run(y_p, feed_dict={data: test_word_vecList, target: test_output_vecList})
    y_true = np.argmax(test_output_vecList, 1)
    f.write('confusion_matrix:'+ '\n\n')
    confusion_mat = confusion_matrix(y_true, y_pred)
    f.write(str(confusion_mat)+ '\n\n')
    evaluation_matrix = evaluate(confusion_mat,int2char)
    f.write('evaluation matrix:'+ '\n\n')
    f.write(str(evaluation_matrix))
    sess.close()
    f.close()