Example #1
def most_popular_cat_from_ing(
    df, df_i, num_ingredients, category, freq=True, top_n=3, fname=None):
    from foodessentials import get_perc
    counts = df_i['ingredient'].value_counts()
    ing_names = counts.index.values[:num_ingredients]
    if fname:
        with open(fname, 'wb') as f_out:
            for ing in ing_names:
                x = get_perc(ing, category, df, df_i)
                if freq:
                    # Sort by freq rather than percentage.
                    x = sorted(x, key=lambda x : x[2])
                cats = np.array([str(i[1]) for i in x[-top_n:][::-1]])
                f_out.write('{} --> {}\n'.format(ing, 
                    np.array_str(cats, 
                        max_line_width=10000).replace('\n', '')
                    ))
    else:
        for ing in ing_names:
            x = get_perc(ing, category, df, df_i)
            if freq:
                # Sort by freq rather than percentage.
                x = sorted(x, key=lambda x : x[2])
            cats = np.array([str(i[1]) for i in x[-top_n:][::-1]])
            print '{} --> {}'.format(ing, 
                np.array_str(cats, 
                    max_line_width=10000).replace('\n', '')
                )
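The example above keeps each line of output unwrapped by passing a very large max_line_width to np.array_str and then stripping any leftover newlines. A minimal standalone sketch of that pattern (the ingredient and category names here are invented for illustration):

import numpy as np

cats = np.array(['Cereal', 'Candy', 'Chips & Pretzels'])
# A generous max_line_width keeps the whole array on one line; the
# replace('\n', '') call is a final guard against any residual wrapping.
line = np.array_str(cats, max_line_width=10000).replace('\n', '')
print('{} --> {}'.format('sugar', line))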
Example #2
def main():
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    logging.basicConfig(filename="./logfiles/test_ensemble.log",
                        level=logging.INFO)

    print("\nNumber of threads: 4")
    print("Maximum number of evaluations: 50")
    print("Search strategy: CandidateSRBF")
    print("Experimental design: Latin Hypercube + point [0.1, 0.5, 0.8]")
    print("Surrogate: Cubic RBF, Linear RBF, Thin-plate RBF, MARS")

    nthreads = 4
    maxeval = 50
    nsamples = nthreads

    data = Hartman3()
    print(data.info)

    # Use 3 different RBFs and MARS as an ensemble surrogate
    models = [
        RBFInterpolant(surftype=CubicRBFSurface, maxp=maxeval),
        RBFInterpolant(surftype=LinearRBFSurface, maxp=maxeval),
        RBFInterpolant(surftype=TPSSurface, maxp=maxeval)
    ]
    response_surface = EnsembleSurrogate(models, maxeval)

    # Add an additional point to the experimental design. If a good
    # solution is already known you can add this point to the
    # experimental design
    extra = np.atleast_2d([0.1, 0.5, 0.8])

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = \
        SyncStrategyNoConstraints(
            worker_id=0, data=data,
            response_surface=response_surface,
            maxeval=maxeval, nsamples=nsamples,
            exp_design=LatinHypercube(dim=data.dim, npts=2*(data.dim+1)),
            search_procedure=CandidateSRBF(data=data, numcand=100*data.dim),
            extra=extra)

    # Launch the threads and give them access to the objective function
    for _ in range(nthreads):
        worker = BasicWorkerThread(controller, data.objfunction)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    response_surface.compute_weights()
    print('Final weights: {0}'.format(
        np.array_str(response_surface.weights, max_line_width=np.inf,
                     precision=5, suppress_small=True)))

    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}\n'.format(
        np.array_str(result.params[0], max_line_width=np.inf,
                     precision=5, suppress_small=True)))
Example #3
def regenerate(request, dbn_id):
    dbn = get_object_or_404(DBNModel, pk=dbn_id)
    data = [map(int, request.POST['data'].split(','))]
    (visible_state, hidden_state) = dbn.regenerate(data)
    visible_state = np.array_str(visible_state)
    hidden_state = np.array_str(hidden_state)
    return render(request, 'rbm/regenerate.html', {'old_data': request.POST['data'], 'dbn': dbn, 'visible_state': visible_state, 'hidden_state': hidden_state})
Example #4
def run():
    """Run the evolution."""
    if args.verbose and __name__ == '__main__':
        print "objective: minimise", eval_func.__doc__

    if args.seed is not None:
        np.random.seed(args.seed)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", np.min)

    try:
        algorithms.eaGenerateUpdate(toolbox, ngen=args.generations,
                                    stats=stats, halloffame=hof, verbose=True)
    except KeyboardInterrupt:
        print 'user terminated early'

    (score,) = hof[0].fitness.values
    print 'Score: %.2f $/MWh' % score
    print 'List:', [max(0, param) for param in hof[0]]

    set_generators(hof[0])
    nem.run(context)
    context.verbose = True
    print context
    if args.transmission:
        x = context.exchanges.max(axis=0)
        print np.array_str(x, precision=1, suppress_small=True)
        f = open('results.json', 'w')
        obj = {'exchanges': x.tolist(), 'generators': context}
        json.dump(obj, f, cls=nem.Context.JSONEncoder)
        f.close()
Example #5
def shrink_data(inX, iny, n):
    outX = []
    outy = []
    last_ip = 0
    nrounds = 0
    duplicates = set()
    for i in range(inX.shape[0]):
        cur_ip = inX[i][1]
        if cur_ip == last_ip:
            # Haven't filled up all of cur_y yet
            if nrounds < n:
                if np.array_str(inX[i]) in duplicates:
                    print "found duplicate at ip", cur_ip, "prognum", inX[i][0]
                    continue
                else:
                    outX.append(inX[i])
                    outy.append(iny[i])
                    duplicates.add(np.array_str(inX[i]))
                    nrounds += 1
        # First round of new IP value
        else:
            last_ip = cur_ip
            nrounds = 1
            if np.array_str(inX[i]) in duplicates:
                print "found duplicate at ip", cur_ip, "prognum", inX[i][0]
                continue
            else:
                outX.append(inX[i])
                outy.append(iny[i])
                duplicates.add(np.array_str(inX[i]))
    X_shrunk = np.array(outX)
    y_shrunk = np.array(outy)
    np.save("X-shrunk.npy",X_shrunk)
    np.save("y-shrunk.npy",y_shrunk)
    return X_shrunk, y_shrunk
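shrink_data uses np.array_str(row) as a hashable key for spotting duplicate rows. A tuple of the row's values would serve the same purpose without the string formatting; a small sketch with made-up data:

import numpy as np

X = np.array([[1, 10], [1, 10], [2, 10]])
seen = set()
unique_rows = []
for row in X:
    key = np.array_str(row)   # alternatively: tuple(row.tolist())
    if key not in seen:
        seen.add(key)
        unique_rows.append(row)
print(np.array(unique_rows))  # the two distinct rows remain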
Example #6
 def pprint(self, file=sys.stdout):
     """
     Parameters
     ----------
     file : file-like, optional
         An object with `write()` method
     """
     p = partial(print, file=file)
     ## Print summary of the fitting
     p("degree of freedom: {0}".format(self.ndf))
     p("iterations: {0}".format(self.info[2]))
     p("reason for stop: {0}".format(self.info[3]))
     p("")
     p(":parameters:")
     for i, (q, dq) in enumerate(zip(self.p, self.p_stdv)):
         rel = 100 * abs(dq/q)
         p("  p[{0}]: {1:+12.5g} +/- {2:>12.5g}  ({3:4.1f}%)".format(i, q, dq, rel))
     p("")
     p(":covariance:")
     p(np.array_str(self.covr, precision=2, max_line_width=200))
     p("")
     p(":correlation:")
     p(np.array_str(self.corr, precision=2, max_line_width=200))
     p("")
     p(":r2:")
     p("  {0:6g}".format(self.r2))
def do_everything(input_file = 'experiments.txt', output_file = 'results.txt', mp=True, oci=False):
    '''Automate clustering process
       input: input_file:  a 5-column text file with 1 line per clustering run
                           each line lists the 4 filters to be used to construct colours, plus number of clusters
              mp: make output plots?
              oci: output cluster IDs for each object?
       output: output_file: a text file listing input+results from each clustering run'''
    
    run = np.genfromtxt(input_file, dtype='str')

    # TODO: check whether results file already exists; if not, open it and print a header line
    # if it does already exist, just open it
    results = open(output_file, 'a') 
    
    for i in range(0, len(run)):
        
        input_str =  '{} {}'.format(np.array_str(run[i][:-1])[1:-1],int(run[i,4])) # list of input parameters: bands and num of clusters

        score, num_obj =  do_cluster(run[i,0], run[i,1], run[i,2], run[i,3], int(run[i,4]), make_plots=True, output_cluster_id=oci)
        total_obj = num_obj.sum()
        output_str = ' {:.4f} {:5d} {}'.format(score, total_obj, np.array_str(num_obj)[1:-1])
        
        results.write(input_str + ' ' + output_str + '\n')
        
       

    results.close()

    return
Example #8
    def pretty_string_samples(self, idx_start=0, idx_end=20, precision=4, header=False):
        s = ''
        if header:
            t = '  '
            u = 'ch'
            for i in range(self.ch):
                t += '-------:'
                u += '  %2i   :' %(i+1)
            t += '\n'
            u += '\n'
            
            s += t  #   -------:-------:-------:
            s += u  # ch   1   :   2   :   3   :
            s += t  #   -------:-------:-------:

        s += np.array_str(self.samples[idx_start:idx_end,:],
                          max_line_width=260,   # we can print 32 channels before linewrap
                          precision=precision,
                          suppress_small=True)
        if (idx_end-idx_start) < self.nofsamples:
            s  = s[:-1] # strip the right ']' character
            s += '\n ...,\n'
            lastlines = np.array_str(self.samples[-3:,:],
                                     max_line_width=260,
                                     precision=precision,
                                     suppress_small=True)
            s += ' %s\n' %lastlines[1:] # strip first '['
        return s
Example #9
	def __str__(self):
		s = str("\n\tObservationType:" + self.observeType + "\tPARAM Arr: " + np.array_str(self.ParamArr) + "\tTARGET Arr: " + np.array_str(self.TargetArr))
		if self.PredictionErrArr is not None:
			s = s + "\tPREDICTED Arr: " + np.array_str(self.PredictedArr)
			s = s + "\tPREDICTION ERROR: " + str(self.PredictionErrArr)
			s = s + "\tDISTANCE: " + str(self.DistanceToTargetArr)
		return s
Example #10
def train_policy(policy, optimizer, estimator, continuous, n_iters, t_len):
    trials = []
    grads = []

    for i in range(n_iters):
        print 'Trial %d...' % i
        print 'A:\n%s' % np.array_str(policy.A)
        print 'B:\n%s' % np.array_str(policy.B)
        states, actions, rewards, logprobs = run_trial(policy,
                                                       preprocess,
                                                       max_len=t_len,
                                                       continuous=continuous)
        estimator.report_episode(states, actions, rewards, logprobs)

        trials.append(len(rewards))

        start_theta = policy.get_theta()
        theta, _ = optimizer.optimize(x_init=start_theta,
                                      func=estimator.estimate_reward_and_gradient)
        if np.any(theta != start_theta):
            policy.set_theta(theta)
            estimator.update_buffer()
            estimator.remove_unlikely_trajectories(-3)
            print '%d trajectories remaining' % estimator.num_samples

        if len(trials) > 3 and np.mean(trials[-3:]) >= t_len:
            print 'Convergence achieved'
            break

    return trials, grads
Example #11
def topic_model_on_zlda(docs, vocab, num_topics=5, zlabels=None, eta=0.95, file_out=None):
    """
    See http://pages.cs.wisc.edu/~andrzeje/research/zl_lda.html
    :param docs:
    :param vocab:
    :param num_topics:
    :param zlabels: each entry is ignored unless it is a List.
    :param eta: confidence in our labels. If eta = 0 --> don't use z-labels, if eta = 1 --> "hard" z-labels.
    :param file_out:
    :return: Phi - P(w|z), Theta - P(z|d)
    """
    alpha = .1 * np.ones((1, num_topics))
    beta = .1 * np.ones((num_topics, len(vocab)))
    numsamp = 100
    randseed = 194582

    if not zlabels:
        zlabels = [[0]*len(text) for text in docs]

    phi, theta, sample = zlabelLDA(docs, zlabels, eta, alpha, beta, numsamp, randseed)
    if file_out:
        print('\nTheta - P(z|d)\n', np.array_str(theta, precision=2), file=file_out)
        print('\n\nPhi - P(w|z)\n', np.array_str(phi,precision=2), file=file_out)
        print('\n\nsample', file=file_out)
        for doc in range(len(docs)):
            print(sample[doc], file=file_out)

    return phi, theta
Example #12
def main():
  np.set_printoptions(precision=3)
  Xtrain, ytrain, Xval, yval, Xtest, ytest = data_processing()
  # =========================Q3.1 linear_regression=================================
  w = linear_regression_noreg(Xtrain, ytrain)
  print("======== Question 3.1 Linear Regression ========")
  print("dimensionality of the model parameter is ", len(w), ".", sep="")
  print("model parameter is ", np.array_str(w))
  
  # =========================Q3.2 regularized linear_regression=====================
  lambd = 5.0
  wl = regularized_linear_regression(Xtrain, ytrain, lambd)
  print("\n")
  print("======== Question 3.2 Regularized Linear Regression ========")
  print("dimensionality of the model parameter is ", len(wl), sep="")
  print("lambda = ", lambd, ", model parameter is ", np.array_str(wl), sep="")

  # =========================Q3.3 tuning lambda======================
  lambds = [0, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1, 1, 10, 10 ** 2]
  bestlambd = tune_lambda(Xtrain, ytrain, Xval, yval, lambds)
  print("\n")
  print("======== Question 3.3 tuning lambdas ========")
  print("tuning lambda, the best lambda =  ", bestlambd, sep="")

  # =========================Q3.4 report mse on test ======================
  wbest = regularized_linear_regression(Xtrain, ytrain, bestlambd)
  mse = test_error(wbest, Xtest, ytest)
  print("\n")
  print("======== Question 3.4 report MSE ========")
  print("MSE on test is %.3f" % mse)
def evaluate_input( proxy, inval, num_retries=1 ):
    """Query the optimization function.

    Parameters
    ----------
    proxy : rospy.ServiceProxy
        Service proxy to call the GetCritique service for evaluation.

    inval : numeric array
        Input values to evaluate.

    Return
    ------
    reward : numeric
        The reward of the input values
    feedback : list
        List of feedback
    """
    req = GetCritiqueRequest()
    req.input = inval

    for i in range(num_retries+1):
        try:
            res = proxy.call( req )
            break
        except rospy.ServiceException:
            rospy.logerr( 'Could not evaluate item: ' + np.array_str( inval ) )
    
    reward = res.critique
    rospy.loginfo( 'Evaluated input: %s\noutput: %f\nfeedback: %s', 
                   np.array_str( inval, max_line_width=sys.maxint ),
                   reward,
                   str( res.feedback ) )
    return (reward, res.feedback)
Example #14
def plotVec( antList=phasedAnts ) :
  color = ['r','g','b','m','c','r','g','b','m','c','c','m','c','m','c']
  pbCorrectedVis = numpy.load("pbCorrectedVis.npy")
  #print pbCorrectedVis
  scalarSum = 0.
  vecList = [0.+0.j]	
  for n in antList :
    for m in range (0,16) :
      ilast = len(vecList) - 1
      if ( numpy.isnan( numpy.real( pbCorrectedVis[m][n-1] ) ) or \
           numpy.isnan( numpy.imag( pbCorrectedVis[m][n-1] ) ) ) :
        vecList = numpy.append( vecList, vecList[ilast] )
      else : 
        vecList = numpy.append( vecList, vecList[ilast] + pbCorrectedVis[m][n] )
        scalarSum += numpy.abs( pbCorrectedVis[m][n] )
  print "\nvecList:"
  print numpy.array_str( vecList, precision=2, max_line_width=200 )
  istart = 0
  i = 0
  while (istart < len(vecList) ) :
    x = numpy.real( vecList[istart:(istart+16)] )
    y = numpy.imag( vecList[istart:(istart+16)] )
    pylab.plot( x, y, color=color[i] )
    i = i+1
    istart = istart+16
  pylab.axis( [-scalarSum,scalarSum,-scalarSum,scalarSum] )
  pylab.grid(True)
  pylab.axes().set_aspect('equal')
  pylab.draw()
Example #15
def findminima( x, p, fGHz ):
    delta = 0.45*300./fGHz
      # anticipated spacing between minima
    xminlist = []
    pminlist = []
    while numpy.ma.count( x ) > 0 :
      nmin = numpy.argmin( p )
        # nmin is index of point with  minimum power
      xmin = x[nmin]
      xminlist.append( xmin )
      pminlist.append( p[nmin] )

    # now mask all points that are within +/-delta of the min
      x2 = numpy.ma.masked_inside( x, xmin-delta, xmin+delta )
      p2 = numpy.ma.masked_where( numpy.ma.getmask(x2), p )

    # copy the arrays to allow easy iteration
      x = x2
      p = p2

    print "finished - all masked"
    indexsorted = numpy.argsort(xminlist)
    xminsorted = []
    pminsorted = []
    for n in indexsorted :
      xminsorted.append( xminlist[n] )
      pminsorted.append( pminlist[n] )
    print numpy.array_str( numpy.array(xminsorted), precision=4 ) 
    gap = []
    for n in range(1,len(xminsorted)) :
      gap.append( xminsorted[n] - xminsorted[n-1] )
    print numpy.array_str( numpy.array(gap), precision=4 ) 
    return [xminsorted, pminsorted]
Example #16
 def writeLogNP(self, beliefs, cards, reward): #[[cards],[beliefs]]
     array = [np.array_str(cards),np.array_str(beliefs), reward]
     logname = "log-" + str(self.logNr) +".csv"
     ##print(logname)
     with open(self.logPath + logname, "a", newline='') as csv_file:
         writer = csv.writer(csv_file, delimiter=',')
         writer.writerow(array)
     return logname
Example #17
 def __str__(self, z=False, precision=3):
     s = ''
     if z:
         s += '# q(z):\n{:s}\n'.format(np.array_str(self.z, precision=precision))
         s += '# q(pi):\n{:s}'.format(np.array_str(self.pi, precision=precision))
         for i, nw in enumerate(self.nw):
             s += '\n\n# q(nw[{:d}]):\n{:s}'.format(i, nw.__str__(precision=precision))
     return s
Example #18
def main():
    """
    DO NOT TOUCH THIS FUNCTION. IT IS USED FOR COMPUTER EVALUATION OF YOUR CODE
    """
    results = my_info() + '\t\t'
    results += np.array_str(np.diagonal(one_vs_all())) + '\t\t'
    results += np.array_str(np.diagonal(all_vs_all()))
    print results + '\t\t'
Example #19
def main():
    """
    DO NOT TOUCH THIS FUNCTION. IT IS USED FOR COMPUTER EVALUATION OF YOUR CODE
    """
    results = my_info() + "\t\t"
    results += np.array_str(np.diagonal(simple_EC_classifier())) + "\t\t"
    results += np.array_str(np.diagonal(KNN()))
    print results + "\n"
Example #20
def export_off(mesh):
    faces_stacked = np.column_stack((np.ones(len(mesh.faces))*3, mesh.faces)).astype(np.int64)
    export = 'OFF\n'
    export += str(len(mesh.vertices)) + ' ' + str(len(mesh.faces)) + ' 0\n'
    export += np.array_str(mesh.vertices, precision=9).replace('[', '').replace(']', '').strip()
    export += np.array_str(faces_stacked).replace('[', '').replace(']', '').strip()

    return export
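export_off relies on np.array_str printing a 2-D array one row per line, so that stripping the square brackets leaves a plain whitespace-separated block of numbers. A sketch of that trick with made-up vertex coordinates:

import numpy as np

verts = np.array([[0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0]])
# Removing '[' and ']' turns the printed array into bare "x y z" rows,
# which is the layout text formats such as OFF expect.
block = np.array_str(verts, precision=9).replace('[', '').replace(']', '').strip()
print(block)

Note that np.array_str honours NumPy's print threshold, so a large mesh would be truncated with an ellipsis unless np.set_printoptions(threshold=...) has been raised beforehand.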
Example #21
  def ecologyMetric(self):
    '''Measuring the ecology potential in the population.'''

    unique_gen = set()
    for org in self.orgs:
      if numpy.array_str(org.genome) not in unique_gen:
        unique_gen.add(numpy.array_str(org.genome))

    return len(unique_gen)
Example #22
 def __str__(self, precision=3):
     p = 'u={:s}\nb={:.{p}f}\nW={:s}\nv={:.{p}f}'.format(np.array_str(self.u, precision=precision),
                                                         self.b,
                                                         np.array_str(self.W, precision=precision),
                                                         self.v,
                                                         p=precision)
     e = 'E[u]={:s}\nE[l]={:s}\nE[l\']={:s}'.format(np.array_str(self.u, precision=precision),
                                                    np.array_str(self.W*self.v, precision=precision),
                                                    np.array_str(np.linalg.inv(self.W*self.v), precision=precision))
     return p + '\n\n' + e
Example #23
def append_portfolio_sphere(filename, assets, y, s, eta, radius, u, obj, sol, g, epsilon=1e-6):
    feasible = all(g > -epsilon)

    u_str = np.array_str(u).replace('\n', '')
    sol_str = np.array_str(sol).replace('\n', '')
    g_str = np.array_str(g).replace('\n', '')

    record = ','.join(map(str, [assets, y, s, eta, radius, u_str, obj, sol_str, g_str, feasible]))
    with open(filename, 'a') as portfolio_file:
        portfolio_file.write(record + '\n')
Example #24
def log_results(si):
    pyrklog.info("\nReactivity : \n"+str(si.ne._rho))
    pyrklog.info("\nFinal Result : \n"+np.array_str(si.y))
    for comp in si.components:
        pyrklog.info("\n" + comp.name + ":\n" + np.array_str(comp.T.magnitude))
    pyrklog.info("\nPrecursor lambdas: \n"+str(si.ne._pd.lambdas()))
    pyrklog.info("\nDelayed neutron frac: \n"+str(si.ne._pd.beta()))
    pyrklog.info("\nPrecursor betas: \n"+str(si.ne._pd.betas()))
    pyrklog.info("\nDecay kappas: \n"+str(si.ne._dd.kappas()))
    pyrklog.info("\nDecay lambdas: \n"+str(si.ne._dd.lambdas()))
def main():
    """
    DO NOT TOUCH THIS FUNCTION. IT IS USED FOR COMPUTER EVALUATION OF YOUR CODE
    """
    conf_matrix1 = one_vs_all()
    conf_matrix2 = all_vs_all()
    results = my_info() + '\t\t'
    results += np.array_str(np.diagonal(conf_matrix1)) + '\t\t'
    results += np.array_str(np.diagonal(conf_matrix2))
    print results + '\t\t'
Example #26
def pretty_print(image_example):
    """ Pretty prints an MNIST training example.

    Parameters:
        image_example: a 1x784 numpy array corresponding to the features of
                       a single image.

    Returns:
        None.
    """
    print numpy.array_str(image_example, precision=1, max_line_width=142)
Example #27
def genPEtable(t1,t2,filename):
    import re
    np.set_printoptions(threshold=1e99)
    t1s = 't1 = \n ' + re.sub('[\[\]]', '', np.array_str(t1))
    t2s = 't2 = \n ' + re.sub('[\[\]]', '', np.array_str(t2))
    ts = t1s + '\n' + t2s
    out = open(filename,'w')
    out.write(ts)
    out.close()
    np.set_printoptions(threshold=1e3)
    
Example #28
def svm_learner(budget):
    accuracy = []
    data = csv_reader('resources/pool.csv')
    testset = csv_reader('resources/testSet.csv')
    true_labels = oracle.read_mat()
    used = {}

    # do nothing about model until reasonable training subset achieved
    [row, col] = data.shape
    preds = np.zeros(row)
    selected = []
    labels = []
    query = 0
    # query each point until get one with label 1
    while 1:
        r = compound.next_compound(data)
        r_str = np.array_str(np.char.mod('%d', r))
        if r_str[1: (len(r_str) - 1)] not in used:
            r_label = oracle.oracle2(r, data)
            query += 1
            used[r_str[1: (len(r_str) - 1)]] = r_label
            selected.append(r.tolist())
            labels.append(r_label)
            accuracy.append(error.generalization_error(preds, true_labels))
            if np.sum(labels) == 1 and len(labels) > 1:
                accuracy.pop()
                break
    x = np.array(selected)
    y = np.array(labels)
    clf = SVC(kernel='linear')
    clf.fit(x, y)
    preds = clf.predict(data)
    accuracy.append(error.generalization_error(preds, true_labels))

    num = 2543 - len(used)
    i = 0
    while i < num and query < budget:
        r = compound.next_compound(data)
        r_str = np.array_str(np.char.mod('%d', r))
        if r_str[1: (len(r_str) - 1)] not in used:
            i += 1
            distance = clf.decision_function(r)
            if np.abs(distance[0]) <= 0.78:
                x = np.vstack([x, r])
                r_label = oracle.oracle2(r, data)
                y = np.hstack([y.tolist(), r_label])
                query += 1
                clf.fit(x, y)
                preds = clf.predict(testset)
                accuracy.append(error.test_error(preds, true_labels))
    plt.plot(accuracy)
    plt.show()
    print f1_score(preds, true_labels[0:250])
    return
Example #29
 def __str__(self):
     mean_x = np.mean(self.X, 0)
     std_x = np.std(self.X, 0) 
     mean_y = np.mean(self.Y, 0)
     std_y = np.std(self.Y, 0) 
     prec = 4
     desc = ''
     desc += 'E[x] = %s \n'%(np.array_str(mean_x, precision=prec ) )
     desc += 'E[y] = %s \n'%(np.array_str(mean_y, precision=prec ) )
     desc += 'Std[x] = %s \n' %(np.array_str(std_x, precision=prec))
     desc += 'Std[y] = %s \n' %(np.array_str(std_y, precision=prec))
     return desc
Example #30
def decompose_reward(a, b):
    # a is ground truth, both are tokenized with padding
    # return shape: (time,)
    reward_hist = np.zeros((FLAGS.max_seq_len), dtype=np.float32)
    reward_gain = np.zeros((FLAGS.max_seq_len), dtype=np.float32)
    if b.size == 0:
        return reward_gain
    for i in range(1, FLAGS.max_seq_len):
        reward = 1 - compute_cer(np.array_str(a[:i]), np.array_str(b[:i]))
        reward_hist[i] = 0 if reward <= 0 else reward
    reward_gain[1:] = np.diff(reward_hist)  # first reward is always 0
    return reward_gain
Example #31
  def run(self):
    global bestSeedsUpdate
    global bestSeedsVFile
    global nMaterials
    global delta
    global points
    global target
    global match
    global baseFile
    global maxSeeds

    s.acquire()
    bestMatch = match
    s.release()

    random.seed(options.randomSeed+self.threadID)                                                   # initializes to given seeds
    knownSeedsUpdate = bestSeedsUpdate -1.0                                                         # trigger update of local best seeds
    randReset = True                                                                                # acquire new direction

    myBestSeedsVFile    = StringIO()                                                                # store local copy of best seeds file
    perturbedSeedsVFile = StringIO()                                                                # perturbed best seeds file

#--- still not matching desired bin class ----------------------------------------------------------
    while bestMatch < options.threshold:
      s.acquire()                                                                                   # ensure only one thread accesses global data
      if bestSeedsUpdate > knownSeedsUpdate:                                                        # write best fit to virtual file
        knownSeedsUpdate = bestSeedsUpdate
        bestSeedsVFile.seek(0)
        myBestSeedsVFile.close()
        myBestSeedsVFile = StringIO()
        i=0
        myBestSeedsVFile.writelines(bestSeedsVFile.readlines())
      s.release()

      if randReset:                                                                                 # new direction because current one led to worse fit

        randReset = False

        NmoveGrains = random.randrange(1,maxSeeds)
        selectedMs = []
        direction = []
        for i in range(NmoveGrains):
          selectedMs.append(random.randrange(1,nMaterials))

          direction.append((np.random.random()-0.5)*delta)

      perturbedSeedsVFile.close()                                                                   # reset virtual file
      perturbedSeedsVFile = StringIO()
      myBestSeedsVFile.seek(0)

      perturbedSeedsTable = damask.Table.load(myBestSeedsVFile)
      coords = perturbedSeedsTable.get('pos')
      i = 0
      for ms,coord in enumerate(coords):
        if ms in selectedMs:
          newCoords=coord+direction[i]
          newCoords=np.where(newCoords>=1.0,newCoords-1.0,newCoords)                                # ensure that the seeds remain in the box
          newCoords=np.where(newCoords <0.0,newCoords+1.0,newCoords)
          coords[i]=newCoords
          direction[i]*=2.
          i+= 1
      perturbedSeedsTable.set('pos',coords).save(perturbedSeedsVFile,legacy=True)

#--- do tesselation with perturbed seed file ------------------------------------------------------
      perturbedGeom = damask.Grid.from_Voronoi_tessellation(options.grid,np.ones(3),coords)


#--- evaluate current seeds file ------------------------------------------------------------------
      myNmaterials = len(np.unique(perturbedGeom.material))
      currentData = np.bincount(perturbedGeom.material.ravel())[1:]/points
      currentError=[]
      currentHist=[]
      for i in range(nMaterials):                                                             # calculate the deviation in all bins per histogram
        currentHist.append(np.histogram(currentData,bins=target[i]['bins'])[0])
        currentError.append(np.sqrt(np.square(np.array(target[i]['histogram']-currentHist[i])).sum()))

# as long as not all grains are within the range of the target, use the deviation to left and right as error
      if currentError[0]>0.0:
        currentError[0] *=((target[0]['bins'][0]-np.min(currentData))**2.0+
                           (target[0]['bins'][1]-np.max(currentData))**2.0)**0.5                    # norm of deviations by number of usual bin deviation
      s.acquire()                                                                                   # do the evaluation serially
      bestMatch = match
#--- count bin classes with no mismatch ----------------------------------------------------------------------
      myMatch=0
      for i in range(nMaterials):
        if currentError[i] > 0.0: break
        myMatch = i+1

      if myNmaterials == nMaterials:
        for i in range(min(nMaterials,myMatch+options.bins)):
          if currentError[i] > target[i]['error']:                                                  # worse fitting, next try
            randReset = True
            break
          elif currentError[i] < target[i]['error']:                                                # better fit
            bestSeedsUpdate = time.time()                                                           # save time of better fit
            damask.util.croak('Thread {:d}: Better match ({:d} bins, {:6.4f} --> {:6.4f})'\
                                          .format(self.threadID,i+1,target[i]['error'],currentError[i]))
            damask.util.croak('          target: '+np.array_str(target[i]['histogram']))
            damask.util.croak('          best:   '+np.array_str(currentHist[i]))
            currentSeedsName = baseFile+'_'+str(bestSeedsUpdate).replace('.','-')                   # name of new seed file (use time as unique identifier)
            perturbedSeedsVFile.seek(0)
            bestSeedsVFile.close()
            bestSeedsVFile = StringIO()
            sys.stdout.flush()
            with open(currentSeedsName+'.seeds','w') as currentSeedsFile:                           # write to new file
              for line in perturbedSeedsVFile:
                currentSeedsFile.write(line)
                bestSeedsVFile.write(line)
            for j in range(nMaterials):                                                       # save new errors for all bins
              target[j]['error'] = currentError[j]
            if myMatch > match:                                                                     # one or more new bins have no deviation
              damask.util.croak( 'Stage {:d}  cleared'.format(myMatch))
              match=myMatch
              sys.stdout.flush()
            break
          if i == min(nMaterials,myMatch+options.bins)-1:                                     # same quality as before: take it to keep on moving
            bestSeedsUpdate = time.time()
            perturbedSeedsVFile.seek(0)
            bestSeedsVFile.close()
            bestSeedsVFile = StringIO()
            bestSeedsVFile.writelines(perturbedSeedsVFile.readlines())
            for j in range(nMaterials):
              target[j]['error'] = currentError[j]
            randReset = True
      else:                                                                                         #--- not all grains are tessellated
        damask.util.croak('Thread {:d}: Material mismatch ({:d} material indices mapped)'\
                                                         .format(self.threadID,myNmaterials))
        randReset = True


      s.release()
Example #32
 plt.grid()
 plt.savefig(results_path + "/roc_curve", bbox_inches="tight")
 plt.clf()
 # Plot confusion matrix
 from sklearn.metrics import classification_report, confusion_matrix
 y_valid_inv = ohe.inverse_transform(y_valid)
 y_pred_inv = ohe.inverse_transform(y_pred)
 with open(results_path + "/classification_report.txt", "w") as file:
     file.write(
         classification_report(y_valid_inv,
                               y_pred_inv,
                               target_names=ohe.categories_[0]))
 cm = confusion_matrix(y_valid_inv, y_pred_inv)
 cm_normalized = confusion_matrix(y_valid_inv, y_pred_inv, normalize="true")
 with open(results_path + "/confusion_matrix.txt", "w") as file:
     file.write(np.array_str(cm))
 with open(results_path + "/confusion_matrix_normalized.txt", "w") as file:
     file.write(np.array_str(cm_normalized))
 cm_df = pd.DataFrame(cm, columns=ohe.categories_[0])
 import seaborn as sns
 plt.figure(figsize=(10, 7))
 ax = plt.subplot()
 sns.heatmap(cm, annot=True, fmt="g", ax=ax)
 ax.set_xlabel('Predicted labels')
 ax.set_ylabel('True labels')
 ax.set_title('Confusion Matrix')
 ax.xaxis.set_ticklabels(ohe.categories_[0])
 ax.yaxis.set_ticklabels(ohe.categories_[0])
 plt.setp(ax.get_xticklabels(), rotation=30, horizontalalignment='right')
 plt.setp(ax.get_yticklabels(), rotation=30, horizontalalignment='right')
 plt.savefig(results_path + "/confusion_matrix", bbox_inches="tight")
Example #33
# 	print '	<td>%s</td>' % dec
# 	print '	<td>%s</td>' % glon
# 	print '	<td>%s</td>' % glat
# 	print '	<td>%s</td>' % flux1000_3000
# 	print '	<td>%s</td>' % spectral_Index
# 	print '	<td>%s</td>' % spectral_Index_Error
# 	print '	<td>%s</td>' % class1
# 	print '	<td><a href="#">Data Access</a></td>'
# 	print '	<td><a href="FluxHistory.html?Source=%s" onclick="window.open(this.href,\'targetWindow\',\'width=1100px, height=600px\'); return false;">Light Curve</a></td>' % sourceNameCompact
# 	print '</tr>'
# 	if n == 10:
# 		break
# 	else:
# 		n = n + 1

MET_String = numpy.array_str(METs).replace('   ', ', ').replace('\n', ' ')
MJDs_String = repr(MJDs).replace('array(', '').replace('\n',
                                                       '').replace(')', '')

n = 0
for sourceName, fluxHistory, fluxHistoryError in zip(
        SourceName, FluxHistoryNormalized, Unc_Flux_HistoryNormalized):

    sourceNameCompact = sourceName.replace('3FGL ', '3FGL_')

    fluxHistoryErrorAbsolute = []

    for ydatum, error in zip(fluxHistory, fluxHistoryError):

        errorMin = error[0]
        errorMax = error[1]
Example #34
    def _processPreliminaryTracks(self, measurement_list,
                                  ais_measurement_list):
        tic = time.time()
        newInitialTargets = []
        radarMeasTime = measurement_list.time
        measurement_array = np.array(measurement_list.measurements,
                                     dtype=np.float32)

        # Predict position
        if self.last_timestamp is not None:
            dt = radarMeasTime - self.last_timestamp
            F = pv.Phi(dt)
            Q = pv.Q(dt)
            for track in self.preliminary_tracks:
                track.predict(F, Q)
        else:
            assert len(self.preliminary_tracks) == 0, "Undefined situation"

        existingMmsiList = [
            t.mmsi for t in self.preliminary_tracks if t.mmsi is not None
        ]
        existingMmsiSet = set(existingMmsiList)
        assert len(existingMmsiList) == len(
            existingMmsiSet), "Duplicate MMSI in preliminaryTracks"
        for measurement in ais_measurement_list:
            if measurement.mmsi in existingMmsiSet:
                continue
            dT = radarMeasTime - measurement.time
            state, covariance = measurement.predict(dT)
            tempTrack = PreliminaryTrack(state, covariance, measurement.mmsi)
            tempTrack.predicted_state = state
            nisList = [
                p.compareSimilarity(tempTrack) for p in self.preliminary_tracks
            ]
            threshold = 1.0
            if not any([s <= threshold for s in nisList]):
                self.preliminary_tracks.append(tempTrack)
            else:
                log.debug(
                    "Discarded new AIS preliminaryTrack because it was to similar"
                    + str([e for e in nisList if e <= threshold]) +
                    str(tempTrack))

        log.info("_processPreliminaryTracks " +
                 str(len(self.preliminary_tracks)))

        predicted_states = np.array([
            track.get_predicted_state_and_clear()
            for track in self.preliminary_tracks
        ],
                                    ndmin=2,
                                    dtype=np.float32)
        # Check for something to work on
        n1 = len(self.preliminary_tracks)
        n2 = measurement_array.shape[0]
        n3 = measurement_array.size
        if n1 == 0:
            return np.arange(n2).tolist(), newInitialTargets
        if len(ais_measurement_list) == 0 and (n2 == 0 or n3 == 0):
            return np.arange(n2).tolist(), newInitialTargets

        # Calculate delta matrix
        delta_matrix = np.ones((n1, n2), dtype=np.float32) * np.Inf
        for i, predicted_state in enumerate(predicted_states):
            predicted_measurement = self.C.dot(predicted_state)
            delta_vector = measurement_array - predicted_measurement
            distance_vector = np.linalg.norm(delta_vector, axis=1)
            P_bar = self.preliminary_tracks[i].covariance
            S = self.C.dot(P_bar).dot(self.C.T) + self.R
            S_inv = np.linalg.inv(S)
            K = P_bar.dot(self.C.T).dot(S_inv)
            self.preliminary_tracks[i].K = K
            nis_vector = np.sum(np.matmul(delta_vector, S_inv) * delta_vector,
                                axis=1)
            inside_gate_vector = nis_vector <= self.gamma
            delta_matrix[
                i, inside_gate_vector] = distance_vector[inside_gate_vector]

        # Assign measurements
        log.debug("\n" + np.array_str(delta_matrix, max_line_width=120))
        assignments = _solve_global_nearest_neighbour(delta_matrix)

        # Update tracks
        for track_index, meas_index in assignments:
            P_bar = self.preliminary_tracks[track_index].covariance
            K = self.preliminary_tracks[track_index].K
            delta_vector = measurement_array[meas_index] - self.C.dot(
                predicted_states[track_index])
            filtered_state = predicted_states[track_index] + K.dot(
                delta_vector)
            P_hat = P_bar - K.dot(self.C).dot(P_bar)
            self.preliminary_tracks[track_index].state = filtered_state
            self.preliminary_tracks[track_index].covariance = P_hat
            self.preliminary_tracks[track_index].m += 1
            self.preliminary_tracks[track_index].measurement_index = meas_index

        # Add dummy measurement to un-assigned tracks, and increase covariance
        assigned_track_indices = [assignment[0] for assignment in assignments]
        unassigned_track_indices = [
            track_index for track_index in range(len(self.preliminary_tracks))
            if track_index not in assigned_track_indices
        ]
        for track_index in unassigned_track_indices:
            self.preliminary_tracks[track_index].state = predicted_states[
                track_index]

        # Increase all N
        for track in self.preliminary_tracks:
            track.n += 1

        log.debug("Preliminary tracks " + self.getPreliminaryTracksString())

        #Evaluate destiny
        removeIndices = []
        for track_index, track in enumerate(self.preliminary_tracks):
            track_status = track.mn_analysis(self.M, self.N)
            track_speed = track.get_speed()
            if track_speed > self.v_max * 1.5:
                log.warning("Removing TOO FAST track ({0:6.1f} m/s) i={1:}".
                            format(track_speed, track_index) + "\n" +
                            repr(track))
                removeIndices.append(track_index)
            elif track_status == DEAD:
                # log.debug("Removing DEAD track " + str(track_index))
                removeIndices.append(track_index)
            elif track_status == CONFIRMED:
                log.debug("Removing CONFIRMED track " + str(track_index))
                new_target = Target(
                    radarMeasTime,
                    None,
                    np.array(track.state),
                    track.covariance,
                    measurementNumber=track.measurement_index + 1,
                    measurement=measurement_array[track.measurement_index])
                log.debug("Spawning new (initial) Target: " + str(new_target) +
                          " Covariance:\n" + np.array_str(track.covariance))
                newInitialTargets.append(new_target)
                removeIndices.append(track_index)

        #Remove dead preliminaryTracks
        for i in reversed(removeIndices):
            self.preliminary_tracks.pop(i)
        if removeIndices:
            log.debug(self.getPreliminaryTracksString())

        #Return unused radar measurement indices
        used_radar_indices = [assignment[1] for assignment in assignments]
        unused_radar_indices = [
            index for index in np.arange(n2) if index not in used_radar_indices
        ]

        toc = time.time() - tic
        log.debug("_processPreliminaryTracks runtime: {:.1f}ms".format(toc *
                                                                       1000))
        return unused_radar_indices, newInitialTargets
Example #35
    def fit(self,
            source,
            destination,
            order=4,
            reg=1e-5,
            center=True,
            match='oct5',
            verbose=None):
        """Fit the warp from source points to destination points.

        Parameters
        ----------
        source : array, shape (n_src, 3)
            The source points.
        destination : array, shape (n_dest, 3)
            The destination points.
        order : int
            Order of the spherical harmonic fit.
        reg : float
            Regularization of the TPS warp.
        center : bool
            If True, center the points by fitting a sphere to points
            that are in a reasonable region for head digitization.
        match : str
            The uniformly-spaced points to match on the two surfaces.
            Can be "ico#" or "oct#" where "#" is an integer.
            The default is "oct5".
        verbose : bool, str, int, or None
            If not None, override default verbose level (see
            :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
            for more).

        Returns
        -------
        inst : instance of SphericalSurfaceWarp
            The warping object (for chaining).
        """
        from .bem import _fit_sphere
        from .source_space import _check_spacing
        match_rr = _check_spacing(match, verbose=False)[2]['rr']
        logger.info('Computing TPS warp')
        src_center = dest_center = np.zeros(3)
        if center:
            logger.info('    Centering data')
            hsp = np.array(
                [p for p in source if not (p[2] < -1e-6 and p[1] > 1e-6)])
            src_center = _fit_sphere(hsp, disp=False)[1]
            source = source - src_center
            hsp = np.array(
                [p for p in destination if not (p[2] < 0 and p[1] > 0)])
            dest_center = _fit_sphere(hsp, disp=False)[1]
            destination = destination - dest_center
            logger.info('    Using centers %s -> %s' % (np.array_str(
                src_center, None, 3), np.array_str(dest_center, None, 3)))
        self._fit_params = dict(n_src=len(source),
                                n_dest=len(destination),
                                match=match,
                                n_match=len(match_rr),
                                order=order,
                                reg=reg)
        assert source.shape[1] == destination.shape[1] == 3
        self._destination = destination.copy()
        # 1. Compute spherical coordinates of source and destination points
        logger.info('    Converting to spherical coordinates')
        src_rad_az_pol = _cart_to_sph(source).T
        dest_rad_az_pol = _cart_to_sph(destination).T
        match_rad_az_pol = _cart_to_sph(match_rr).T
        del match_rr
        # 2. Compute spherical harmonic coefficients for all points
        logger.info('    Computing spherical harmonic approximation with '
                    'order %s' % order)
        src_sph = _compute_sph_harm(order, *src_rad_az_pol[1:])
        dest_sph = _compute_sph_harm(order, *dest_rad_az_pol[1:])
        match_sph = _compute_sph_harm(order, *match_rad_az_pol[1:])
        # 3. Fit spherical harmonics to both surfaces to smooth them
        src_coeffs = linalg.lstsq(src_sph, src_rad_az_pol[0])[0]
        dest_coeffs = linalg.lstsq(dest_sph, dest_rad_az_pol[0])[0]
        # 4. Smooth both surfaces using these coefficients, and evaluate at
        #     the "shape" points
        logger.info('    Matching %d points (%s) on smoothed surfaces' %
                    (len(match_sph), match))
        src_rad_az_pol = match_rad_az_pol.copy()
        src_rad_az_pol[0] = np.abs(np.dot(match_sph, src_coeffs))
        dest_rad_az_pol = match_rad_az_pol.copy()
        dest_rad_az_pol[0] = np.abs(np.dot(match_sph, dest_coeffs))
        # 5. Convert matched points to Cartesian coordinates and put back
        source = _sph_to_cart(src_rad_az_pol.T)
        source += src_center
        destination = _sph_to_cart(dest_rad_az_pol.T)
        destination += dest_center
        # 6. Compute TPS warp of matched points from smoothed surfaces
        self._warp = _TPSWarp().fit(source, destination, reg)
        self._matched = np.array([source, destination])
        logger.info('[done]')
        return self
Example #36
    def run(self):
        """
        Run the optimization
        @return: Nothing
        """
        # self.it = 0
        # n = len(self.circuit.buses)
        # m = len(self.circuit.branches)
        # self.xlow = zeros(n)  # lower bounds
        # self.xup = ones(n)  # upper bounds
        # self.progress_signal.emit(0.0)
        # self.progress_text.emit('Running stochastic voltage collapse...')
        # self.results = MonteCarloResults(n, m, self.max_eval)

        self.problem = VoltageOptimizationProblem(
            self.circuit,
            self.options,
            self.max_iter,
            callback=self.progress_signal.emit)

        # # (1) Optimization problem
        # # print(data.info)
        #
        # # (2) Experimental design
        # # Use a symmetric Latin hypercube with 2d + 1 samples
        # exp_des = SymmetricLatinHypercube(dim=self.problem.dim, npts=2 * self.problem.dim + 1)
        #
        # # (3) Surrogate model
        # # Use a cubic RBF interpolant with a linear tail
        # surrogate = RBFInterpolant(kernel=CubicKernel, tail=LinearTail, maxp=self.max_eval)
        #
        # # (4) Adaptive sampling
        # # Use DYCORS with 100d candidate points
        # adapt_samp = CandidateDYCORS(data=self, numcand=100 * self.dim)
        #
        # # Use the serial controller (uses only one thread)
        # controller = SerialController(self.objfunction)
        #
        # # (5) Use the sychronous strategy without non-bound constraints
        # strategy = SyncStrategyNoConstraints(worker_id=0,
        #                                      data=self,
        #                                      maxeval=self.max_eval,
        #                                      nsamples=1,
        #                                      exp_design=exp_des,
        #                                      response_surface=surrogate,
        #                                      sampling_method=adapt_samp)
        #
        # controller.strategy = strategy
        #
        # # Run the optimization strategy
        # result = controller.run()
        #
        # # Print the final result
        # print('Best value found: {0}'.format(result.value))
        # print('Best solution found: {0}'.format(np.array_str(result.params[0], max_line_width=np.inf, precision=5,
        #                                                      suppress_small=True)))

        num_threads = 4

        surrogate_model = GPRegressor(dim=self.problem.dim)
        sampler = SymmetricLatinHypercube(dim=self.problem.dim,
                                          num_pts=2 * (self.problem.dim + 1))

        # Create a strategy and a controller
        controller = ThreadController()
        controller.strategy = SRBFStrategy(max_evals=self.max_iter,
                                           opt_prob=self.problem,
                                           exp_design=sampler,
                                           surrogate=surrogate_model,
                                           asynchronous=True,
                                           batch_size=num_threads)

        print("Number of threads: {}".format(num_threads))
        print("Maximum number of evaluations: {}".format(self.max_iter))
        print("Strategy: {}".format(controller.strategy.__class__.__name__))
        print("Experimental design: {}".format(sampler.__class__.__name__))
        print("Surrogate: {}".format(surrogate_model.__class__.__name__))

        # Launch the threads and give them access to the objective function
        for _ in range(num_threads):
            worker = BasicWorkerThread(controller, self.problem.eval)
            controller.launch_worker(worker)

        # Run the optimization strategy
        result = controller.run()

        print('Best value found: {0}'.format(result.value))
        print('Best solution found: {0}\n'.format(
            np.array_str(result.params[0],
                         max_line_width=np.inf,
                         precision=4,
                         suppress_small=True)))

        self.solution = result.params[0]

        # Extract function values from the controller
        self.optimization_values = np.array(
            [o.value for o in controller.fevals])

        # send the finish signal
        self.progress_signal.emit(0.0)
        self.progress_text.emit('Done!')
        self.done_signal.emit()
Example #37
def main():
    device = 'cuda'  # if torch.cuda.is_available() else 'cpu'
    start_epoch = 0  # start from epoch 0 or last checkpoint epoch

    # Data
    logger.info('==> Preparing data..')
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision.datasets.CIFAR10(root='./data',
                                            train=True,
                                            download=True,
                                            transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=2)

    testset = torchvision.datasets.CIFAR10(root='./data',
                                           train=False,
                                           download=True,
                                           transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=100,
                                             shuffle=False,
                                             num_workers=2)

    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')
    # Model
    logger.info('==> Building model..')
    net = getattr(models, args.net_type)(net_arch)
    # net = VGG('VGG19')
    # net = ResNet18()
    # net = CifarResNetBasic([1,1,1])
    # net = PreActResNet18()
    # net = GoogLeNet()
    # net = DenseNet121()
    # net = ResNeXt29_2x64d()
    # net = MobileNet()
    # net = MobileNetV2()
    # net = DPN92()
    # net = ShuffleNetG2()
    # net = SENet18()

    net = net.to(device)
    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True

    if args.resume:
        # Load checkpoint.
        logger.info('==> Resuming from checkpoint..')
        assert os.path.isdir(
            'checkpoint'), 'Error: no checkpoint directory found!'
        checkpoint = torch.load('./checkpoint/ckpt.t7')
        net.load_state_dict(checkpoint['net'])
        best_acc = checkpoint['acc']
        start_epoch = checkpoint['epoch']

    criterion = nn.CrossEntropyLoss()
    optimizer = get_optimizer(net)

    curves = np.zeros((args.epochs, 5))
    for epoch in range(start_epoch, start_epoch + args.epochs):
        if epoch == args.epochs / 2 or epoch == args.epochs * 3 / 4:
            logger.info('======> decaying learning rate')
            decay_learning_rate(optimizer)

        curves[epoch, 0] = epoch
        curves[epoch, 1], curves[epoch, 2] = train(epoch, net, trainloader,
                                                   optimizer, criterion)
        curves[epoch, 3], curves[epoch, 4] = test(epoch, net, testloader,
                                                  criterion)
        if epoch % 5 == 0:
            plot_curves(curves[:epoch + 1, :], save_path)

    plot_curves(curves, save_path)
    logger.info('curves: \n {}'.format(np.array_str(curves)))
    np.savetxt(os.path.join(save_path, 'curves.dat'), curves)
Example #38
        #if rank==0:
        #    print(params["conv1.bias"].grad)
        #print('Success')

        # gather the state dictionaries from all the processes in order to the SGD in the root process
        gradients_list = comm.gather(gradients, root=0)
        print(gradients_list[0]["conv1.bias"]) if rank == 0 else ""
        with open(
                os.path.join(os.environ["HOME"],
                             "process_" + str(rank) + ".txt"), "a") as f:
            f.write("process " + str(rank) + "\n")
            f.write("======================\n")
            if rank == 0:
                f.write("conv1.bias before update\n\n")
                f.write(np.array_str(params["conv1.bias"].data.numpy()))
                f.write("\n")
            f.write("======================\n")
            f.write("conv1.bias gradient\n\n")
            f.write(np.array_str(params["conv1.bias"].grad.data.numpy()))
            f.write("\n======================\n")

        if rank == 0:
            #print(params_list[0]["conv1.bias"].grad)
            #print(len(params_list[0]))
            for gradient in gradients_list:
                print(len(gradient))
                for key in gradient.keys():
                    print(key)
                    #print(param[key].grad) if key=="conv1.bias" else ""
                    #print(param[key].grad)
Example #39
def Classify(cls, clf, spmd, md):

    #Start moment
    Start_moment = time.time()
    title = 'Classifying ' + cls + ' using W2Vec ' + clf + '-' + spmd + '_' + md
    print(title)

    #Loading model
    print('Loading model', spmd, md)
    model = KeyedVectors.load_word2vec_format(Model_filename(spmd, md),
                                              unicode_errors="ignore")
    model.init_sims(replace=True)

    #Creating the K-fold cross validator
    K_fold = KFold(n_splits=10, shuffle=True)

    # Labels
    test_labels = np.array([], 'int32')
    test_pred = np.array([], 'int32')

    # Confusion Matrix
    confusion = []
    if (cls == 'age' or cls == 'relig'):
        confusion = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
    elif (cls == 'gender' or cls == 'ti'):
        confusion = np.array([[0, 0], [0, 0]])
    else:
        raise NameError('Confusion Matrix Unavailable')

    # The test
    print('Running .... =)')
    for train_indices, test_indices in K_fold.split(I):

        X_train = word_averaging_list(model, train_indices)
        Y_train = np.array(Tags(train_indices))

        X_test = word_averaging_list(model, test_indices)
        Y_test = np.array(Tags(test_indices))
        test_labels = np.append(test_labels, Y_test)

        classifier = Create_Classifier(clf)
        Train_Classifier(classifier, X_train, Y_train)

        pred = classifier.predict(X_test)
        test_pred = np.append(test_pred, pred)
        confusion += confusion_matrix(Y_test, pred)

    report = classification_report(test_labels,
                                   test_pred,
                                   target_names=age_train.target_names)

    print(report)
    print("Confusion matrix:")
    print(confusion)
    Finish_moment = time.time()
    tm = "It took " + str((Finish_moment - Start_moment)) + " seconds"
    print(tm)

    f = open(Output_string(cls, spmd, md, clf), 'w+')

    f.write(title + '\n \n')
    f.write(report + '\n \n')
    f.write("Confusion Matrix: \n")
    f.write(np.array_str(confusion) + '\n \n')
    f.write(tm)
    f.close()

    gc.collect()
Beispiel #40
0
 def test_array_str(self):
     a = testing.shaped_arange((2, 3, 4), cupy)
     b = testing.shaped_arange((2, 3, 4), numpy)
     assert cupy.array_str(a) == numpy.array_str(b)
def compare(expected, computed, label=None, *, quiet=False, return_message=False):
    """Returns True if two integers, strings, booleans, or integer arrays are element-wise equal.

    Parameters
    ----------
    expected : int, bool, str or int array-like
        Reference value against which `computed` is compared.
    computed : int, bool, str or int array-like
        Input value to compare against `expected`.
    label : str, optional
        Label for passed and error messages. Defaults to calling function name.

    Returns
    -------
    allclose : bool
        Returns True if `expected` and `computed` are equal; False otherwise.
    message : str, optional
        When return_message=True, also return passed or error message.

    Notes
    -----
    Akin to np.array_equal.

    """
    label = label or sys._getframe().f_back.f_code.co_name
    pass_message = f'\t{label:.<66}PASSED'

    try:
        xptd, cptd = np.array(expected), np.array(computed)
    except Exception:
        return _handle_return(False, label, f"""\t{label}: inputs not cast-able to ndarray.""", return_message, quiet)

    if xptd.shape != cptd.shape:
        return _handle_return(False, label,
                              f"""\t{label}: computed shape ({cptd.shape}) does not match ({xptd.shape}).""",
                              return_message, quiet)

    isclose = np.asarray(xptd == cptd)
    allclose = bool(isclose.all())

    if allclose:
        message = pass_message

    else:
        if xptd.shape == ():
            xptd_str = f'{xptd}'
        else:
            xptd_str = np.array_str(xptd, max_line_width=120, precision=12, suppress_small=True)
            xptd_str = '\n'.join('    ' + ln for ln in xptd_str.splitlines())

        if cptd.shape == ():
            cptd_str = f'{cptd}'
        else:
            cptd_str = np.array_str(cptd, max_line_width=120, precision=12, suppress_small=True)
            cptd_str = '\n'.join('    ' + ln for ln in cptd_str.splitlines())

        try:
            diff = cptd - xptd
        except TypeError:
            diff_str = '(n/a)'
        else:
            if xptd.shape == ():
                diff_str = f'{diff}'
            else:
                diff_str = np.array_str(diff, max_line_width=120, precision=12, suppress_small=False)
                diff_str = '\n'.join('    ' + ln for ln in diff_str.splitlines())

        if xptd.shape == ():
            message = """\t{}: computed value ({}) does not match ({}) by difference ({}).""".format(
                label, cptd_str, xptd_str, diff_str)
        else:
            message = """\t{}: computed value does not match.\n  Expected:\n{}\n  Observed:\n{}\n  Difference:\n{}\n""".format(
                label, xptd_str, cptd_str, diff_str)

    return _handle_return(allclose, label, message, return_message, quiet)
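A brief usage sketch for `compare` as defined above (assuming `_handle_return` hands back the boolean, or a (boolean, message) pair when `return_message=True`):

import numpy as np

# Passing case: identical integer arrays.
compare(np.array([1, 2, 3]), np.array([1, 2, 3]), label='ints equal')

# Failing case: the message shows expected, observed and difference blocks,
# each rendered with np.array_str(..., max_line_width=120, precision=12).
ok, msg = compare(np.array([1, 2, 3]), np.array([1, 2, 4]),
                  label='ints differ', return_message=True)
print(ok)   # False
print(msg)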
Beispiel #42
0
    def xcorr_circular(self, other):
        """Circular cross correlation. The difference between circular cross
        correlation and regular correlation is that with circular correlation
        we assume that the signal is repeating.
        
        The input data "other" has to be the same size as a full sequence.
        
        Returns the (normalised) impulse response of length L.
        """

        #    Example:
        #
        #    _MLS_base(N=3, taps=(3, 2))
        #
        #    reference  : [[ 1. -1. -1. -1.  1.  1. -1.]]
        #
        #    <do cross-correlation>  <-- in this example this is the auto correlation
        #
        #    xcorr      : [[-1.  2.  1. -2. -3.  0.  7.  0. -3. -2.  1.  2. -1.]]
        #    x1 (view)  :                             [[ 0. -3. -2.  1.  2. -1.]]
        #    x2 (view)  : [[-1.  2.  1. -2. -3.  0.]]
        #
        #    <assume sequence is circular>
        #
        #    x1 (view)  : [[ 0. -3. -2.  1.  2. -1.]]
        #    x2 (view)  : [[-1.  2.  1. -2. -3.  0.]]
        #    x2=x1+x2   :   -1. -1. -1. -1. -1. -1.
        #
        #    because we use a "view" of the array
        #
        #    xcorr      : [[-1.  2.  1. -2. -3.  0.  7. -1. -1. -1. -1. -1. -1.]]
        #
        #    norm       :                         [[ 7. -1. -1. -1. -1. -1. -1.]]
        #
        #    <normalise by L>
        #
        #    norm/L     : [[ 1.     -0.1429 -0.1429 -0.1429 -0.1429 -0.1429 -0.1429]]

        ref = self.get_full_sequence(repeats=1)

        # Correlation and convolution are related. Correlation in the time domain is *very*
        # slow for long sequences. Convolution in the frequency domain is much faster. We can
        # use the convolution method if we "undo" the flip of the input signal. This is
        # equivalent to the correlation method. There might be some small (insignificant)
        # rounding errors, but for a large array these should not be noticeable.
        #
        ### Flip the comments to verify that correlation and convolution (with input flip)
        ### are the same. Be careful: long sequences are slow to calculate using the
        ### correlation method.
        #xcorr   = scipy.signal.correlate(ref, other)
        xcorr = scipy.signal.fftconvolve(np.flipud(ref), other)

        self._logger.debug("ref: %s" % np.array_str(
            ref.T, max_line_width=200, precision=4, suppress_small=True))
        self._logger.debug("xc : %s" % np.array_str(
            xcorr.T, max_line_width=200, precision=4, suppress_small=True))

        del ref

        # slicing creates views which are cheap (points to the same array)
        x1 = xcorr[self.L:]  # right half (end)
        x2 = xcorr[:self.L - 1]  # left half (start)
        self._logger.debug("x1 : %s" % np.array_str(
            x1.T, max_line_width=200, precision=4, suppress_small=True))
        self._logger.debug("x2 : %s" % np.array_str(
            x2.T, max_line_width=200, precision=4, suppress_small=True))

        x1[:] = x1 + x2  # assume circular sequence
        self._logger.debug("xc : %s" % np.array_str(
            xcorr.T, max_line_width=200, precision=4, suppress_small=True))

        norm = xcorr[self.L - 1:]  # extract "impulse" + tail (right half)
        self._logger.debug("nrm: %s" % np.array_str(
            norm.T, max_line_width=200, precision=4, suppress_small=True))

        norm[:] = norm / self.L  # normalise so that max <= 1.0
        self._logger.debug("nrm: %s" % np.array_str(
            norm.T, max_line_width=200, precision=4, suppress_small=True))

        return norm
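The comment block above walks through why a linear cross correlation can be folded back onto itself when the excitation repeats. The same idea as a standalone sketch, using `scipy.signal.fftconvolve` with a flipped reference exactly as `xcorr_circular` does (the length-7 sequence is the one from the worked example in the comments):

import numpy as np
import scipy.signal

ref = np.array([1., -1., -1., -1., 1., 1., -1.])   # MLS reference from the comment
other = ref.copy()                                  # auto-correlation case
L = len(ref)

# Correlation via FFT convolution of the flipped reference (fast for long sequences).
xcorr = scipy.signal.fftconvolve(ref[::-1], other)

# Fold the left half onto the right half: the circular assumption.
x1 = xcorr[L:]        # right half (end)
x2 = xcorr[:L - 1]    # left half (start)
x1[:] = x1 + x2

norm = xcorr[L - 1:] / L
print(np.array_str(norm, precision=4, suppress_small=True))
# [ 1.     -0.1429 -0.1429 -0.1429 -0.1429 -0.1429 -0.1429]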
for k in range(np.shape(Z)[0]):
    y = Z[k][0]
    z = labels[k]
    y = np.array([y, z])
    DF1 = vstack((DF1, y))
DF1 = np.delete(DF1, 0, 0)
df1 = pd.DataFrame(DF1,
                   index=range(np.shape(Z)[0]),
                   columns=np.array(['TRIP_ID', 'CLUSTER_ID']))
df1.to_csv("clustering_result3.csv", index=None)
print("in xong file clustering_result3")

DF2 = np.zeros((1, 2))
for k in range(np.shape(cluster_centers)[0]):
    y = cluster_centers[k]
    y = np.array_str(y)
    y = np.array([k, y])
    DF2 = vstack((DF2, y))
DF2 = np.delete(DF2, 0, 0)
df2 = pd.DataFrame(DF2,
                   index=range(np.shape(cluster_centers)[0]),
                   columns=np.array(['CLUSTER_ID', 'COORDINATES']))
df2.to_csv("cluster_coordinates3.csv", index=None)
print("in xong file cluster_coordinates3")

import matplotlib.pyplot as plt
from itertools import cycle

plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
def f(x):
    w = tf.get_variable('w', [NUM_FEATURES, 1], tf.float32,
                        tf.random_normal_initializer())
    b = tf.get_variable('b', [], tf.float32, tf.zeros_initializer())

    return tf.squeeze(tf.matmul(x, w) + b)


x = tf.placeholder(tf.float32, [BATCH_SIZE, NUM_FEATURES])
y = tf.placeholder(tf.float32, [BATCH_SIZE])
y_hat = f(x)

loss = tf.reduce_mean(tf.pow(y_hat - y, 2))
optim = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(loss)
init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

data = Data()

for _ in tqdm(range(0, NUM_BATCHES)):
    x_np, y_np = data.get_batch()
    loss_np, _ = sess.run([loss, optim], feed_dict={x: x_np, y: y_np})

print("Parameter estimates:")
for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
    print(var.name.rstrip(":0"),
          np.array_str(np.array(sess.run(var)).flatten(), precision=3))
Beispiel #45
0
 def np_array_str(arr,precision):
   return np.array_str(arr, precision=precision, suppress_small = True, max_line_width = 200)
Beispiel #46
0
def onp_array_str(x):
    return onp.array_str(x)
    def learn(self, demo):
        config = self.config
        start_time = time.time()
        numeptotal = 0

        # Set up for training discriminator
        print "Loading data ..."
        imgs_d, auxs_d, actions_d = demo["imgs"], demo["auxs"], demo["actions"]
        numdetotal = imgs_d.shape[0]
        idx_d = np.arange(numdetotal)
        np.random.shuffle(idx_d)

        imgs_d = imgs_d[idx_d]
        auxs_d = auxs_d[idx_d]
        actions_d = actions_d[idx_d]
        print "Resizing img for demo ..."
        imgs_reshaped_d = []
        for i in xrange(numdetotal):
            imgs_reshaped_d.append(
                np.expand_dims(cv2.resize(imgs_d[i],
                                          (self.img_dim[0], self.img_dim[1])),
                               axis=0))
        imgs_d = np.concatenate(imgs_reshaped_d, axis=0).astype(np.float32)
        imgs_d = (imgs_d - 128.) / 128.
        print "Shape of resized demo images:", imgs_d.shape

        for i in xrange(1, config.n_iter):

            # Generating paths.
            if i == 1:
                paths_per_collect = 30
                print("batchsize", config.batch_size)
            else:
                paths_per_collect = 10
            rollouts = rollout_contin(self.env, self, self.feat_extractor,
                                      self.feat_dim, self.aux_dim,
                                      self.encode_dim, config.max_step_limit,
                                      config.pre_step, paths_per_collect,
                                      self.pre_actions, self.discriminate,
                                      self.posterior_target)

            for path in rollouts:
                self.buffer.add(path)
            print "Buffer count:", self.buffer.count()

            paths = self.buffer.get_sample(config.sample_size)

            print "Calculating actions ..."
            for path in paths:
                path["mus"] = self.sess.run(
                    self.action_dist_mu, {
                        self.feats: path["feats"],
                        self.auxs: path["auxs"],
                        self.encodes: path["encodes"]
                    })

            mus_n = np.concatenate([path["mus"] for path in paths])
            logstds_n = np.concatenate([path["logstds"] for path in paths])
            feats_n = np.concatenate([path["feats"] for path in paths])
            auxs_n = np.concatenate([path["auxs"] for path in paths])
            encodes_n = np.concatenate([path["encodes"] for path in paths])
            actions_n = np.concatenate([path["actions"] for path in paths])
            imgs_n = np.concatenate([path["imgs"] for path in paths])

            #print("DEBUGPRINTTTTTTTTTTTT",tf.size(actions_n))
            print "Epoch:", i, "Total sampled data points:", feats_n.shape[0]

            # Train discriminator
            numnototal = feats_n.shape[0]
            batch_size = config.batch_size
            start_d = self.demo_idx
            start_n = 0
            if i <= 5:
                d_iter = 120 - i * 20
            else:
                d_iter = 20
            for k in xrange(d_iter):
                loss = self.discriminator.train_on_batch([
                    imgs_n[start_n:start_n + batch_size],
                    auxs_n[start_n:start_n + batch_size],
                    actions_n[start_n:start_n + batch_size],
                    imgs_d[start_d:start_d + batch_size],
                    auxs_d[start_d:start_d + batch_size],
                    actions_d[start_d:start_d + batch_size]
                ], np.ones(batch_size))
                # print self.discriminator.summary()
                for l in self.discriminator.layers:
                    weights = l.get_weights()
                    weights = [
                        np.clip(w, config.clamp_lower, config.clamp_upper)
                        for w in weights
                    ]
                    l.set_weights(weights)

                start_d = self.demo_idx = self.demo_idx + batch_size
                start_n = start_n + batch_size

                if start_d + batch_size >= numdetotal:
                    start_d = self.demo_idx = (start_d +
                                               batch_size) % numdetotal
                if start_n + batch_size >= numnototal:
                    start_n = (start_n + batch_size) % numnototal

                print "Discriminator step:", k, "loss:", loss

            idx = np.arange(numnototal)
            np.random.shuffle(idx)
            train_val_ratio = 0.7
            # Training data for posterior
            numno_train = int(numnototal * train_val_ratio)
            imgs_train = imgs_n[idx][:numno_train]
            auxs_train = auxs_n[idx][:numno_train]
            actions_train = actions_n[idx][:numno_train]
            encodes_train = encodes_n[idx][:numno_train]
            # Validation data for posterior
            imgs_val = imgs_n[idx][numno_train:]
            auxs_val = auxs_n[idx][numno_train:]
            actions_val = actions_n[idx][numno_train:]
            encodes_val = encodes_n[idx][numno_train:]

            start_n = 0
            for j in xrange(config.p_iter):
                loss = self.posterior.train_on_batch([
                    imgs_train[start_n:start_n + batch_size],
                    auxs_train[start_n:start_n + batch_size],
                    actions_train[start_n:start_n + batch_size]
                ], encodes_train[start_n:start_n + batch_size])
                start_n += batch_size
                if start_n + batch_size >= numno_train:
                    start_n = (start_n + batch_size) % numno_train

                posterior_weights = self.posterior.get_weights()
                posterior_target_weights = self.posterior_target.get_weights()
                for k in xrange(len(posterior_weights)):
                    posterior_target_weights[k] = 0.5 * posterior_weights[k] +\
                            0.5 * posterior_target_weights[k]
                self.posterior_target.set_weights(posterior_target_weights)

                output_p = self.posterior_target.predict(
                    [imgs_val, auxs_val, actions_val])
                val_loss = -np.average(
                    np.sum(np.log(output_p) * encodes_val, axis=1))
                print "Posterior step:", j, "loss:", loss, val_loss

            # Computing returns and estimating advantage function.
            path_idx = 0
            for path in paths:
                file_path = "/home/Apurba/Documents/RL/InfoGAIL/wgail_info_1/weights_fourth/iter_%d_path_%d.txt" % (
                    i, path_idx)
                f = open(file_path, "w")
                path["baselines"] = self.baseline.predict(path)
                output_d = self.discriminate.predict(
                    [path["imgs"], path["auxs"], path["actions"]])
                output_p = self.posterior_target.predict(
                    [path["imgs"], path["auxs"], path["actions"]])
                path["rewards"] = np.ones(path["raws"].shape[0]) * 1.2 + \
                        output_d.flatten() * 0.2 + \
                        np.sum(np.log(output_p) * path["encodes"], axis=1)

                path_baselines = np.append(
                    path["baselines"], 0 if path["baselines"].shape[0] == 100
                    else path["baselines"][-1])
                deltas = path["rewards"] + config.gamma * path_baselines[1:] -\
                        path_baselines[:-1]

                path["advants"] = discount(deltas, config.gamma * config.lam)
                path["returns"] = discount(path["rewards"], config.gamma)

                f.write("Baseline:\n" + np.array_str(path_baselines) + "\n")
                f.write("Returns:\n" + np.array_str(path["returns"]) + "\n")
                f.write("Advants:\n" + np.array_str(path["advants"]) + "\n")
                f.write("Mus:\n" + np.array_str(path["mus"]) + "\n")
                f.write("Actions:\n" + np.array_str(path["actions"]) + "\n")
                f.write("Logstds:\n" + np.array_str(path["logstds"]) + "\n")
                path_idx += 1

            # Standardize the advantage function to have mean=0 and std=1
            advants_n = np.concatenate([path["advants"] for path in paths])
            # advants_n -= advants_n.mean()
            advants_n /= (advants_n.std() + 1e-8)

            # Computing baseline function for next iter.
            self.baseline.fit(paths)

            feed = {
                self.feats: feats_n,
                self.auxs: auxs_n,
                self.encodes: encodes_n,
                self.actions: actions_n,
                self.advants: advants_n,
                self.action_dist_logstd: logstds_n,
                self.oldaction_dist_mu: mus_n,
                self.oldaction_dist_logstd: logstds_n
            }

            thprev = self.gf()

            def fisher_vector_product(p):
                feed[self.flat_tangent] = p
                return self.sess.run(self.fvp, feed) + p * config.cg_damping

            g = self.sess.run(self.pg, feed_dict=feed)
            stepdir = conjugate_gradient(fisher_vector_product, -g)
            shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
            assert shs > 0

            lm = np.sqrt(shs / config.max_kl)
            fullstep = stepdir / lm
            neggdotstepdir = -g.dot(stepdir)

            def loss(th):
                self.sff(th)
                return self.sess.run(self.losses[0], feed_dict=feed)

            theta = linesearch(loss, thprev, fullstep, neggdotstepdir / lm)
            self.sff(theta)

            surrafter, kloldnew, entropy = self.sess.run(self.losses,
                                                         feed_dict=feed)

            episoderewards = np.array(
                [path["rewards"].sum() for path in paths])
            stats = {}
            numeptotal += len(episoderewards)
            stats["Total number of episodes"] = numeptotal
            stats["Average sum of rewards per episode"] = episoderewards.mean()
            stats["Entropy"] = entropy
            stats["Time elapsed"] = "%.2f mins" % (
                (time.time() - start_time) / 60.0)
            stats["KL between old and new distribution"] = kloldnew
            stats["Surrogate loss"] = surrafter
            print("\n********** Iteration {} **********".format(i))
            for k, v in stats.iteritems():
                print(k + ": " + " " * (40 - len(k)) + str(v))
            if entropy != entropy:
                exit(-1)

            param_dir = "/home/mathew/Documents/RL/param_turn_fourth/"
            print("Now we save model")
            self.generator.save_weights(param_dir +
                                        "generator_model_%d.h5" % i,
                                        overwrite=True)
            with open(param_dir + "generator_model_%d.json" % i,
                      "w") as outfile:
                json.dump(self.generator.to_json(), outfile)

            self.discriminator.save_weights(param_dir +
                                            "discriminator_model_%d.h5" % i,
                                            overwrite=True)
            with open(param_dir + "discriminator_model_%d.json" % i,
                      "w") as outfile:
                json.dump(self.discriminator.to_json(), outfile)

            self.baseline.model.save_weights(param_dir +
                                             "baseline_model_%d.h5" % i,
                                             overwrite=True)
            with open(param_dir + "baseline_model_%d.json" % i,
                      "w") as outfile:
                json.dump(self.baseline.model.to_json(), outfile)

            self.posterior.save_weights(param_dir +
                                        "posterior_model_%d.h5" % i,
                                        overwrite=True)
            with open(param_dir + "posterior_model_%d.json" % i,
                      "w") as outfile:
                json.dump(self.posterior.to_json(), outfile)

            self.posterior_target.save_weights(
                param_dir + "posterior_target_model_%d.h5" % i, overwrite=True)
            with open(param_dir + "posterior_target_model_%d.json" % i,
                      "w") as outfile:
                json.dump(self.posterior_target.to_json(), outfile)
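The `discount` helper used above for `path["returns"]` and the GAE-style `path["advants"]` is imported from elsewhere; conventionally it is a reverse discounted cumulative sum, which is what the sketch below assumes (offered only as a guess at the helper's behaviour, not taken from the source):

import numpy as np
import scipy.signal

def discount(x, gamma):
    # out[t] = x[t] + gamma * x[t+1] + gamma**2 * x[t+2] + ...
    return scipy.signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]

rewards = np.array([1.0, 1.0, 1.0])
print(np.array_str(discount(rewards, 0.9), precision=4))   # [2.71 1.9  1.  ]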
Beispiel #48
0
        for i in range(len(cs)):
            if rand > cs[i]:
                action = i+2
            else:
                break
        color = (sm[action-1], 1-sm[action-1], 1)


        gg_action = env.gg_action(action) # look up the vector belonging to the action
        v_new, pos_new, reward, end = env.step(gg_action, v, pos, draw, color) # take a step
        # with the acceleration chosen by the network

        if draw:
            if text is not None: # display the data
                text.set_visible(False)
            text = plt.text(0, 0, "Q=" + np.array_str(qs) + "\nsm=" + np.array_str(sm) + "\nChosen action: " + str(action) +
                        " Reward: " + str(reward))

        else:
            if (ep%10 == 0):
                print("Q=" + np.array_str(qs) + "\nsm=" + np.array_str(sm) + "\nChosen action: " + str(action) +
                        " Reward: " + str(reward))

        if draw:
            plt.pause(0.001)
            plt.draw()

        exp_memory.append( (np.concatenate((pos, v)), action, np.concatenate((pos_new, v_new)), reward, end) )
        # add the step's data to the experience memory

        if len(exp_memory) >= batch_size: # once there is enough data in the memory
Beispiel #49
0
                # sentence = tf.nn.embedding_lookup(final_embeddings, words_np_idx)
                sentence = final_embeddings_np[words_idx]
                # corr_embed = tf.reduce_sum(sentence, 0, keep_dims=True)
                corr_embed = np.sum(sentence, axis=0)
                w = 1.0 / sen_len
                # cor_embedding = tf.multiply(w, corr_embed)
                cor_embedding = w * corr_embed
                norm_cor = np.sqrt(np.sum(np.square(cor_embedding)))
                cor_norm_embedding = cor_embedding / norm_cor
                # # fpw.writelines(np.array_str(cor_norm_embedding.eval()[0,:]))
                # print(cor_norm_embedding.shape)
                # for col in cor_norm_embedding:
                if index % 1000 == 0:
                    print("index: %s"%index)
                fpw.write(np.array_str(cor_norm_embedding,10000)[1:-1])
                fpw.write('\n')
            fpw.close()
            fp.close()
                    #     fpw.writelines(np.array_str(col))






            # Testing final embedding
#             if step % 10000 == 0 and step > 0:
#                 input_dictionary = dict([(v,k) for (k, v) in reverse_dictionary.items()])
#                 test_word_idx_a = np.random.randint(0, len(input_dictionary) - 1)
#                 a = final_embeddings[test_word_idx_a, :]
Beispiel #50
0
 def __repr__(self):
     name = self.__class__.__name__
     shape = str(self.shape)
     data = np.array_str(self.data, precision=4, suppress_small=True)
     return "\n".join([name + " " + shape, data])
Beispiel #51
0
 def render(self, mode='human'):
     outfile = io.StringIO() if mode == 'ansi' else sys.stdout
     outfile.write(np.array_str(self.grid.reshape(self.rows, self.cols)) + "\n")
Beispiel #52
0
def main(A, t_max, M, N_max, R, exec_type, theta, sigma, d_context,
         type_context):

    ############################### MAIN CONFIG  ###############################
    print(
        '{}-armed Contextual Linear Gaussian bandit and nonparametric bandit with TS for {} time-instants and {} realizations'
        .format(A, t_max, R))

    # Directory configuration
    dir_string = '../results/{}/A={}/t_max={}/R={}/M={}/N_max={}/d_context={}/type_context={}/theta={}/sigma={}'.format(
        os.path.basename(__file__).split('.')[0], A, t_max, R, M, N_max,
        d_context, type_context,
        '_'.join(str.strip(np.array_str(theta.flatten()), '[]').split()),
        '_'.join(str.strip(np.array_str(sigma.flatten()), '[]').split()))
    os.makedirs(dir_string, exist_ok=True)

    ########## Contextual Bandit configuration ##########
    # Context
    if type_context == 'static':
        # Static context
        context = np.ones((d_context, t_max))
    elif type_context == 'randn':
        # Dynamic context: standard Gaussian
        context = np.random.randn(d_context, t_max)
    elif type_context == 'rand':
        # Dynamic context: uniform
        context = np.random.rand(d_context, t_max)
    else:
        # Unknown context
        raise ValueError('Invalid context type={}'.format(type_context))

    ############################### BANDITS  ###############################
    # Bandits to evaluate as a list
    bandits = []
    bandits_labels = []

    ### Thompson sampling: when sampling with static n=1 and no MC
    thompsonSampling = {
        'sampling_type': 'static',
        'MC_type': 'MC_arms',
        'M': 1,
        'arm_N_samples': 1
    }

    ########## Linear Gaussian with unknown sigma
    # Reward function
    reward_function = {
        'type': 'linear_gaussian',
        'dist': stats.norm,
        'theta': theta,
        'sigma': sigma
    }
    # Reward prior sigmas
    Sigmas = np.zeros((A, d_context, d_context))
    for a in np.arange(A):
        Sigmas[a, :, :] = np.eye(d_context)

    reward_prior_unknown = {
        'dist': 'NIG',
        'theta': np.ones((A, d_context)),
        'Sigma': Sigmas,
        'alpha': np.ones((A, 1)),
        'beta': np.ones((A, 1))
    }
    # Instantiate bandit
    bandits.append(
        BayesianBanditSampling(A, reward_function, reward_prior_unknown,
                               thompsonSampling))
    bandits_labels.append('TS linear Gaussian')

    ########## Nonparametric linear Gaussian mixture
    # MCMC (Gibbs) parameters
    gibbs_max_iter = 10
    gibbs_loglik_eps = 0.01
    # Plotting
    gibbs_plot_save = 'show'
    gibbs_plot_save = None
    if gibbs_plot_save != None and gibbs_plot_save != 'show':
        # Plotting directories
        gibbs_plots = dir_string + '/gibbs_plots'
        os.makedirs(gibbs_plots, exist_ok=True)
        os.makedirs(gibbs_plots + '/nonparametric', exist_ok=True)
        gibbs_plot_save = gibbs_plots + '/nonparametric'

    ########## Priors
    gamma = 0.1
    alpha = 1.
    beta = 1.
    sigma_0 = 1.
    pitman_yor_d = 0
    assert (0 <= pitman_yor_d) and (pitman_yor_d < 1) and (gamma >
                                                           -pitman_yor_d)
    # Reward function
    np_reward_function = {
        'type': 'linear_gaussian_mixture',
        'dist': stats.norm,
        'pi': np.ones((A, 1)),
        'theta': theta[:, None, ],
        'sigma': sigma * np.ones((A, 1))
    }
    np_thompsonSampling = {
        'sampling_type': 'static',
        'MC_type': 'MC_arms',
        'M': 1,
        'arm_N_samples': 1,
        'mixture_expectation': 'pi_expected'
    }
    # Hyperparameters
    # Concentration parameter
    prior_d = pitman_yor_d * np.ones(A)
    prior_gamma = gamma * np.ones(A)
    # NIG for linear Gaussians
    prior_alpha = alpha * np.ones(A)
    prior_beta = beta * np.ones(A)

    # Initial thetas
    prior_theta = np.ones((A, d_context))
    prior_Sigma = np.zeros((A, d_context, d_context))
    # Initial covariances: uncorrelated
    for a in np.arange(A):
        prior_Sigma[a, :, :] = sigma_0 * np.eye(d_context)

    # Reward prior as dictionary
    np_reward_prior = {
        'type': 'linear_gaussian_mixture',
        'dist': 'NIG',
        'K': 'nonparametric',
        'd': prior_d,
        'gamma': prior_gamma,
        'alpha': prior_alpha,
        'beta': prior_beta,
        'theta': prior_theta,
        'Sigma': prior_Sigma,
        'gibbs_max_iter': gibbs_max_iter,
        'gibbs_loglik_eps': gibbs_loglik_eps,
        'gibbs_plot_save': gibbs_plot_save
    }

    # Instantiate bandit
    bandits.append(
        MCMCBanditSampling(A, np_reward_function, np_reward_prior,
                           np_thompsonSampling))
    bandits_labels.append('Gibbs-TS nonparametric linear Gaussian mixture')

    ### BANDIT EXECUTION
    # Execute each bandit
    for (n, bandit) in enumerate(bandits):
        bandit.execute_realizations(R, t_max, context, exec_type)

    # Save bandits info
    with open(dir_string + '/bandits.pickle', 'wb') as f:
        pickle.dump(bandits, f)
    with open(dir_string + '/bandits_labels.pickle', 'wb') as f:
        pickle.dump(bandits_labels, f)

    ############################### PLOTTING  ###############################
    ## Plotting arrangements (in general)
    bandits_colors = [
        colors.cnames['black'], colors.cnames['skyblue'],
        colors.cnames['cyan'], colors.cnames['blue'],
        colors.cnames['palegreen'], colors.cnames['lime'],
        colors.cnames['green'], colors.cnames['yellow'],
        colors.cnames['orange'], colors.cnames['red'], colors.cnames['purple'],
        colors.cnames['fuchsia'], colors.cnames['pink'],
        colors.cnames['saddlebrown'], colors.cnames['chocolate'],
        colors.cnames['burlywood']
    ]

    bandits_colors = [
        colors.cnames['black'], colors.cnames['silver'], colors.cnames['red'],
        colors.cnames['salmon'], colors.cnames['navy'], colors.cnames['blue'],
        colors.cnames['green'], colors.cnames['lime'], colors.cnames['orange'],
        colors.cnames['yellow'], colors.cnames['cyan'],
        colors.cnames['lightblue'], colors.cnames['purple'],
        colors.cnames['fuchsia'], colors.cnames['saddlebrown'],
        colors.cnames['peru']
    ]

    # Plotting directories
    dir_plots = dir_string + '/plots'
    os.makedirs(dir_plots, exist_ok=True)

    # Plotting time: all
    t_plot = t_max

    ## GENERAL
    # Plot regret
    plot_std = False
    bandits_plot_regret(bandits,
                        bandits_colors,
                        bandits_labels,
                        t_plot,
                        plot_std,
                        plot_save=dir_plots)
    plot_std = True
    bandits_plot_regret(bandits,
                        bandits_colors,
                        bandits_labels,
                        t_plot,
                        plot_std,
                        plot_save=dir_plots)

    # Plot cumregret
    plot_std = False
    bandits_plot_cumregret(bandits,
                           bandits_colors,
                           bandits_labels,
                           t_plot,
                           plot_std,
                           plot_save=dir_plots)
    plot_std = True
    bandits_plot_cumregret(bandits,
                           bandits_colors,
                           bandits_labels,
                           t_plot,
                           plot_std,
                           plot_save=dir_plots)

    # Plot rewards expected
    plot_std = True
    bandits_plot_rewards_expected(bandits,
                                  bandits_colors,
                                  bandits_labels,
                                  t_plot,
                                  plot_std,
                                  plot_save=dir_plots)

    # Plot actions
    plot_std = False
    bandits_plot_actions(bandits,
                         bandits_colors,
                         bandits_labels,
                         t_plot,
                         plot_std,
                         plot_save=dir_plots)
    plot_std = True
    bandits_plot_actions(bandits,
                         bandits_colors,
                         bandits_labels,
                         t_plot,
                         plot_std,
                         plot_save=dir_plots)

    # Plot correct actions
    plot_std = False
    bandits_plot_actions_correct(bandits,
                                 bandits_colors,
                                 bandits_labels,
                                 t_plot,
                                 plot_std,
                                 plot_save=dir_plots)
    plot_std = True
    bandits_plot_actions_correct(bandits,
                                 bandits_colors,
                                 bandits_labels,
                                 t_plot,
                                 plot_std,
                                 plot_save=dir_plots)

    ## Sampling bandits
    # Plot action predictive density
    plot_std = True
    bandits_plot_arm_density(bandits,
                             bandits_colors,
                             bandits_labels,
                             t_plot,
                             plot_std,
                             plot_save=dir_plots)

    ## Quantile bandits
    # Plot action quantiles
    plot_std = True
    bandits_plot_arm_quantile(bandits,
                              bandits_colors,
                              bandits_labels,
                              t_plot,
                              plot_std,
                              plot_save=dir_plots)
Beispiel #53
0
import sys
import numpy as np


def test_hyp(hypothesis, instance):
    # This function performs a functional XNOR (Not + XOR)
    # If the hypothesis is ever 0 (not) and the input is 1 -> returns 0 -> False
    # Similarly, if the hypothesis is 1 (value) and the input is 0 -> returns 0
    for index, hyp_val in hypothesis.iteritems():
        x_instance = instance[abs(index) - 1]
        if hyp_val != x_instance:
            return 0
    return 1


num_examples = 1000
num_variables = 5
correct = 1, -2, -3, -4, 5
hyp_form = {int(x): (0 if int(x) < 0 else 1) for x in correct}

with open('test-data2.txt', 'w') as outfile:
    for i in range(num_examples):
        train = np.random.randint(2, size=num_variables, dtype=int)
        classify = test_hyp(hyp_form, train)
        train = np.append(train, classify)
        print np.array_str(train)[1:-1]
        outfile.write(np.array_str(train)[1:-1] + '\n')
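A quick sanity check of the XNOR-style `test_hyp` above, using the same hypothesis dictionary that `hyp_form` builds for `correct = 1, -2, -3, -4, 5` (Python 2, to match the snippet):

hyp = {1: 1, -2: 0, -3: 0, -4: 0, 5: 1}
print test_hyp(hyp, [1, 0, 0, 0, 1])   # 1: the instance agrees with the hypothesis
print test_hyp(hyp, [0, 0, 0, 0, 1])   # 0: x1 must be 1 under this hypothesis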
Beispiel #54
0
def randomised_trials(num_trials, save_dir):
    """
        num_trials (int): number of trials to initialise parameters of PDEs
        save_dir (string): parent directory of the per-trial folders; the ith folder stores the data simulated for the ith trial.

    """
    #dtsave = 0.001  # set this constant
    for i in range(num_trials):
        # let the ith directory that stores the data be save_dir + "/" + str(i + 1) + "_trial"
        ith_save_dir = save_dir + "/" + str(i + 1) + "_trial"
        pathlib.Path(ith_save_dir).mkdir(parents=True, exist_ok=True)
        #write parameter/simulation values into ith folder:
        file_name = ith_save_dir + "/" + str(
            i + 1) + "_trial_simulation_values.txt"
        f = open(file_name, "w+")

        #first, let's randomised parameters shared by DD_numr and DD_anly
        Diff_coeff = random.randint(3, 10)  #[a,b], Diff_coeff = D
        #decay_coeff = random.uniform(mu_lower, mu_upper) #[a,b], decay_coeff = mu
        decay_coeff = 0
        #init_conc = init
        init_conc = random.randint(50, 100)
        # Total_sim_time corresponds to Ttotal, the total simulation time (in seconds)
        Total_sim_time = 10
        #randomised dx, Lx, source, dt
        dx = random.uniform(1, 2)
        #Lx = random.randint(30,40)
        Lx = 30
        dt = random.uniform(0.001, (dx)**2 / Diff_coeff)
        source = random.randint(0,
                                round(Lx / dx) -
                                2)  #need to be within round(Lx/dx)-2
        #create xx for DD_anly
        xx = np.arange(0, round(Lx / dx) + 1) * dx

        #write and save parameters
        f.write("Diff_coeff(D):" + str(Diff_coeff) + "\n")
        f.write("decay_coeff(mu):" + str(decay_coeff) + "\n")
        f.write("init_conc(init):" + str(init_conc) + "\n")
        f.write("Total_sim_time(Ttotal):" + str(Total_sim_time) + "\n")
        f.write("dx:" + str(dx) + "\n")
        f.write("Lx:" + str(Lx) + "\n")
        f.write("dt:" + str(dt) + "\n")
        f.write("source:" + str(source) + "\n")
        f.write("xx:" + np.array_str(xx) + "\n")

        f.close()

        DD_numr(D=Diff_coeff,
                mu=decay_coeff,
                init=init_conc,
                source=source,
                Lx=Lx,
                Ttotal=Total_sim_time,
                dtsave=dt,
                dx=dx,
                dt=dt,
                save_dir=ith_save_dir)  #set dtsave as dt

        #DD_anly(x=xx, t=Total_sim_time, source=source, D=Diff_coeff,
        #        m=decay_coeff, j=init_conc,
        #        save_dir=ith_save_dir)

        #attempt to fix bug
        DD_anly(x=xx,
                t=Total_sim_time,
                source=xx[source],
                D=Diff_coeff,
                m=decay_coeff,
                j=init_conc,
                save_dir=ith_save_dir)
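A hedged usage sketch for `randomised_trials` as defined above (the directory name is arbitrary, and `DD_numr`/`DD_anly` must be importable for the call to run):

# Run five randomised diffusion-decay trials; parameters and simulation output
# are written into ./simulated_data/<i>_trial/ for i = 1..5.
randomised_trials(num_trials=5, save_dir="./simulated_data")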
                       subject='sample',
                       meg='helmet',
                       bem=sphere,
                       dig=True,
                       surfaces=['brain'])
del raw, epochs

###############################################################################
# To do a dipole fit, let's use the covariance provided by the empty room
# recording.

raw_erm = read_raw_ctf(erm_path).apply_gradient_compensation(0)
raw_erm = mne.preprocessing.maxwell_filter(raw_erm,
                                           coord_frame='meg',
                                           **mf_kwargs)
cov = mne.compute_raw_covariance(raw_erm)
del raw_erm

dip, residual = fit_dipole(evoked, cov, sphere)

###############################################################################
# Compare the actual position with the estimated one.

expected_pos = np.array([18., 0., 49.])
diff = np.sqrt(np.sum((dip.pos[0] * 1000 - expected_pos)**2))
print('Actual pos:     %s mm' % np.array_str(expected_pos, precision=1))
print('Estimated pos:  %s mm' % np.array_str(dip.pos[0] * 1000, precision=1))
print('Difference:     %0.1f mm' % diff)
print('Amplitude:      %0.1f nAm' % (1e9 * dip.amplitude[0]))
print('GOF:            %0.1f %%' % dip.gof[0])
    def from_values(
        cls,
        values: np.ndarray,
        *,
        header: object,
        config: tp.Optional[DisplayConfig] = None,
        outermost: bool = False,
        index_depth: int = 0,
        header_depth: int = 0,
        style_config: tp.Optional[StyleConfig] = None,
    ) -> 'Display':
        '''
        Given a 1 or 2D ndarray, return a Display instance. Generally 2D arrays are passed here only from TypeBlocks.
        '''
        # return a list of lists, where each inner list represents multiple columns
        config = config or DisplayActive.get()

        # create a list of lists, always starting with the header
        rows = []
        if header is not None:
            # NOTE: controlling if the header is applied with type_show is moving to display() methods; this approach will no longer be needed
            # assume that all headers are SF types; skip if type_show is False
            if config.type_show:
                rows.append([cls.to_cell(header, config=config)])
            else:
                rows.append([cls.CELL_EMPTY])

        if values.__class__ is np.ndarray and values.ndim == 2:
            # NOTE: this is generally only used by TypeBlocks
            # get rows from numpy string formatting
            np_rows = np.array_str(values).split('\n')
            last_idx = len(np_rows) - 1
            for idx, row in enumerate(np_rows):
                # trim brackets
                end_slice_len = 2 if idx == last_idx else 1
                row = row[2:len(row) - end_slice_len].strip()
                rows.append([cls.to_cell(row, config=config)])
        else:
            count_max = config.display_rows
            # print('comparing values to count_max', len(values), count_max)
            if len(values) > config.display_rows:
                data_half_count = Display.truncate_half_count(count_max)
                value_gen = partial(
                    _gen_skip_middle,
                    forward_iter=values.__iter__,
                    forward_count=data_half_count,
                    reverse_iter=partial(reversed, values),
                    reverse_count=data_half_count,
                    center_sentinel=cls.ELLIPSIS_CENTER_SENTINEL)
            else:
                value_gen = values.__iter__

            for v in value_gen():
                if v is cls.ELLIPSIS_CENTER_SENTINEL:  # center sentinel
                    rows.append([cls.CELL_ELLIPSIS])
                else:
                    rows.append([cls.to_cell(v, config=config)])

        # add the types to the last row
        if values.__class__ is np.ndarray and config.type_show:
            rows.append(
                [cls.to_cell(values.dtype, config=config, is_dtype=True)])

        return cls(
            rows,
            config=config,
            outermost=outermost,
            index_depth=index_depth,
            header_depth=header_depth,
            style_config=style_config,
        )
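The 2D branch above relies on how `np.array_str` renders a matrix: one bracketed row per text line, with an extra closing bracket on the last line, which is why one character is trimmed from the back of every row and two from the last. A minimal standalone sketch of just that parsing step:

import numpy as np

values = np.arange(6).reshape(2, 3)
np_rows = np.array_str(values).split('\n')
print(np_rows)            # ['[[0 1 2]', ' [3 4 5]]']

last_idx = len(np_rows) - 1
for idx, row in enumerate(np_rows):
    end_slice_len = 2 if idx == last_idx else 1
    print(repr(row[2:len(row) - end_slice_len].strip()))
# '0 1 2'
# '3 4 5'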
Beispiel #57
0
 def __str__(self):
     # Format the output to show operator notation
     return u'O = ' + str.replace(
         np.array_str(self, precision=4, suppress_small=True), '\n',
         '\n    ')
Beispiel #58
0
def get_architecure_from_action(action):
    arc = ""
    for i in range(len(action)):
        arc += np.array_str(action[i]) + " "
    return '"' + arc[:-1] + '"'
 def __str__(self):
     return "Metadata : %s\nPose : \n%s" % (
         " ".join(map(str, self.metadata)),
         np.array_str(self.pose),
     )
Beispiel #60
0
 def __update_state(self):
     self.state = np.random.uniform(low=-1, high=1, size=self.dim)
     print 'Next state: %s' % np.array_str(self.state)
     self.state_tx.publish(time=rospy.Time.now(), feats=self.state)