Example #1
def test_batched_dot():
    np.set_printoptions(threshold=8192 * 4, linewidth=600,
                        formatter={'int': lambda x: "%2d" % x, 'float': lambda x: "%2.0f" % x})

    ng = NervanaGPU(stochastic_round=False, bench=1)
    nc = NervanaCPU()

    dtype = np.float32  # np.float16 or np.float32

    X = 100   # Batch Size
    N = 32   # Minibatch Size
    C = 1536  # Input  Features
    K = 768  # Output Features

    cpuI, cpuE, cpuW = setup_test_data(X, N, C, K, dtype)

    ngO, ngB, ngU = run_batched_dot(ng, cpuI, cpuE, cpuW, X, dtype)
    ncO, ncB, ncU = run_batched_dot(nc, cpuI, cpuE, cpuW, X, dtype)
    npO, npB, npU = run_batched_dot(np, cpuI, cpuE, cpuW, X, dtype)

    # set_trace()
    assert_tensors_allclose(npO, ngO, rtol=0, atol=1e-3)
    assert_tensors_allclose(npB, ngB, rtol=0, atol=1e-3)
    assert_tensors_allclose(npU, ngU, rtol=0, atol=1e-3)

    assert_tensors_allclose(npO, ncO, rtol=0, atol=1e-3)
    assert_tensors_allclose(npB, ncB, rtol=0, atol=1e-3)
    assert_tensors_allclose(npU, ncU, rtol=0, atol=1e-3)

    ng.ctx.detach()
    del(ng)
Example #2
def check_that_nr_fit_runs():
    from jds_image_proc.clouds import voxel_downsample
    #from brett2.ros_utils import RvizWrapper    
    #import lfd.registration as lr
    ##import lfd.warping as lw    
    #if rospy.get_name() == "/unnamed":
        #rospy.init_node("test_rigidity", disable_signals=True)
        #from time import sleep
        #sleep(1)
    #rviz = RvizWrapper.create()
    
    pts0 = np.loadtxt("../test/rope_control_points.txt")
    pts1 = np.loadtxt("../test/bad_rope.txt")    
    pts_rigid = voxel_downsample(pts0[:10], .02)
    #lr.Globals.setup()
    np.seterr(all='ignore')
    np.set_printoptions(suppress=True)

    lin_ag, trans_g, w_eg, x_ea = tps.tps_nr_fit_enhanced(pts0, pts1, 0.01, pts_rigid, 0.001, method="newton",plotting=1)
    #lin_ag2, trans_g2, w_ng2 = tps_fit(pts0, pts1, .01, .01)
    #assert np.allclose(w_ng, w_ng2)
    def eval_partial(x_ma):
        return tps_eval(x_ma, lin_ag, trans_g, w_eg, x_ea) 
    #lr.plot_orig_and_warped_clouds(eval_partial, pts0, pts1, res=.008)
    #handles = lw.draw_grid(rviz, eval_partial, pts0.min(axis=0), pts0.max(axis=0), 'base_footprint')

    grads = tps.tps_grad(pts_rigid, lin_ag, trans_g, w_eg, x_ea)
def parse_numpy_printoption(kv_str):
  """Sets a single numpy printoption from a string of the form 'x=y'.

  See documentation on numpy.set_printoptions() for details about what values
  x and y can take. x can be any option listed there other than 'formatter'.

  Args:
    kv_str: A string of the form 'x=y', such as 'threshold=100000'

  Raises:
    argparse.ArgumentTypeError: If the string couldn't be used to set any
        numpy printoption.
  """
  k_v_str = kv_str.split("=", 1)
  if len(k_v_str) != 2 or not k_v_str[0]:
    raise argparse.ArgumentTypeError("'%s' is not in the form k=v." % kv_str)
  k, v_str = k_v_str
  printoptions = np.get_printoptions()
  if k not in printoptions:
    raise argparse.ArgumentTypeError("'%s' is not a valid printoption." % k)
  v_type = type(printoptions[k])
  if v_type is type(None):
    raise argparse.ArgumentTypeError(
        "Setting '%s' from the command line is not supported." % k)
  try:
    v = (
        v_type(v_str)
        if v_type is not bool else flags.BooleanParser().parse(v_str))
  except ValueError as e:
    raise argparse.ArgumentTypeError(str(e))  # e.message is Python 2 only
  np.set_printoptions(**{k: v})
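# A hypothetical way to wire the parser above into argparse (the flag name
# and help text are assumptions, not from the original source):
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--printoption", type=parse_numpy_printoption,
                    action="append", default=[],
                    help="argument for numpy.set_printoptions, e.g. threshold=100000")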
Example #4
def findmaxima(c, var = 'x'):
    """
    Creates a RK-4 approximation of Rossler curve with the given c value
    and returns local maxima along the x(t) curve from T0 to T
    """
    T0 = 250
    e = 3E-4

    Rc = Rossler(c, dt=0.01, T0 = T0)
    Rc.run()

    if var == 'x':
        var = Rc.x
    elif var == 'y':
        var = Rc.y
    else:
        var = Rc.z

    # use only values where t > T0
    initial_index = np.where(Rc.t == T0)[0][0]

    #moving back 1 to get a more exact diff
    usable_x = var[initial_index - 1:]
    x_diff = np.diff(usable_x)
    usable_x = usable_x[1:]

    np.set_printoptions(threshold=np.inf)  # threshold='nan' raises in modern numpy


    critical_points = usable_x[np.abs(x_diff) < e]
    return critical_points[critical_points > 0]
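# Hypothetical usage sketch (c = 5.7 is a commonly cited chaotic Rossler
# parameter, assumed here; the Rossler class comes from this module):
maxima = findmaxima(5.7, var='x')
print(maxima[:10])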
Example #5
    def info(self):
        """

        """
        print('{0:4} , {1:15}, {2:5}, {3:5}, {4:7}, {5:6}, {6:8}, {7:9}'.format('type', 'p', 'value', 'std', 'runable', 'usable', 'obsolete', 'evaluated'))
        np.set_printoptions(precision=3)
        print('{0:4} , {1:15}, {2:5}, {3:5}, {4:7}, {5:6}, {6:8}, {7:9}'.format(self.type, self.p, self.value, self.std, self.runable, self.usable, self.obsolete, self.evaluated))
Example #6
def evaluate(reference_list, test_list, plot_conf_matrix=False):
    """
    prints results and plots confusion matrix
    :param reference_list:
    :param test_list:
    :return:
    """
    # print ('accuracy_score', accuracy_score(reference_list, test_list))
    print(classification_report(reference_list, test_list))

    if plot_conf_matrix:
        cm = confusion_matrix(reference_list, test_list)

        np.set_printoptions(precision=2)

        # Normalize the confusion matrix by row (i.e by the number of samples
        # in each class)
        cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        # print('Normalized confusion matrix')
        # print(cm_normalized)

        plt.figure()
        plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')

        plt.show()
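# plot_confusion_matrix is referenced above but not defined in this snippet;
# a minimal sketch, assuming matplotlib's plt from the surrounding code:
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=None):
    plt.imshow(cm, interpolation='nearest', cmap=cmap or plt.cm.Blues)
    plt.title(title)
    plt.colorbar()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')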
Example #7
def main():
    
    # read data from Bingo-F protocols
    in_dt = InputData(BingoParser(options["bingo_data_dir"]))

    phs = in_dt.GetPhotos()
    pts = in_dt.GetPoints()
    cam_m, distor = list(in_dt.GetCameras().values())[0].GetParams()
    

    # compute relative orientations and merged them into common system
    ros = RelativeOrientation(in_dt)

    np.set_printoptions(precision=3, suppress=True)

    #PlotScene(pts, phs)
    #PlotRelOrs(pts, phs)

    # performs helmert transformation into world coordinate system 
    HelmertTransform(pts, phs)

    # run bundle block adjustment
    ph_errs, pt_errs = RunBundleBlockAdjutment(in_dt, options['protocol_dir'], flags['f'])   

    PlotScene(pts, phs)
    return 
Example #8
    def patch_set(self, attribute, data):
        '''

        Set patch attributes from a Pandas dataframe

        :param attribute: valid NetLogo patch attribute
        :param data: Pandas dataframe with same dimensions as NetLogo world
        :raises: NetLogoException

        '''

        try:
            np.set_printoptions(threshold=np.prod(data.shape))
            datalist = '['+str(data.values.flatten()).strip('[ ')  # DataFrame.as_matrix() was removed; .values is the replacement
            datalist = ' '.join(datalist.split())
            if self.NL_VERSION == '6.0':
                command = '(foreach map [[?1] -> [pxcor] of ?1] sort patches map [[?2] -> [pycor] of ?2] \
                            sort patches {0} [[?1 ?2 ?3 ] -> ask patch ?1 ?2 [set {1} ?3]])'.format(datalist, attribute)
            else:
                command = '(foreach map [[pxcor] of ?] sort patches map [[pycor] of ?] \
                            sort patches {0} [ask patch ?1 ?2 [set {1} ?3]])'.format(datalist, attribute)

            self.link.command(command)

        except jpype.JException(jpype.java.org.nlogo.api.LogoException) as ex:
            raise NetLogoException(ex.message())
        except jpype.JException(jpype.java.org.nlogo.api.CompilerException) as ex:
            raise NetLogoException(ex.message())
        except jpype.JException(jpype.java.lang.Exception) as ex:
            raise NetLogoException(ex.message())
Example #9
    def __repr__(self):
        def create_repr(maxlength = 500):
            repr_list = []
            for elem in self.list:
                if not isinstance(elem, AnyPyProcessOutput):
                    repr_list.append( '  ' + _pprint.pformat(elem))
                    continue
                for line in elem._repr_gen(prefix = ' '):
                    repr_list.append(line)
                    if maxlength and len(repr_list) > maxlength:
                        repr_list.append('  ...')
                        return repr_list
                if repr_list and not repr_list[-1].endswith(','):
                    repr_list[-1] = repr_list[-1] + ','

            if len(repr_list):
                repr_list[-1] = repr_list[-1].rstrip(',')
                repr_list[0] = '[' + repr_list[0][1:]
                repr_list[-1] = repr_list[-1] + ']'
            else:
                repr_list.append('[]')
            return repr_list

        repr_str = '\n'.join(create_repr(500))
        if repr_str.endswith('...'):
            # Summarize long arrays, then restore the previous options;
            # a bare np.set_printoptions() call would not restore them.
            opts = np.get_printoptions()
            np.set_printoptions(threshold=30)
            repr_str = '\n'.join(create_repr(1000))
            np.set_printoptions(**opts)
        return repr_str
Example #10
def writeOVERFLOW(filename, X, Y):
    f = open(filename, 'w')
    print('Writing', filename)

    # Overflow requires 3 spanwise nodes

    ni, nj = X.shape; nk = 3
    
    npy.set_printoptions( precision=16, threshold = ni )
    
    f.write('1'+'\n')
    f.write(str(ni) + ' ' + str(nj) + ' ' + str(nk) + '\n')
    Write3DArray(f,X)
    Write3DArray(f,X)
    Write3DArray(f,X)

    Z = npy.ones(X.shape)*0
    Write3DArray(f,Z)
    Z = npy.ones(X.shape)*-0.5
    Write3DArray(f,Z)
    Z = npy.ones(X.shape)*-1
    Write3DArray(f,Z)

    Write3DArray(f,Y)
    Write3DArray(f,Y)
    Write3DArray(f,Y)
               
    f.close()
Example #11
def _test():
    import doctest

    start_suppress = np.get_printoptions()["suppress"]
    np.set_printoptions(suppress=True)
    doctest.testmod()
    np.set_printoptions(suppress=start_suppress)
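# Since numpy 1.15 the save/restore dance above can be replaced by the
# np.printoptions context manager, which restores the old options on exit;
# a minimal sketch:
def _test_with_context_manager():
    import doctest
    with np.printoptions(suppress=True):
        doctest.testmod()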
def main():
    np.set_printoptions(threshold=np.inf)  # threshold=np.nan is rejected by modern numpy
    
    feature_names = get_feature_names()
    x_train, y_train,  = get_data("training.dat")

    clf = linear_model.LinearRegression()
    clf.fit (x_train, y_train)
    
    w = clf.coef_.reshape(clf.coef_.shape[1],1) 
    y_hat_train = x_train.dot(w)

    rmse_our_train, rmse_oba_train = get_rmse(y_train, y_hat_train)
    
    x_test, y_test = get_data("test.dat")
    y_hat_test = x_test.dot(w)

    rmse_our_test, rmse_oba_test = get_rmse(y_test, y_hat_test)
 
    print "RMSE OUR Train ", rmse_our_train
    print "RMSE OBA Train ", rmse_oba_train
    print "RMSE OUR Test ", rmse_our_test
    print "RMSE OBA Test ", rmse_oba_test

    save_scatter_plot(y_train, y_hat_train, "train")
    save_scatter_plot(y_test, y_hat_test, "test")
    
    build_output_files(y_hat_train, y_hat_test, y_train, y_test)
    print_weights(w, feature_names)
    report_range(y_train)
    report_range(y_test)
Example #13
def generate_eventdir_matrix(fileName, header=True, direction=None):
    '''write out directed event matrix from fast5, False if not present'''
    try: # check to make sure the file actually exists
        h5File = h5py.File(fileName, 'r')
        h5File.close()
    except (IOError, OSError):  # file missing or not a readable HDF5
        return False
    with h5py.File(fileName, 'r') as h5File:
      rowData = get_telemetry(h5File, "000", fileName)
      channel = str(rowData['channel'])
      mux = str(rowData['mux'])
      runID = rowData['runID']
      dir = "complement" if (direction=="r") else "template"
      eventLocation = "/Analyses/Basecall_1D_000/BaseCalled_%s/Events/" % (dir)
      if(not eventLocation in h5File):
          return False
      readName = rowData['read']
      sampleRate = str(int(rowData['sampleRate']))
      rawStart = str(rowData['rawStart'])
      outData = h5File[eventLocation]
      dummy = h5File[eventLocation]['move'][0] ## hack to get order correct
      numpy.set_printoptions(precision=15)
      headers = h5File[eventLocation].dtype
      if(header):
          sys.stdout.write("runID,channel,mux,read,sampleRate,rawStart,"+",".join(headers.names)+"\n")
      for line in outData:
        res=[repr(x) for x in line]
        # data seems to be normalised, but just in case it isn't in the future,
        # here's the formula for calculation:
        # pA = (raw + offset)*range/digitisation
        # (using channelMeta[("offset", "range", "digitisation")])
        # - might also be useful to know start_time from outMeta["start_time"]
        #   which should be subtracted from event/start
        sys.stdout.write(",".join((runID,channel,mux,readName,sampleRate,rawStart)) + "," + ",".join(res) + "\n")
def work_with_simple_bag_of_words():
    count = CountVectorizer()
    docs = np.array([
        'The sun is shining',
        'The weather is sweet',
        'The sun is shining and the weather is sweet',
    ])
    bag = count.fit_transform(docs)
    print(count.vocabulary_)
    print(bag.toarray())

    np.set_printoptions(precision=2)
    tfidf = TfidfTransformer(use_idf=True, norm='l2', smooth_idf=True)
    print(tfidf.fit_transform(bag).toarray())

    tf_is = 2
    n_docs = 3
    idf_is = np.log((n_docs+1) / (3+1))
    tfidf_is = tf_is * (idf_is + 1)
    print("tf-idf of term 'is' = %.2f" % tfidf_is)

    tfidf = TfidfTransformer(use_idf=True, norm=None, smooth_idf=True)
    raw_tfidf = tfidf.fit_transform(bag).toarray()[-1]
    print(raw_tfidf)

    l2_tfidf = raw_tfidf / np.sqrt(np.sum(raw_tfidf**2))
    print(l2_tfidf)
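# For reference, TfidfVectorizer collapses the CountVectorizer +
# TfidfTransformer pipeline above into a single step; a minimal sketch
# with the same settings and the same toy documents:
from sklearn.feature_extraction.text import TfidfVectorizer

docs = np.array(['The sun is shining',
                 'The weather is sweet',
                 'The sun is shining and the weather is sweet'])
tfidf_vect = TfidfVectorizer(use_idf=True, norm='l2', smooth_idf=True)
print(tfidf_vect.fit_transform(docs).toarray())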
Example #15
    def performance(self, pos_seqs=None, neg_seqs=None):
        """performance."""
        try:
            y_pred, y_binary, y_test = multiprocess_performance(
                pos_seqs, neg_seqs,
                vectorizer=self.vectorizer,
                estimator=self.estimator,
                pos_block_size=self.pos_block_size,
                neg_block_size=self.neg_block_size,
                n_jobs=self.n_jobs)
            # confusion matrix
            cm = metrics.confusion_matrix(y_test, y_binary)
            np.set_printoptions(precision=2)
            logger.info('Confusion matrix:')
            logger.info(cm)

            # classification
            logger.info('Classification:')
            logger.info(metrics.classification_report(y_test, y_binary))

            # roc
            logger.info('ROC: %.3f' % (metrics.roc_auc_score(y_test, y_pred)))

        except Exception as e:
            logger.debug('Failed iteration. Reason: %s' % e)
            logger.debug('Exception', exc_info=True)
Example #16
def copy(ntm, seq_length, sess, max_length=50, print_=True):
    start_symbol = np.zeros([ntm.cell.input_dim], dtype=np.float32)
    start_symbol[0] = 1
    end_symbol = np.zeros([ntm.cell.input_dim], dtype=np.float32)
    end_symbol[1] = 1

    seq = generate_copy_sequence(seq_length, ntm.cell.input_dim - 2)

    feed_dict = {input_:vec for vec, input_ in zip(seq, ntm.inputs)}
    feed_dict.update(
        {true_output:vec for vec, true_output in zip(seq, ntm.true_outputs)}
    )
    feed_dict.update({
        ntm.start_symbol: start_symbol,
        ntm.end_symbol: end_symbol
    })

    result = sess.run(ntm.get_outputs(seq_length) + [ntm.get_loss(seq_length)], feed_dict=feed_dict)

    outputs = result[:-1]
    loss = result[-1]

    if print_:
        np.set_printoptions(suppress=True)
        print(" true output : ")
        pp.pprint(seq)
        print(" predicted output :")
        pp.pprint(np.round(outputs))
        print(" Loss : %f" % loss)
        np.set_printoptions(suppress=False)
    else:
        return seq, outputs, loss
Example #17
    def get_occ(self, mo_energy=None, mo_coeff=None):
        '''Label the occupancies for each orbital

        Kwargs:
            mo_energy : 1D ndarray
                Orbital energies

            mo_coeff : 2D ndarray
                Orbital coefficients

        Examples:

        >>> from pyscf import gto, scf
        >>> mol = gto.M(atom='H 0 0 0; F 0 0 1.1')
        >>> mf = scf.hf.SCF(mol)
        >>> mf.get_occ(numpy.arange(mol.nao_nr()))
        array([2, 2, 2, 2, 2, 0])
        '''
        if mo_energy is None: mo_energy = self.mo_energy
        mo_occ = numpy.zeros_like(mo_energy)
        nocc = self.mol.nelectron // 2
        mo_occ[:nocc] = 2
        if nocc < mo_occ.size:
            logger.info(self, 'HOMO = %.12g, LUMO = %.12g,',
                        mo_energy[nocc-1], mo_energy[nocc])
            if mo_energy[nocc-1]+1e-3 > mo_energy[nocc]:
                logger.warn(self, '!! HOMO %.12g == LUMO %.12g',
                            mo_energy[nocc-1], mo_energy[nocc])
        else:
            logger.info(self, 'HOMO = %.12g,', mo_energy[nocc-1])
        if self.verbose >= logger.DEBUG:
            opts = numpy.get_printoptions()
            numpy.set_printoptions(threshold=len(mo_energy))
            logger.debug(self, '  mo_energy = %s', mo_energy)
            numpy.set_printoptions(**opts)  # a bare set_printoptions() does not restore the old threshold
        return mo_occ
Example #18
def _pprint(params, offset=0, printer=repr):
    # Do a multi-line justified repr:
    options = np.get_printoptions()
    np.set_printoptions(precision=5, threshold=64, edgeitems=2)
    params_list = list()
    this_line_length = offset
    line_sep = ',\n' + (1 + offset) * ' '
    for i, (k, v) in enumerate(params):
        if type(v) is float:
            # use str for representing floating point numbers
            # this way we get consistent representation across
            # architectures and versions.
            this_repr = '%s=%s' % (k, str(v))
        else:
            # use repr of the rest
            this_repr = '%s=%s' % (k, printer(v))
        if len(this_repr) > 500:
            this_repr = this_repr[:300] + '...' + this_repr[-100:]
        if i > 0:
            if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
                params_list.append(line_sep)
                this_line_length = len(line_sep)
            else:
                params_list.append(', ')
                this_line_length += 2
        params_list.append(this_repr)
        this_line_length += len(this_repr)

    np.set_printoptions(**options)
    lines = ''.join(params_list)
    # Strip trailing space to avoid nightmare in doctests
    lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
    return lines
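# A hypothetical call of the helper above (names are illustrative only):
# format an estimator-style parameter list as a justified multi-line repr.
params = [('alpha', 0.001), ('max_iter', 1000), ('tol', 1e-4)]
print('Model(' + _pprint(params, offset=len('Model(')) + ')')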
    def compareSplitsAll(self, precision=3, linewidth=120):
        nM = len(self.mm)
        nItems = ((nM * nM) - nM) // 2
        results = numpy.zeros((nM, nM), numpy.float64)
        vect = numpy.zeros(nItems, numpy.float64)
        vCounter = 0
        for mNum1 in range(1, nM):
            for mNum2 in range(mNum1):
                ret = self.compareSplits(mNum1, mNum2, verbose=False)
                #print "+++ ret = %s" % ret
                if ret == None:
                    ret = 0.0
                results[mNum1][mNum2] = ret
                results[mNum2][mNum1] = ret
                vect[vCounter] = ret
                vCounter += 1
                if 0:
                    print " %10i " % mNum1,
                    print " %10i " % mNum2,
                    print "%.3f" % ret

        # Save current numpy printoptions, and restore, below.
        curr = numpy.get_printoptions()
        numpy.set_printoptions(precision=precision, linewidth=linewidth)
        print(results)
        numpy.set_printoptions(precision=curr['precision'], linewidth=curr['linewidth'])

        print("For the %i values in one triangle," % nItems)
        print("max =  ", vect.max())
        print("min =  ", vect.min())
        print("mean = ", vect.mean())
        print("var =  ", vect.var())
Example #20
    def test_0d_arrays(self):
        assert_equal(repr(np.datetime64('2005-02-25')[...]),
                     "array('2005-02-25', dtype='datetime64[D]')")

        x = np.array(1)
        np.set_printoptions(formatter={'all':lambda x: "test"})
        assert_equal(repr(x), "array(test)")
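# One detail this test relies on: numpy resets 'formatter' on every call to
# set_printoptions, so a later bare call clears the custom formatter; a
# minimal sketch:
x = np.array(1)
np.set_printoptions(formatter={'all': lambda v: "test"})
print(repr(x))         # array(test)
np.set_printoptions()  # any call without formatter= removes the formatter
print(repr(x))         # array(1)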
Example #21
def train_network(model, num_epochs = 100, minibatch_size = 256, lr = 0.01, mom = 0.9, wd = 0.0000):
    np.set_printoptions(linewidth=200)
    owl.set_device(owl.create_gpu_device(0))
    count = 0
    # load data
    (train_data, test_data) = imageio.load_mb_from_mat("mnist_all.mat", minibatch_size)
    num_test_samples = test_data[0].shape[0]
    (test_samples, test_labels) = map(lambda npdata : owl.from_nparray(npdata), test_data)
    for i in range(num_epochs):
        print("---Epoch #", i)
        for (mb_samples, mb_labels) in train_data:
            num_samples = mb_samples.shape[0]
            data = owl.from_nparray(mb_samples).reshape([28, 28, 1, num_samples])
            label = owl.from_nparray(mb_labels)
            out, weightgrad, biasgrad = train(model, data, label)
            for k in range(len(model.weights)):
                model.weightdelta[k] = mom * model.weightdelta[k] - lr / num_samples * weightgrad[k] - wd * model.weights[k]
                model.biasdelta[k] = mom * model.biasdelta[k] - lr / num_samples * biasgrad[k]
                model.weights[k] += model.weightdelta[k]
                model.bias[k] += model.biasdelta[k]

            count = count + 1
            if (count % 1) == 0:
                print_training_accuracy(out, label, num_samples)
            if count == 100:
                sys.exit()
Example #22
	def general_mix_AMCMC(self, mix, **kwargs):
		"""
			simple mix data test using AMCMC
		"""
		self.mix = mix(K = self.nClass,**kwargs)
		self.mix.set_data(self.Y)
		for i in range(100):#np.int(np.ceil(0.1*self.sim))):  # @UnusedVariable
			self.mix.sample()
		self.mu_sample = list()
		self.sigma_sample = list()
		self.p_sample = list()
		
		for k in range(self.nClass):
			self.mu_sample.append(np.zeros_like(self.Thetas[k])) 
			self.sigma_sample.append(np.zeros_like(self.Sigmas[k])) 
			self.p_sample.append(np.zeros_like(self.P[k])) 
			
		self.mix.set_AMCMC(1200)
		sim_m = 2.
		for i in range(int(np.ceil(sim_m*self.sim))):  # @UnusedVariable
			self.mix.sample()
			for k in range(self.nClass):
				
				self.mu_sample[k] += self.mix.mu[k]/sim_m
				self.sigma_sample[k] += self.mix.sigma[k]/sim_m
				self.p_sample[k] += self.mix.p[k]/sim_m
		np.set_printoptions(precision=2)
			
		self.compare_class("AMCMC:")
Example #23
def align_se3(model, data, precision=False):
    """Align two trajectories using the method of Horn (closed-form).

    Input:
    model -- first trajectory (3xn)
    data -- second trajectory (3xn)

    Output:
    R -- rotation matrix (3x3)
    t -- translation vector (3x1)
    t_error -- translational error per point (1xn)

    """
    if not precision:
        np.set_printoptions(precision=3, suppress=True)
    model_zerocentered = model - model.mean(1).reshape(model.shape[0], 1)
    data_zerocentered = data - data.mean(1).reshape(data.shape[0], 1)

    W = np.zeros((3, 3))
    for column in range(model.shape[1]):
        W += np.outer(model_zerocentered[:, column], data_zerocentered[:, column])
    U, d, Vh = np.linalg.svd(W.transpose())
    S = np.matrix(np.identity(3))
    if np.linalg.det(U) * np.linalg.det(Vh) < 0:
        S[2, 2] = -1
    R = U * S * Vh
    t = data.mean(1).reshape(data.shape[0], 1) - R * model.mean(1).reshape(model.shape[0], 1)

    model_aligned = R * model + t
    alignment_error = model_aligned - data
    t_error = np.sqrt(np.sum(np.multiply(alignment_error, alignment_error), 0)).A[0]

    return R, t, t_error
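# A hypothetical sanity check (not part of the original module): applying a
# known rigid transform to random points should be recovered by align_se3
# with near-zero per-point error.
rng = np.random.RandomState(0)
model = np.matrix(rng.rand(3, 20))
theta = 0.3
R_true = np.matrix([[np.cos(theta), -np.sin(theta), 0.0],
                    [np.sin(theta),  np.cos(theta), 0.0],
                    [0.0,            0.0,           1.0]])
t_true = np.matrix([[0.5], [-1.0], [2.0]])
data = R_true * model + t_true

R, t, t_error = align_se3(model, data)
assert np.allclose(R, R_true) and t_error.max() < 1e-9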
Example #24
def branch_PSSM(peak_branch_df, fa_dict):
    base_dict = {"A":0, "C":1, "T":2, "G":3}
    nuc_prob = gc_content(fa_dict)
    
    pos_matrix_branch = np.zeros([4,5])
    counter = 0
    if type(peak_branch_df) == str:
        with open(peak_branch_df) as f:
            for line in f:
                counter += 1
                seq = line.strip()
                for a, base in enumerate(seq):
                    pos_matrix_branch[base_dict[base],a] += 1
    else:
        for seq in peak_branch_df['Branch seq']:
            counter += 1
            seq = seq[2:7]
            for a, base in enumerate(seq):
                pos_matrix_branch[base_dict[base],a] += 1

    float_formatter = lambda x: "%.1f" % x
    np.set_printoptions(formatter={'float_kind':float_formatter})
    
    a = 0
    while a < 4:
        b = 0
        while b < 5:
            if pos_matrix_branch[a,b] == 0: pos_matrix_branch[a,b] += 1
            pos_matrix_branch[a,b] = np.log2((pos_matrix_branch[a,b]/float(counter))/nuc_prob[a])
            b += 1
        a += 1
    
    return pos_matrix_branch
def main():
    numpy.set_printoptions(precision=1, linewidth=284, threshold=40, edgeitems=13)

    X = []
    Y = []
    order = 2

    coeffs = raw_readings_norm_coeffs = {"temp": 100,
                                        "light": 100,
                                        "humidity" : 100, "pressure": 1e5, 
                                        "audio_p2p": 100, "motion" : 1}

    data_provider = dataprovider.DataProvider(order=order, debug=True,
                                                      start_time = 1379984887,
                                                      stop_time = 1379984887+3600*24*2,
                                                      device_list = ["17030002"],
                                                      eliminate_const_one=True, device_groupping="dict",
                                                  raw_readings_norm_coeffs = coeffs)
    f = open("2_order_knode.p", "rb")
    knode = pickle.load(f)
    f.close()
    

    for data in data_provider:
        print(data)
        t, d = data["17030002"]
        X.append(t)
        l = knode.label(d)[0]
        Y.append(l)


    plt.plot(X, Y, 'ro')
    plt.show()
def main(file,N,db_file):
    
    original_db = list(SeqIO.parse(db_file, 'fasta'))
    original_db_dict = defaultdict(Bio.SeqRecord.SeqRecord)
    for i in original_db:
        original_db_dict[i.id] = i
        
    
    np.set_printoptions(suppress=True)
    with open(file) as f:
        lines = f.readlines()
    lines = [l.strip().split() for l in lines if l[0] != "#"]
    mat = np.array([ [v[2],v[3],v[4],v[5],v[6],v[7],v[8],v[9],v[10],v[11]] for v in lines]  ,dtype=float)
    id = np.array([ [l[0],l[1]] for l in lines],dtype=str)
    
    gaps = range(100, N-1, -1)
    for i in gaps:
        hit = (mat[:,0] >= i) & (mat[:,0] < i+1) &(mat[:,1] > 370)
        id_result = id[hit,:]
        mat_result = mat[hit,:]
        np.savetxt('.'.join([file,str(i)]),np.hstack((id_result,np.asarray(mat_result,dtype='str'))),delimiter='\t',fmt='%s' )
        for result in set(id_result[:,1]):
            try:
                del original_db_dict[result]
            except KeyError:
                pass
        SeqIO.write(original_db_dict.values(), "%s.%s" %(db_file,i), "fasta")
Example #27
def generate_PSSM(seq_list, fasta_dict):
    #Populate gene dictionary and build genome
    genome = fasta_dict
    nuc_prob = gc_content(fasta_dict)

    base_dict = {"A":0, "C":1, "T":2, "G":3}
    
    #First generate a consensus matrix for the sequence, where 1st row is A counts, second row is C, third row is T, fourth row is G.
    PSSM = np.zeros([4,len(seq_list[0])])

    counter = 0
    for seq in seq_list:
        counter += 1
        for a, base in enumerate(seq):
            PSSM[base_dict[base],a] += 1

    float_formatter = lambda x: "%.1f" % x
    np.set_printoptions(formatter={'float_kind':float_formatter})
    
    a = 0
    while a < 4:
        b = 0
        while b < len(seq_list[0]):
            if PSSM[a,b] == 0: PSSM[a,b] += 1
            PSSM[a,b] = np.log2((PSSM[a,b]/float(counter))/nuc_prob[a])
            b += 1
        a += 1
    
    return PSSM
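# A hypothetical scoring helper (an assumption, not part of the original
# module): the log-odds score of a candidate sequence is the sum of the
# PSSM entries for its bases at each position.
def score_seq(seq, pssm, base_dict=None):
    base_dict = base_dict or {"A": 0, "C": 1, "T": 2, "G": 3}
    return sum(pssm[base_dict[base], i] for i, base in enumerate(seq))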
Example #28
	def general_mix(self, mix, **kwargs):
		"""
			simple mix data test
		"""
		npr.seed(122351)
		self.mix = mix(K = self.nClass,**kwargs)
		self.mix.set_data(self.Y)
		self.mu_sample = list()
		self.sigma_sample = list()
		self.p_sample = list()
		for k in range(self.nClass):
			self.mu_sample.append(np.zeros_like(self.Thetas[k])) 
			self.sigma_sample.append(np.zeros_like(self.Sigmas[k])) 
			self.p_sample.append(np.zeros_like(self.P[k])) 
			
			
		for i in range(self.sim):  # @UnusedVariable
			self.mix.sample()
			for k in range(self.nClass):
				self.mu_sample[k] += self.mix.mu[k]
				self.sigma_sample[k] += self.mix.sigma[k]
				self.p_sample[k] += self.mix.p[k]
		np.set_printoptions(precision=2)
			
		self.compare_class("MCMC:")
Example #29
    def fo_reverse(self, xs_bar):

        numpy.set_printoptions(precision=None, threshold=None, edgeitems=None, linewidth=200, suppress=None, nanstr=None, infstr=None, formatter=None)
        self.xs_bar = xs_bar.copy()


        self.x0_bar = numpy.zeros(self.x0.shape)
        self.f_bar  = numpy.zeros(self.f.shape)
        self.p_bar  = numpy.zeros(self.p.shape)
        self.q_bar  = numpy.zeros(self.q.shape)
        self.u_bar  = numpy.zeros(self.u.shape)

        for i in range(self.M-1)[::-1]:
            h = self.ts[i+1] - self.ts[i]
            self.update_u(i)

            self.xs_bar[i,:] += self.xs_bar[i + 1, :]

            self.f_bar[:] = h*self.xs_bar[i+1, :]
            self.backend.ffcn_bar(self.ts[i:i+1],
                              self.xs[i, :], self.xs_bar[i,:],
                              self.f, self.f_bar,
                              self.p, self.p_bar,
                              self.u, self.u_bar)

            self.xs_bar[i + 1, :] = 0
            self.update_u_bar(i)

        self.x0_bar[:] += self.xs_bar[0, :]
        self.xs_bar[0, :] = 0.
Example #30
def get_response_content(fs):
    # make the laplacian matrix for the graph
    weight = 1 / fs.edge_length
    n = fs.nvertices
    L = np.zeros((n,n))
    # set the diagonal
    for i in range(n):
        L[i,i] = 2 * weight
    L[0,0] = weight
    L[-1,-1] = weight
    # complete the tridiagonal
    for i in range(n-1):
        L[i+1,i] = -weight
        L[i,i+1] = -weight
    # define other matrices
    L_pinv = np.linalg.pinv(L)
    HDH = -2*L_pinv
    v = np.diag(HDH)
    e = np.ones(n)
    D = HDH - (np.outer(v, e) + np.outer(e, v))/2
    # show some matrices
    out = StringIO()
    np.set_printoptions(linewidth=300)
    print('Laplacian matrix:', file=out)
    print(L, file=out)
    print(file=out)
    print('HDH:', file=out)
    print(HDH, file=out)
    print(file=out)
    print('EDM:', file=out)
    print(D, file=out)
    print(file=out)
    return out.getvalue()
Example #31
import numpy as np
import random
#random.seed(1)
from numpy.random import seed
import pandas as pd
import sys
#seed(1)
np.set_printoptions(precision=4, suppress=True)


def compute_fitness(population, DIST):
    for chromosome in population:
        total_dist = 0
        for i in range(chromosome.shape[0] - 2):
            #print(chromosome[i], " , ", chromosome[i+1], " = ", DIST[chromosome[i], chromosome[i+1]])
            total_dist += DIST[chromosome[i], chromosome[i + 1]]
        chromosome[-1] = total_dist


def print_population(pop):
    pop_tmp = pop.copy().astype(str)
    #for row in pop_tmp:
    #    for col in row:

    pop_tmp[pop_tmp == "0"] = 'A'
    pop_tmp[pop_tmp == "1"] = 'B'
    pop_tmp[pop_tmp == "2"] = 'C'
    pop_tmp[pop_tmp == "3"] = 'D'
    pop_tmp[pop_tmp == "4"] = 'E'
    pop_tmp[pop_tmp == "5"] = 'F'
Example #32
def compare_toPineda():
    F = Fatmodel(fieldstrength_T=1.5, fieldmap_Hz=110, R2s_Hz=50)
    F.deshielding_ppm = np.array([-3.80, -3.40, -2.60, -1.94, -0.39, 0.60])
    F.relamps_percent = 100 * np.array(
        [0.087, 0.693, 0.128, 0.004, 0.039, 0.048])
    F.deshielding_ppm = np.array([-3.4])
    F.relamps_percent = np.array([100])

    F.set_params_matrix()
    F.TE_s = np.array([1.5500, 3.8200, 6.0900]) * 1e-3
    TE_s = F.TE_s
    nTE = len(TE_s)

    F.set_constraints_matrices()
    Cm, Cp, Cf, Cr = F.constraints_matrices

    Ninr = int(100)  # int(1e5)
    snr = 200
    tol = 1e-4
    itermax = 50

    N = 101

    cols = [
        'pdff', 'r2s', 'CRLBs', 'NSAs', 'mcNSAs', 'trF', 'detF', 'trFinv',
        'detFinv', 'snr', 'mean', 'var'
    ]
    df = pd.DataFrame(columns=cols)
    for i, pdff in enumerate(np.linspace(0.1, 99.9, N)):
        F.fatfraction_percent = pdff
        F.set_params_matrix()
        F.build_signal()
        pm0 = F.pm
        sig = F.signal_samp

        mean, var = mc_css_varpro(Ninr, snr, TE_s, sig, pm0, Cm, Cp, Cf, Cr,
                                  tol, itermax)

        print(i, pdff, mean)

        F.set_Fisher_matrix()
        FIM = F.Fisher_matrix

        FIMinv = np.linalg.inv(FIM)

        trF = np.trace(FIM)
        detF = np.linalg.det(FIM)
        CRLBs = np.diagonal(FIMinv)
        trFinv = np.trace(FIMinv)
        detFinv = np.linalg.det(FIMinv)

        NSAs = nTE / np.diag(FIM) / CRLBs

        mcNSAs = (1 / snr)**2 * nTE / np.diag(FIM) / var

        df = df.append(
            pd.DataFrame(data=[[
                pdff, r2s, CRLBs, NSAs, mcNSAs, trF, detF, trFinv, detFinv,
                snr, mean, var
            ]],
                         columns=cols))

    print(df)

    plt.close('all')
    for i in range(6):
        plt.figure()
        # plt.plot(df['pdff'], df['CRLBs'].str[i], 'o-')
        # plt.plot(df['pdff'], (1/snr)**2 * df['var'].str[i], 'o-')
        # plt.plot(df['pdff'], (snr/F.sigamp) * df['NSAs'].str[i], 'o-')
        plt.plot(df['pdff'], df['NSAs'].str[i], 'o-')
        plt.plot(df['pdff'], df['mcNSAs'].str[i], 'o-')
        # plt.xscale('log')
        plt.legend()

    F = Fatmodel(fieldstrength_T=1.5, fieldmap_Hz=110, R2s_Hz=50, sigamp=100)
    F.deshielding_ppm = np.array([-3.4])
    F.relamps_percent = np.array([100])
    F.set_constraints_matrices()
    # print(F.Cm)
    # F.Cm = F.Cm / np.diag(F.Cm.T.astype(bool).dot(F.Cm) / F.Cm.sum(0)).sum()
    # F.constraints_matrices[0] = F.Cm
    # F.constraints_matrices[1] = F.Cm
    print(F.constraints_matrices)
    # print(F.Cm)
    # F.Cp = F.Cm
    # F.Cp = F.Cp / np.diag(F.Cp.T.astype(bool).dot(F.Cp) / F.Cp.sum(0)).sum()
    # F.Cm /= F.Cm.shape[-1]
    # F.Cp /= F.Cp.shape[-1]
    # F.Cf /= F.Cf.shape[-1]
    # F.Cr /= F.Cr.shape[-1]
    F.TE_s = np.array([1.5500, 3.8200, 6.0900]) * 1e-3
    F.set_params_matrix()
    F.set_Fisher_matrix()
    J = css.get_Jacobian(F.TE_s, F.pm, F.Cm, F.Cp, F.Cf, F.Cr)
    # FIM = F.Fisher_matrix
    FIM = J.T.conj().dot(J).real
    crlb = np.diag(np.linalg.inv(FIM))
    np.set_printoptions(precision=2)
    print(FIM)
    print()
    print(crlb)
    print(crlb / 2)
Example #33
import os
print("my ProcessID:", os.getpid())
print("PWD:", os.getcwd())

#import unittest

##### NOTE: ONLY run this after you configure RESTORE_MODE as 1, 3, 5, or 7 and after running `save_v2.py`

import numpy as np
np.set_printoptions(suppress=True)

import tensorflow as tf

# two initial values with double type.
num_a = np.array(
    [
        [1, 2],
        [3, 4]
    ], dtype=np.float64)
num_b = np.array(
    [
        [10.1, 20.02],
        [30.003, 40.0004]
    ], dtype=np.float64)

# the precision requirement of saving and restoring a float number
PRECISION = 1.0/1000

xa = tf.Variable(num_a, dtype = tf.double)
xb = tf.Variable(num_b, dtype = tf.double)
Example #34
# -*- coding: utf-8 -*-
"""
Created on Wed Jul  3 16:14:14 2019

@author: wangjinyang
"""
import pandas as pd
import numpy as np
from functools import reduce  # higher-order function for permutations and combinations
np.set_printoptions(suppress=True)  # do not display in scientific notation
adr = r"F:\项目资料\联通日常\201907天馈寻优\Opt_data.xlsx"
# compute distances from longitude/latitude and elevation
import math
EARTH_REDIUS = 6378.137
a = 6378245
b = 6356752.3142


def rad(d):
    return d * math.pi / 180.0


class Ant:  #lng	lat	 Azimuth	Downdip_angle 	height 	Fre_DL 	power
    def __init__(self, x1, y1, azu, Downdip_angle, hb, fre, RsPow, x2, y2):
        self.RsPow = RsPow
        self.hb = hb
        self.fre = fre
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
@Desc       :   Linear regression with TensorFlow; uses TensorFlow to build a multi-layer neural network for data prediction
"""
# common imports
import os
import sys

import matplotlib.pyplot as plt
import numpy as np  # pip install "numpy<1.17"; versions below 1.17 avoid the error
import sklearn
import tensorflow as tf
import winsound
from tensorflow.python.framework import ops

# Configure how numpy prints data (precision, no scientific notation, full output)
np.set_printoptions(precision=8,
                    suppress=True,
                    threshold=np.inf,
                    linewidth=200)
# Fix the random seed so the random data is stable and every test run produces the same results
seed = 42
tf.set_random_seed(seed)
np.random.seed(seed)

# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
assert sklearn.__version__ >= "0.20"
# numpy 1.16.4 is required
assert np.__version__ in ["1.16.5", "1.16.4"]
# Suppress the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Initialize the default computation graph
Example #36
import sys
import numpy as np
import dataloader
from torchsummary import summary
import cv2
import torch
from render import renderKernels, render3dBarCharts

# Numpy pretty print
np.set_printoptions(precision=4)

### Load model
# modelDir = None
# if modelDir == None:
# 	if len(sys.argv) == 1:
# 		model, date, NCLASSES, NFILES, NBATCHES, NLAYERS, NCHANNELS, IMAGE_SIZE, CLASSES, MODELDIR, INDICES_TRAIN, INDICES_TEST = dataloader.loadLatestModel()
# 	else:
# 		model, date, NCLASSES, NFILES, NBATCHES, NLAYERS, NCHANNELS, IMAGE_SIZE, CLASSES, MODELDIR, INDICES_TRAIN, INDICES_TEST = dataloader.loadModelFromDir(sys.argv[1])
# else:
# 	model, date, NCLASSES, NFILES, NBATCHES, NLAYERS, NCHANNELS, IMAGE_SIZE, CLASSES, MODELDIR, INDICES_TRAIN, INDICES_TEST = dataloader.loadModelFromDir(modelDir)

model, date, NCLASSES, NFILES, NBATCHES, NLAYERS, NCHANNELS, IMAGE_SIZE, CLASSES, MODELDIR, INDICES_TRAIN, INDICES_TEST = dataloader.loadLatestModel(
)

# Set model to evaluation mode (disables dropout layers)
model.eval()
summary(model, (1, IMAGE_SIZE, IMAGE_SIZE), 1, "cpu")

# Loading all data
data, iClasses, classToLabel = dataloader.loadAndPrepAllImages(
    nFiles=NFILES, imsize=IMAGE_SIZE, classes=CLASSES)
Example #37
    def writeRestart(self, ivars=None, suffix=None):
        """
        -writeRestart-
        Description - Main driver to write the entire pyrandaSim state
        in parallel for later use at restart.
        """

        # Use cycle number if no suffix is given
        if not suffix:
            suffix = '_' + str(self.cycle).zfill(6)

        # Prep directory
        dumpDir = os.path.join(self.PyIO.rootname, "restart" + suffix)
        if self.PyMPI.master == 1:
            try:
                os.mkdir(dumpDir)
            except:
                pass

        self.PyMPI.comm.Barrier()

        ## Persistent data ##
        serial_data = {}

        supported_types = []
        supported_types.append(type(''))
        supported_types.append(type(0))
        supported_types.append(type(1.0))
        supported_types.append(type([]))
        supported_types.append(type({}))
        supported_types.append(type(None))
        supported_types.append(type(numpy.float64))
        supported_types.append(type(True))

        # Check if ivars dictionary is passed
        if ivars:
            # Sanitize "ivars" (dont allow any
            serial_data["local_vars"] = {}
            serial_data['local_vars']['numpyArrays'] = {}

            # For ivars that are numpy arrays, save them individually
            for vv in ivars:
                itype = type(ivars[vv])

                isType = False
                for typ in supported_types:
                    isType = isType or (typ == itype)

                if itype == type(numpy.ones(1)):
                    filename = os.path.join(dumpDir, "%s" % vv)
                    serial_data['local_vars']['numpyArrays'][vv] = filename
                    if self.PyMPI.master:
                        numpy.save(filename, ivars[vv])
                elif isType:
                    serial_data["local_vars"][vv] = ivars[vv]
                #else:
                #    self.iprint("Warning: The variable %s is type %s is not supported by restarts." % (vv,itype))

        # Original domain-decomp
        serial_data['mesh'] = self.meshOptions.copy()
        serial_data['EOM'] = self.eom
        serial_data['ICs'] = self.ics
        serial_data['decomp'] = [self.PyMPI.px, self.PyMPI.py, self.PyMPI.pz]
        serial_data['procMap'] = self.PyMPI.procMap
        serial_data['packages'] = self.packagesRestart
        serial_data['time'] = self.time
        serial_data['deltat'] = self.deltat
        serial_data['cycle'] = self.cycle

        # Serialize the mesh function (if it exists)
        if 'function' in serial_data['mesh']:
            if serial_data['mesh']['function']:
                serial_data['mesh']['function'] = None
        #        import pdb
        #        pdb.set_trace()
        #        serial_data['mesh']['function'] = inspect.getsource(
        #            self.meshOptions['function'] )
        #        serial_data['mesh']['function-name'] = self.meshOptions['function'].__name__

        # Variable map
        serial_data['vars'] = {}
        cnt = 0
        for ivar in self.variables:
            serial_data['vars'][ivar] = cnt
            cnt += 1

        # Save the current print options, then raise the limits so
        # str(serial_data) writes any numpy arrays in full (no '...').
        PO = numpy.get_printoptions()
        numpy.set_printoptions(threshold=sys.maxsize)
        numpy.set_printoptions(linewidth=sys.maxsize)
        if self.PyMPI.master == 1:
            fid = open(os.path.join(dumpDir, 'serial.dat'), 'w')
            fid.write(str(serial_data))
            fid.close()
        numpy.set_printoptions(**PO)

        # Parallel
        # Variables
        DATA = self.PyMPI.emptyVector(len(self.variables))
        for ivar in self.variables:
            DATA[:, :, :,
                 serial_data['vars'][ivar]] = self.variables[ivar].data
        # Grid
        #DATA[:,:,:,serial_data['vars']['meshx']] = self.mesh.coords[0].data
        #DATA[:,:,:,serial_data['vars']['meshy']] = self.mesh.coords[1].data
        #DATA[:,:,:,serial_data['vars']['meshz']] = self.mesh.coords[2].data

        # Write this big thing
        rank = self.PyMPI.comm.rank
        procFile = open(
            os.path.join(dumpDir, 'proc-%s.bin' % str(rank).zfill(5)), 'w')
        DATA.tofile(procFile)
        procFile.close()
Example #38
import torchvision.transforms as transforms
import traceback
import torch.nn as nn
import torch
import time
import random
import skimage
import unet
from abc import ABCMeta, abstractmethod
import imageio
import cv2
from skimage.transform import resize
import matplotlib
import sys
import numpy as np  # missing from the original imports but used below
matplotlib.use('agg')
np.set_printoptions(threshold=sys.maxsize)


class Dataset(torch.utils.data.Dataset):
    def __init__(self,
                 data_dict,
                 transform=None,
                 normaliser=2**8 - 1,
                 is_valid=False,
                 is_inference=False):
        """Initialisation for the Dataset object

        :param data_dict: dictionary of dictionaries containing images
        :param transform: PyTorch image transformations to apply to the images
        :returns: N/A
        :rtype: N/A
Example #39
Einit[0] = 0
Einit[-1] = 0

Hinit = h.initial(xx)
#Hinit = np.zeros(N)
Hinit[0] = 0
Hinit[-1] = 0

H00 = h.D0(xx, L)

a1 = np.zeros(N-1)

initial = np.concatenate((Hinit, Einit))
A = np.diag(a1+(1.0/2), k=1) + np.diag(a1-(1.0/2), k=-1)

np.set_printoptions(precision=3,threshold=10)
#print(A)
Hm = np.copy(A)

Hm[0,:] = 0.0
Hm[-1,:] = 0.0

Hm[1, 0 ] = 0.
Hm[1, 1] = -2.0/3
Hm[1, 2] = 2.0/3

Hm[-2, -1] = 0.
Hm[-2, -2] = 2.0/3
Hm[-2, -3] = -2.0/3
print(Hm)
vals, vects = np.linalg.eig(Hm)
Example #40
import numpy as np
from joblib import dump, load
import array
from decimal import Decimal
# The following are used below but were missing from this snippet:
import pandas as pd
from flask import Flask
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

app = Flask(__name__)

clf = load('heart.joblib')
clf_250 = load('heart_250.joblib')
clf_200 = load('heart_200.joblib')
clf_100 = load('heart_100.joblib')

medic = pd.read_csv('data.csv')

np.set_printoptions(suppress=True,
                    infstr='inf',
                    formatter={'complex_kind': '{:.10f}'.format})


@app.route('/', methods=['GET', 'POST'])
def index():
    y_250 = medic.iloc[:250, 10].values.astype('int32')
    x_250 = (medic.iloc[:250, 0:10].values).astype('int32')
    (xTrain250, xTest250, yTrain250,
     yTest250) = train_test_split(x_250,
                                  y_250,
                                  test_size=0.25,
                                  random_state=42)
    accuracy_250 = '{:.2f}'.format(
        float(accuracy_score(yTest250, clf_250.predict(xTest250))))
    accuracy_250 = float(accuracy_250) * 100
# 00 Initialization
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

desired_width = 320
pd.set_option('display.width', desired_width)
np.set_printoptions(linewidth=desired_width,suppress=True,formatter={'float_kind':'{:f}'.format})



# 01 Set paths
WorkingDirectory = os.getcwd()
DataFolder = os.path.join(WorkingDirectory,'02_Data')
MatchingFolder = os.path.join(WorkingDirectory,'04_Results/02_Individuals_Matching')
MatchesFolder = os.path.join(MatchingFolder,'Matches')
ResultsFolder = os.path.join(MatchingFolder,'Matching_Assessment')


# 02 Load Data
Matched_OI = pd.read_csv(os.path.join(MatchingFolder,'Matched_OI.csv'))
Matched_Healthy = pd.read_csv(os.path.join(MatchingFolder,'Matched_Healthy.csv'))

ScanLists = [File for File in os.listdir(DataFolder) if File.endswith('Scans.csv')]
ScanLists.sort()
HealthyData = pd.read_csv(os.path.join(DataFolder,ScanLists[0]))
OIData = pd.read_csv(os.path.join(DataFolder,ScanLists[1]))

## Adapt labeling and values type
OIData = OIData.dropna()     # Filter patients without data
Example #42
import sys, os, pickle
import tarfile, io
import gzip
from pathlib import Path
import itertools
import argparse
import multiprocessing as mp
import matplotlib.pyplot as plt
# atomium is optional, but all other packages should be installed
try:
    import atomium
    has_atomium = True
except ImportError:
    has_atomium = False

np.set_printoptions(threshold=np.inf, linewidth=np.inf)

# term_width = os.get_terminal_size().columns
logo = []
for i, line in enumerate(open(__file__, 'r')):
    if 2 <= i <= 8:
        logo.append((line[2:-2]))
helptext = []
for i, line in enumerate(open(__file__, 'r')):
    if (i > 0 and line.startswith('#')):
        helptext.append((line[1:].rstrip()))
    elif not line.startswith('#'):
        break

### ARGPARSE ROUTINE
Example #43
                    T = np.dot(GT[jx], inv(prev_GT[jx]))
                    coord.transform(T)
                line_set.points = V3dV(GT[:,:3,3])

                prev_GT = GT.copy()

                vis.update_geometry()

                vis.poll_events()
                vis.update_renderer()
                time.sleep(0.01)
                vis.run()

if __name__ == '__main__':

    np.set_printoptions(suppress=True, precision=4)


    usc = UnityAvatarSkeleton()
    # usc.visualize_transforms()
    test_visualizer(usc)
    # run_unit_test(usc)

    # usc.visualize_transforms()
    # randomly rotate
    # local_rotations = usc.get_random_rotations()
    # usc.set_node_local_poses(local_rotations)
    # GTX = usc.get_node_transforms()
    # positionsX = GTX[:,:3,3]

    # usc.set_node_local_poses(usc.local_rotations)
rf_enc.fit(rf.apply(X_train))
# Train the LR using the one-hot encodings as features
rf_lm = LogisticRegression(solver='lbfgs', max_iter=1000)
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
# Predict with the LR
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)

# Supervised transformation based on GBDT
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd.fit(X_train, y_train)
# Obtain the one-hot encoding
grd_enc = OneHotEncoder(categories='auto')

temp = grd.apply(X_train)
np.set_printoptions(threshold=np.inf)
grd_enc.fit(grd.apply(X_train)[:, :, 0])
#print(grd_enc.get_feature_names()) # inspect the feature corresponding to each column
# Train the LR using the one-hot encodings as features
grd_lm = LogisticRegression(solver='lbfgs', max_iter=1000)
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
# Predict with the LR
y_pred_grd_lm = grd_lm.predict_proba(
    grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)

# Predict directly with the GBDT
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)

# Predict directly with the RF
from UDM import UDM

import pandas as pd
import numpy as np
np.set_printoptions(precision=5, suppress=True, edgeitems=30, linewidth=100000)   
from joblib import dump
import matplotlib.pyplot as plt

# df = pd.read_csv("Tom/table3.csv")
# ordinal = ["O" in c for c in df.columns]
# X = df.values

df = pd.read_csv("Tom/adult.csv", index_col=0)
ordinal = [True, False, True, False, False, False, True]
X = df.values[:,:-1] # Ignore compensation column.

udm = UDM(X, ordinal) 
dump(udm, "Tom/adult_udm.joblib")

# print(udm.R)
# print(udm.phi[0])
# plt.imshow(udm.phi[0])
# plt.show()

# mask = np.ones((X.shape[0], X.shape[0]), dtype=bool)
# mask[2,0] = 0
# mask[0,2] = 0
# mask[1,3] = 0

# print(udm(X, mask=mask, placeholder=np.inf))
def get_layer_swap_acc():
    num_val = 5
    out_dim = IN_dat.num_classes
    batch_size = num_val * IN_dat.num_classes // 5
    np.set_printoptions(precision=6, suppress=True)

    print("----------------imagenet---------------------------")

    inp_file = h5py.File('imagenet_dcranksubset_ori.h5', 'r')
    yval = inp_file['img_label'][:]
    inp_file.close()

    for layer_id in range(0, len(alexnet.layer_names), 1):

        out_file2 = h5py.File('alexnet_imagenet_layer_swap_acc.h5', 'a')

        model100_l1, layer_dict100_l1 = alexnet.AlexNetDNN_layers(
            '../Imagenet_models/alexnet_imagenet_weights.npy',
            layer_id=layer_id,
            trainable=False,
            out_dim=IN_dat.num_classes)
        model100_l1.compile(optimizer=SGD(), loss='categorical_crossentropy')

        print('\n compiled model successfully ')
        print('\n layer name : ', alexnet.layer_names[layer_id])
        acc_dist = np.empty(
            (2, alexnet.layer_size101[layer_id, 0], IN_dat.num_dist),
            np.float32)
        acc_blur = np.empty(
            (2, alexnet.layer_size101[layer_id, 0], IN_dat.num_dist),
            np.float32)

        for iter_id in range(IN_dat.num_dist):

            vgg_ori = h5py.File('alexnet_imagenet_layer_outputs.h5', 'r')
            vgg_dist = h5py.File(
                'alexnet_imagenet_layer_outputs_awgn_' + str(iter_id) + '.h5',
                'r')
            vgg_blur = h5py.File(
                'alexnet_imagenet_layer_outputs_blur_' + str(iter_id) + '.h5',
                'r')

            y_pred_dist = np.empty((alexnet.layer_size101[layer_id, 0],
                                    num_val * IN_dat.num_classes, out_dim),
                                   np.float32)
            y_pred_blur = np.empty((alexnet.layer_size101[layer_id, 0],
                                    num_val * IN_dat.num_classes, out_dim),
                                   np.float32)

            for batch_id in range(0,
                                  (num_val * IN_dat.num_classes) // batch_size):
                print('\n Processing batch : ' + str(batch_id + 1))
                t1 = time.time()

                vgg_ori_batch1 = vgg_ori[alexnet.layer_names[layer_id]][
                    batch_size * batch_id:(batch_id + 1) *
                    batch_size, :, :, :].copy()

                vgg_dist_batch = vgg_dist[alexnet.layer_names[layer_id]][
                    batch_size * batch_id:(batch_id + 1) *
                    batch_size, :, :, :].copy()

                vgg_blur_batch = vgg_blur[alexnet.layer_names[layer_id]][
                    batch_size * batch_id:(batch_id + 1) *
                    batch_size, :, :, :].copy()

                t2 = time.time()
                print('\n time required for loading file ' + str(t2 - t1))

                for filter_id in range(alexnet.layer_size101[layer_id, 0]):

                    vgg_dist_t = vgg_dist_batch.copy()
                    vgg_blur_t = vgg_blur_batch.copy()

                    t3 = time.time()

                    vgg_dist_t[:,
                               filter_id, :, :] = vgg_ori_batch1[:,
                                                                 filter_id, :, :].copy(
                                                                 )

                    y_pred_dist[
                        filter_id, batch_id * batch_size:(batch_id + 1) *
                        batch_size, :] = model100_l1.predict(vgg_dist_t)
                    # del vgg_ori_batch2

                    vgg_blur_t[:,
                               filter_id, :, :] = vgg_ori_batch1[:,
                                                                 filter_id, :, :].copy(
                                                                 )

                    y_pred_blur[
                        filter_id, batch_id * batch_size:(batch_id + 1) *
                        batch_size, :] = model100_l1.predict(vgg_blur_t)

                    t4 = time.time()
                    # print '\n finished prediction '
                    # print '\n time required for predicting outputs ' + str(t4 - t3)
                    del vgg_dist_t, vgg_blur_t

                del vgg_ori_batch1, vgg_dist_batch, vgg_blur_batch

            vgg_ori.close()
            vgg_dist.close()
            vgg_blur.close()

            for filter_id in range(alexnet.layer_size101[layer_id, 0]):
                print " AWGN level : " + str(iter_id) + " , layer : " + str(
                    alexnet.layer_names[layer_id]) + ", filter : " + str(
                        filter_id)
                acc_dist[0, filter_id, iter_id], acc_dist[
                    1, filter_id, iter_id] = base_acc.compute_test_accuracy(
                        y_pred_dist[filter_id, :, :], yval)
                print " Blur level : " + str(iter_id) + " , layer : " + str(
                    alexnet.layer_names[layer_id]) + ", filter : " + str(
                        filter_id)
                acc_blur[0, filter_id, iter_id], acc_blur[
                    1, filter_id, iter_id] = base_acc.compute_test_accuracy(
                        y_pred_blur[filter_id, :, :], yval)

            del y_pred_dist, y_pred_blur

        out_file2.create_dataset("imagenet_awgn/" +
                                 str(alexnet.layer_names[layer_id]),
                                 data=acc_dist)
        out_file2.create_dataset("imagenet_blur/" +
                                 str(alexnet.layer_names[layer_id]),
                                 data=acc_blur)

        out_file2.close()
Example #47
import sys
import numpy
import logging
import sklearn.cluster
import matplotlib.pyplot as plt
from exp.sandbox.ClusterBound import ClusterBound
from apgl.data.Standardiser import Standardiser
from mpl_toolkits.mplot3d import Axes3D
from sklearn import metrics

numpy.random.seed(21)
numpy.set_printoptions(suppress=True, precision=3)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

numExamples = 100
numFeatures = 3
std = 0.1

V = numpy.random.rand(numExamples, numFeatures)
V[0:20, :] = numpy.random.randn(20, numFeatures) * std
V[0:20, 0:3] += numpy.array([1, 0.2, -1])

V[20:70, :] = numpy.random.randn(50, numFeatures) * std
V[20:70, 0:3] += numpy.array([-0.5, 1, -1])

V[70:, :] = numpy.random.randn(30, numFeatures) * std
V[70:, 0:3] += numpy.array([-0.3, 0.4, -0.1])

U = V - numpy.mean(V, 0)
U = Standardiser().normaliseArray(U.T).T
Example #48
def shiftOrigin(Rt, T, point, offset=None):
    '''
    Shift origin using rotation and translation vector generated based on calibration target
    earlier.
    Works on a single 3D point.
    '''
    npt = np.dot(Rt, point.transpose()) + T.transpose()
    if offset is not None:
        npt = offset + npt # Account for offset
    if (ydir == -1.):
        npt[1] *= ydir ## Make this Y-up coordinate system
    return npt

if __name__ == '__main__':
    fmt = lambda x: "%12.6f" % x
    np.set_printoptions(formatter={'float_kind':fmt})
    print("\n-------------------------------------------------------------------")
    print("All results are in mm scale")
    print("All manual measurements are from top left cage corner")
    print("-------------------------------------------------------------------\n")
    intrinsics = "data/calib/intrinsics.yml"
    extrinsics = "data/calib/extrinsics.yml"

    posefile = 'data/calib/pose_Z5300_1.yml'  # Chessboard closer to left cage
    posefile = 'data/calib/pose_Z5300_2.yml' # Chessboard in the center of cage (overrides the line above)
    ## Instantiate depthFinder; This will carry out all the required one-time setup including rectification
    df = depthFinder(intrinsics, extrinsics, shift=False, offset=np.float32([0,0,0]), posefile=posefile) ## For now, don't shift origin -- we are testing it
    fsi = cv.FileStorage(posefile, cv.FILE_STORAGE_READ)
    if not fsi.isOpened():
        print("Could not open file {} for reading calibration data".format(posefile))
        sys.exit()
Example #49
#  - Allow user to specify file name (control with command-line arg).
#
#Graph Output:
#  - Allow setting of colors.
#  - Proper scaling of axes.
#  - Fix overlapping text due to default Python display of axes tick labels.

import sys
import numpy as np
import math
import time
import matplotlib.pyplot as plt
import scipy.stats as spStats
import scipy.special as spSpecial

np.set_printoptions(threshold=sys.maxsize)  #for debugging purposes


def find_nearest(array, value):
    idx = (np.abs(array - value)).argmin()
    return idx
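
# e.g., find_nearest(np.array([0.1, 0.5, 0.9]), 0.42) returns 1 (the index of 0.5)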


##----------------------------------------------------------------------------##

##----- USER-MODIFIED VARIABLES -----##

d_DIAM_min = 0.03125  # minimum CEIL(diameter) bin; the final minimum will be smaller due to bin centering
d_apriori_uncertainty = 0.1  # a priori uncertainty on diameter, as a multiple of the diameter
d_apriori_Nvariation = 0.0  # a priori variation in the number of craters found, as a strict fraction of the total (e.g., "0.3" = +/-30%)
d_descretization = 0.01  # fidelity of the discretization in km (or whatever unit the crater diameters are in)
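
# Example: with the values above, a D = 2 km crater carries a +/-0.2 km (10%)
# a priori diameter uncertainty, discretized in 0.01 km steps.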
Beispiel #50
0
# get positions where elements of two arrays match
import numpy as np
a = np.array([1, 2, 3, 2, 3, 4, 3, 4, 5, 6])
b = np.array([7, 2, 10, 2, 7, 4, 9, 4, 9, 8])
matches = np.where(a == b)
print(matches)  # (array([1, 3, 5, 7]),)

# 2D array containing random floats between 5 and 10
import numpy as np
rand_array = np.random.uniform(5, 10, size=(5, 3))
print("Output :", rand_array)

# limit the number of printed items to 6
import numpy as np
np.set_printoptions(threshold=6)  # set_printoptions returns None, so don't assign its result
arr = np.arange(15)
print("output:", arr)

# print a numpy array, suppressing scientific notation
import numpy as np
np.set_printoptions(suppress=False)  # default: small floats print in scientific notation
np.random.seed(100)
rand_arr = np.random.random([3, 3]) / 1e3
np.set_printoptions(suppress=True, precision=6)  # now they print as plain decimals
print(rand_arr)

# swap two columns in a 2D numpy array
import numpy as np
arr = np.arange(9).reshape(3, 3)
print(arr[:, [1, 0, 2]])  # fancy indexing swaps columns 0 and 1
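
# temporarily limit printed items without leaking global state
# (np.printoptions is the context-manager form of np.set_printoptions, NumPy >= 1.15)
import numpy as np
arr = np.arange(15)
with np.printoptions(threshold=6):
    print(arr)  # summarized: [ 0  1  2 ... 12 13 14]
print(arr)      # options restored on exit; prints the full array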
Beispiel #51
0
from abc import ABC, abstractmethod
import numpy as np
import torch
from classireg.models.gp_mean import GPmean
from botorch.optim import optimize_acqf
from botorch.gen import gen_candidates_scipy, gen_candidates_torch, get_best_candidates
from botorch.optim.initializers import gen_batch_initial_conditions
from classireg.utils.plotting_collection import PlotProbability
from botorch.acquisition.objective import ConstrainedMCObjective, ScalarizedObjective, AcquisitionObjective
from torch.distributions.normal import Normal
import matplotlib.pyplot as plt
from typing import List
import pdb
from botorch.models import FixedNoiseGP, ModelListGP
from classireg.models.gp_mean_cons import GPmeanConstrained
from classireg.utils.parsing import get_logger

dist_standnormal = Normal(loc=0.0, scale=1.0)
np.set_printoptions(linewidth=10000)
logger = get_logger(__name__)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.float32
idxm = dict(obj=0, cons=1)

def obj_callable(Z):
  return Z[..., 0]

def constraint_callable(Z):
  return 0.0 + Z[..., 1]  # Z[..., 1] represents g(x), with g(x) <= 0 meaning constraint satisfaction.
                          # If we need g(x) >= a, we must return a - Z[..., 1]
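  # e.g., to require g(x) >= 0.2, one would instead return 0.2 - Z[..., 1]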

# define a feasibility-weighted objective for optimization
constrained_obj = ConstrainedMCObjective(
    objective=obj_callable,
    constraints=[constraint_callable],
)
Beispiel #52
0
nr_epochs = 100
model_2.fit(x_train_small, y_train_small, batch_size=samples_per_batch, epochs=nr_epochs,
            callbacks=[get_tensorboard('Model 2 XL')], verbose=0, validation_data=(x_val, y_val))

samples_per_batch = 40000
nr_epochs = 100
model_3.fit(x_train_small, y_train_small, batch_size=samples_per_batch, epochs=nr_epochs,
            callbacks=[get_tensorboard('Model 3 XL')], verbose=0, validation_data=(x_val, y_val))


# Prediction on individual images
display(x_val[0].shape)  # only 1 dimension here
test = np.expand_dims(x_val[0], axis=0)  # add a batch dimension because predict() expects batched input
display(test.shape)  # now 2 dimensions
model_2.predict(test)  # works, but the output reads better with fewer decimals
np.set_printoptions(precision=3)  # limit printed floats to 3 decimals
model_2.predict(x_val)
model_2.predict_classes(test)

for iks in range(10):
    test_img = np.expand_dims(x_val[iks], axis=0)
    predicted_val = model_2.predict_classes(test_img)[0]
    print(f'actual value = {y_val[iks][0]} vs predicted = {predicted_val}')


# EVALUATION
test_loss, test_accuracy = model_2.evaluate(x_test, y_test)
print(f'Test loss is {test_loss:0.3} and test accuracy is {test_accuracy:0.1%}')  # loss to 3 significant digits, accuracy as a percentage with 1 decimal place

#Confusion matrix
predictions = model_2.predict_classes(test)
Beispiel #53
0
import random

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
from tqdm import tqdm

from . import torch_utils  # , google_utils

matplotlib.rc('font', **{'size': 11})

# Set printoptions
torch.set_printoptions(linewidth=1320, precision=5, profile='long')
np.set_printoptions(linewidth=320,
                    formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
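# e.g., np.array([1e-05, 123.456]) would print as [      1e-05      123.46]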

# Prevent OpenCV from multithreading (to use PyTorch DataLoader)
cv2.setNumThreads(0)


def floatn(x, n=3):  # format floats to n decimals
    return float(format(x, '.%gf' % n))
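
# e.g., floatn(3.14159, 2) -> 3.14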


def init_seeds(seed=0):
    random.seed(seed)
    np.random.seed(seed)
    torch_utils.init_seeds(seed=seed)
Beispiel #54
0
def test_np_threshold(kernel):
    """Test that setting Numpy threshold doesn't make the Variable Explorer slow."""

    cmd = "from spyder_kernels.console import start; start.main()"

    with setup_kernel(cmd) as client:

        # Set Numpy threshold, suppress and formatter
        client.execute("""
import numpy as np;
np.set_printoptions(
    threshold=np.inf,
    suppress=True,
    formatter={'float_kind':'{:0.2f}'.format})
    """)
        client.get_shell_msg(timeout=TIMEOUT)

        # Create a big Numpy array and an array to check decimal format
        client.execute("""
x = np.random.rand(75000,5);
a = np.array([123412341234.123412341234])
""")
        client.get_shell_msg(timeout=TIMEOUT)

        # Assert that NumPy threshold, suppress and formatter
        # are the same as the ones set by the user
        client.execute("""
t = np.get_printoptions()['threshold'];
s = np.get_printoptions()['suppress'];
f = np.get_printoptions()['formatter']
""")
        client.get_shell_msg(timeout=TIMEOUT)

        # Check correct decimal format
        client.inspect('a')
        msg = client.get_shell_msg(timeout=TIMEOUT)
        while "data" not in msg['content']:
            msg = client.get_shell_msg(timeout=TIMEOUT)
        content = msg['content']['data']['text/plain']
        assert "123412341234.12" in content

        # Check threshold value
        client.inspect('t')
        msg = client.get_shell_msg(timeout=TIMEOUT)
        while "data" not in msg['content']:
            msg = client.get_shell_msg(timeout=TIMEOUT)
        content = msg['content']['data']['text/plain']
        assert "inf" in content

        # Check suppress value
        client.inspect('s')
        msg = client.get_shell_msg(timeout=TIMEOUT)
        while "data" not in msg['content']:
            msg = client.get_shell_msg(timeout=TIMEOUT)
        content = msg['content']['data']['text/plain']
        assert "True" in content

        # Check formatter
        client.inspect('f')
        msg = client.get_shell_msg(timeout=TIMEOUT)
        while "data" not in msg['content']:
            msg = client.get_shell_msg(timeout=TIMEOUT)
        content = msg['content']['data']['text/plain']
        assert "{'float_kind': <built-in method format of str object" in content
Beispiel #55
0
import pandas as pd
import numpy as np
import text_normalizer as tn
import model_evaluation_utils as meu

np.set_printoptions(precision=2, linewidth=80)
dataset = pd.read_csv(r'movie_reviews.csv')

reviews = np.array(dataset['review'])
sentiments = np.array(dataset['sentiment'])

# extract data for model evaluation
test_reviews = reviews[35000:]
test_sentiments = sentiments[35000:]
sample_review_ids = [7626, 3533, 13010]

# normalize dataset
norm_test_reviews = tn.normalize_corpus(test_reviews)

# Sentiment Analysis with AFINN
from afinn import Afinn
afn = Afinn(emoticons=True)

## Predict sentiment for sample reviews
for review, sentiment in zip(test_reviews[sample_review_ids], test_sentiments[sample_review_ids]):
    print('REVIEW:', review)
    print('Actual Sentiment:', sentiment)
    print('Predicted Sentiment polarity:', afn.score(review))
    print('-'*60)
Beispiel #56
0
    def evaluate(self, opt, videofile):

        self.__S__.eval()

        # ========== ==========
        # Load video
        # ========== ==========
        cap = cv2.VideoCapture(videofile)

        images = []
        while True:
            ret, image = cap.read()
            if not ret:
                break

            images.append(image)

        im = numpy.stack(images, axis=3)
        im = numpy.expand_dims(im, axis=0)
        im = numpy.transpose(im, (0, 3, 4, 1, 2))

        imtv = torch.autograd.Variable(
            torch.from_numpy(im.astype(float)).float())

        # ========== ==========
        # Load audio
        # ========== ==========

        audiotmp = os.path.join(opt.tmp_dir, 'audio.wav')

        command = (
            "ffmpeg -y -i %s -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 %s"
            % (videofile, audiotmp))
        output = subprocess.call(command, shell=True, stdout=None)

        sample_rate, audio = wavfile.read(audiotmp)
        mfcc = zip(*python_speech_features.mfcc(audio, sample_rate))
        mfcc = numpy.stack([numpy.array(i) for i in mfcc])

        cc = numpy.expand_dims(numpy.expand_dims(mfcc, axis=0), axis=0)
        cct = torch.autograd.Variable(
            torch.from_numpy(cc.astype(float)).float())

        # ========== ==========
        # Check audio and video input length
        # ========== ==========

        if (float(len(audio)) / 16000) < (float(len(images)) / 25):
            print(
                " *** WARNING: The audio (%.4fs) is shorter than the video (%.4fs). Type 'cont' to continue. *** "
                % (float(len(audio)) / 16000, float(len(images)) / 25))
            pdb.set_trace()

        # ========== ==========
        # Generate video and audio feats
        # ========== ==========

        lastframe = len(images) - 6
        im_feat = []
        cc_feat = []

        tS = time.time()
        for i in range(0, lastframe, opt.batch_size):

            im_batch = [
                imtv[:, :, vframe:vframe + 5, :, :]
                for vframe in range(i, min(lastframe, i + opt.batch_size))
            ]
            im_in = torch.cat(im_batch, 0)
            im_out = self.__S__.forward_lip(im_in.cuda())
            im_feat.append(im_out.data.cpu())

            cc_batch = [
                cct[:, :, :, vframe * 4:vframe * 4 + 20]
                for vframe in range(i, min(lastframe, i + opt.batch_size))
            ]
            cc_in = torch.cat(cc_batch, 0)
            cc_out = self.__S__.forward_aud(cc_in.cuda())
            cc_feat.append(cc_out.data.cpu())

        im_feat = torch.cat(im_feat, 0)
        cc_feat = torch.cat(cc_feat, 0)

        # ========== ==========
        # Compute offset
        # ========== ==========

        print('Compute time %.3f sec.' % (time.time() - tS))

        dists = calc_pdist(im_feat, cc_feat, vshift=opt.vshift)
        mdist = torch.mean(torch.stack(dists, 1), 1)

        minval, minidx = torch.min(mdist, 0)

        offset = opt.vshift - minidx
        conf = torch.median(mdist) - minval
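        # minidx indexes the [-vshift, +vshift] search window, so vshift - minidx
        # converts it to a signed AV offset in frames; confidence is the margin
        # between the median and the minimum of the mean distance curve.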

        fdist = numpy.stack([dist[minidx].numpy() for dist in dists])
        # fdist   = numpy.pad(fdist, (3,3), 'constant', constant_values=15)
        fconf = torch.median(mdist).numpy() - fdist
        fconfm = signal.medfilt(fconf, kernel_size=9)

        numpy.set_printoptions(formatter={'float': '{: 0.3f}'.format})
        print('Framewise conf: ')
        print(fconfm)
        print('AV offset: \t%d \nMin dist: \t%.3f\nConfidence: \t%.3f' %
              (offset, minval, conf))

        dists_npy = numpy.array([dist.numpy() for dist in dists])
        return offset.numpy(), conf.numpy(), dists_npy
Beispiel #57
0
    # Tail of a masspara_combine(filename, sheetname, max_row) definition; see the calls below.
    m = 0
    cm = np.array([0, 0, 0])
    it = tensor(*np.array([0, 0, 0, 0, 0, 0]))
    table = load_workbook(filename)
    sheet = table[sheetname]
    for idx_r in range(15, max_row):
        item = []
        for idx_c in range(2, 13):
            item.append(sheet.cell(row=idx_r + 1, column=idx_c + 1).value)
        m_new = item[0]
        cm_new = np.array(item[1:4]) * 1e-3
        it_new = tensor(*(np.array(item[4:-1]) * 1e-6))
        m, cm, it = mass_combine(m, m_new, cm, cm_new, it, it_new)
    it = tear_tensor(it)
    return m, cm, it


filename = 'C:/Users/pei.sun/Desktop/InWork_KR_Cybertech_gear_replacement_mass_parameters_V02.xlsx'
sheetname = ['Baseframe', 'Rotation_Column', 'Linkarm_short', 'Linkarm_long']
n_row = [23, 23, 23, 23]

for idx in range(4):
    m, cm, it = masspara_combine(filename, sheetname[idx], n_row[idx])
    np.set_printoptions(precision=2)
    print('{}:\nmass: {:.2f}\ncm: {}'.format(sheetname[idx], m, cm * 1e3))
    np.set_printoptions(precision=6)
    print('IT: {}'.format(it))



Beispiel #58
0
import os
import sys
import collections
import nltk
import re
import pickle
import numpy as np
import pandas as pd
np.set_printoptions(threshold=sys.maxsize)  # threshold=np.nan is rejected by modern NumPy; sys.maxsize always prints full arrays

'''
reviews = "C:\\Users\\donkey\\Desktop\\machine learning\\Data\\Sentiment\\reviews.txt"
def review_sentiment(review_path):
    with open(review_path, "r") as file:
        x = []; y = []
        text = list(file)
        for i in text:
            try:
                label_raw = re.findall("\t([0|1])", i)[0]
            except:
                print("error in: " + i)
            if label_raw == "1":
                y.append([1,0])
            elif label_raw == "0":
                y.append([0,1])                
            text_raw = re.sub("\t([0|1])\n", "", i)
            x.append(text_raw)
        return({"x":x, "y":y})
    
twitter = "C:\\Users\\donkey\\Desktop\\machine learning\\Data\\Sentiment\\twitter.txt"
def twitter_sentiment(twitter_path):
    with open(twitter_path, "r", encoding = "utf-8") as file:
Beispiel #59
0
    def __init__(self,options,tgt_opts,forcefield):
        # Initialize base class
        super(Liquid,self).__init__(options,tgt_opts,forcefield)
        # Fractional weight of the density
        self.set_option(tgt_opts,'w_rho',forceprint=True)
        # Fractional weight of the enthalpy of vaporization
        self.set_option(tgt_opts,'w_hvap',forceprint=True)
        # Fractional weight of the thermal expansion coefficient
        self.set_option(tgt_opts,'w_alpha',forceprint=True)
        # Fractional weight of the isothermal compressibility
        self.set_option(tgt_opts,'w_kappa',forceprint=True)
        # Fractional weight of the isobaric heat capacity
        self.set_option(tgt_opts,'w_cp',forceprint=True)
        # Fractional weight of the dielectric constant
        self.set_option(tgt_opts,'w_eps0',forceprint=True)
        # Optionally pause on the zeroth step
        self.set_option(tgt_opts,'manual')
        # Don't target the average enthalpy of vaporization and allow it to freely float (experimental)
        self.set_option(tgt_opts,'hvap_subaverage')
        # Number of time steps in the liquid "equilibration" run
        self.set_option(tgt_opts,'liquid_eq_steps',forceprint=True)
        # Number of time steps in the liquid "production" run
        self.set_option(tgt_opts,'liquid_md_steps',forceprint=True)
        # Number of time steps in the gas "equilibration" run
        self.set_option(tgt_opts,'gas_eq_steps',forceprint=True)
        # Number of time steps in the gas "production" run
        self.set_option(tgt_opts,'gas_md_steps',forceprint=True)
        # Time step length (in fs) for the liquid production run
        self.set_option(tgt_opts,'liquid_timestep',forceprint=True)
        # Time interval (in ps) for writing coordinates
        self.set_option(tgt_opts,'liquid_interval',forceprint=True)
        # Time step length (in fs) for the gas production run
        self.set_option(tgt_opts,'gas_timestep',forceprint=True)
        # Time interval (in ps) for writing coordinates
        self.set_option(tgt_opts,'gas_interval',forceprint=True)
        # Adjust simulation length in response to simulation uncertainty
        self.set_option(tgt_opts,'adapt_errors',forceprint=True)
        # Minimize the energy prior to running any dynamics
        self.set_option(tgt_opts,'minimize_energy',forceprint=True)
        # Isolated dipole (debye) for analytic self-polarization correction.
        self.set_option(tgt_opts,'self_pol_mu0',forceprint=True)
        # Molecular polarizability (ang**3) for analytic self-polarization correction.
        self.set_option(tgt_opts,'self_pol_alpha',forceprint=True)
        # Set up the simulation object for self-polarization correction.
        self.do_self_pol = (self.self_pol_mu0 > 0.0 and self.self_pol_alpha > 0.0)
        # Enable anisotropic periodic box
        self.set_option(tgt_opts,'anisotropic_box',forceprint=True)
        # Whether to save trajectories (0 = never, 1 = delete after good step, 2 = keep all)
        self.set_option(tgt_opts,'save_traj')

        #======================================#
        #     Variables which are set here     #
        #======================================#
        # Read in liquid starting coordinates.
        if not os.path.exists(os.path.join(self.root, self.tgtdir, self.liquid_coords)): 
            logger.error("%s doesn't exist; please provide liquid_coords option\n" % self.liquid_coords)
            raise RuntimeError
        self.liquid_mol = Molecule(os.path.join(self.root, self.tgtdir, self.liquid_coords), toppbc=True)
        # Read in gas starting coordinates.
        if not os.path.exists(os.path.join(self.root, self.tgtdir, self.gas_coords)): 
            logger.error("%s doesn't exist; please provide gas_coords option\n" % self.gas_coords)
            raise RuntimeError
        self.gas_mol = Molecule(os.path.join(self.root, self.tgtdir, self.gas_coords))
        # List of trajectory files that may be deleted if self.save_traj == 1.
        self.last_traj = []
        # Extra files to be copied back at the end of a run.
        self.extra_output = []
        ## Read the reference data
        self.read_data()
        # Extra files to be linked into the temp-directory.
        self.nptfiles += [self.liquid_coords, self.gas_coords]
        # Scripts to be copied from the ForceBalance installation directory.
        self.scripts += ['npt.py']
        # Prepare the temporary directory.
        self.prepare_temp_directory()
        # Build keyword dictionary to pass to engine.
        if self.do_self_pol:
            self.gas_engine_args.update(self.OptionDict)
            self.gas_engine_args.update(options)
            del self.gas_engine_args['name']
            # Create engine object for gas molecule to do the polarization correction.
            self.gas_engine = self.engine_(target=self, mol=self.gas_mol, name="selfpol", **self.gas_engine_args)
        # Don't read indicate.log when calling meta_indicate()
        self.read_indicate = False
        self.write_indicate = False
        # Don't read objective.p when calling meta_get()
        # self.read_objective = False
        #======================================#
        #          UNDER DEVELOPMENT           #
        #======================================#
        # Put stuff here that I'm not sure about. :)
        np.set_printoptions(precision=4, linewidth=100)
        np.seterr(under='ignore')
        ## Saved trajectories for all iterations and all temperatures
        self.SavedTraj = defaultdict(dict)
        ## Evaluated energies for all trajectories (i.e. all iterations and all temperatures), using all mvals
        self.MBarEnergy = defaultdict(lambda:defaultdict(dict))
        ## Saved results for all iterations
        # self.SavedMVals = []
        self.AllResults = defaultdict(lambda:defaultdict(list))
Beispiel #60
0
    def run_ga_optimization(self,
                            optimization_setting: OptimizationSetting,
                            population_size=100,
                            ngen_size=30,
                            output=True):
        """"""
        # Clear lru_cache before running ga optimization
        _ga_optimize.cache_clear()

        # Get optimization setting and target
        settings = optimization_setting.generate_setting_ga()
        target_name = optimization_setting.target_name

        if not settings:
            self.output("优化参数组合为空,请检查")  # "Optimization parameter space is empty, please check"
            return

        if not target_name:
            self.output("优化目标未设置,请检查")  # "Optimization target is not set, please check"
            return

        # Define parameter generation function
        def generate_parameter():
            """"""
            return random.choice(settings)

        def mutate_individual(individual, indpb):
            """"""
            size = len(individual)
            paramlist = generate_parameter()
            for i in range(size):
                if random.random() < indpb:
                    individual[i] = paramlist[i]
            return individual,

        # Create ga object function
        global ga_target_name
        global ga_strategy_class
        global ga_setting
        global ga_vt_symbol
        global ga_interval
        global ga_start
        global ga_rate
        global ga_slippage
        global ga_size
        global ga_pricetick
        global ga_capital
        global ga_end
        global ga_mode
        global ga_inverse

        ga_target_name = target_name
        ga_strategy_class = self.strategy_class
        ga_setting = settings[0]
        ga_vt_symbol = self.vt_symbol
        ga_interval = self.interval
        ga_start = self.start
        ga_rate = self.rate
        ga_slippage = self.slippage
        ga_size = self.size
        ga_pricetick = self.pricetick
        ga_capital = self.capital
        ga_end = self.end
        ga_mode = self.mode
        ga_inverse = self.inverse

        # Set up genetic algorithm
        toolbox = base.Toolbox()
        toolbox.register("individual", tools.initIterate, creator.Individual,
                         generate_parameter)
        toolbox.register("population", tools.initRepeat, list,
                         toolbox.individual)
        toolbox.register("mate", tools.cxTwoPoint)
        toolbox.register("mutate", mutate_individual, indpb=1)
        toolbox.register("evaluate", ga_optimize)
        toolbox.register("select", tools.selNSGA2)

        total_size = len(settings)
        pop_size = population_size  # number of individuals in each generation
        lambda_ = pop_size  # number of children to produce at each generation
        mu = int(
            pop_size *
            0.8)  # number of individuals to select for the next generation

        cxpb = 0.95  # probability that an offspring is produced by crossover
        mutpb = 1 - cxpb  # probability that an offspring is produced by mutation
        ngen = ngen_size  # number of generation

        pop = toolbox.population(pop_size)
        hof = tools.ParetoFront()  # end result of pareto front

        stats = tools.Statistics(lambda ind: ind.fitness.values)
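        # Print the per-generation stats arrays without scientific notation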
        np.set_printoptions(suppress=True)
        stats.register("mean", np.mean, axis=0)
        stats.register("std", np.std, axis=0)
        stats.register("min", np.min, axis=0)
        stats.register("max", np.max, axis=0)

        # Multiprocessing is not supported yet.
        # pool = multiprocessing.Pool(multiprocessing.cpu_count())
        # toolbox.register("map", pool.map)

        # Run ga optimization
        self.output(f"参数优化空间:{total_size}")
        self.output(f"每代族群总数:{pop_size}")
        self.output(f"优良筛选个数:{mu}")
        self.output(f"迭代次数:{ngen}")
        self.output(f"交叉概率:{cxpb:.0%}")
        self.output(f"突变概率:{mutpb:.0%}")

        start = time()

        algorithms.eaMuPlusLambda(pop,
                                  toolbox,
                                  mu,
                                  lambda_,
                                  cxpb,
                                  mutpb,
                                  ngen,
                                  stats,
                                  halloffame=hof)

        end = time()
        cost = int(end - start)

        self.output(f"遗传算法优化完成,耗时{cost}秒")  # "GA optimization finished; took {cost} seconds"

        # Return result list
        results = []

        for parameter_values in hof:
            setting = dict(parameter_values)
            target_value = ga_optimize(parameter_values)[0]
            results.append((setting, target_value, {}))

        print(results)
        return results