Example #1
    def __str__(self):
        numpy.set_printoptions(precision=4, threshold=6)

        samples = self.samples
        if self.samples is not None:
            samples = numpy.array2string(self.samples)

        stats = self.stats
        if self.stats is not None:
            stats = numpy.array2string(self.stats)

        ratios = self.ratios
        if self.ratios is not None:
            ratios = numpy.array2string(self.ratios)

        output = '\n'.join([
                'samples = %s' % samples,
                'stats   = %s' % stats,
                'ratios  = %s' % ratios,
                'null    = %s' % repr(self.null),
                'alt     = %s' % repr(self.alt),
                'lr      = %s' % repr(self.lr),
                'ppp     = %s' % repr(self.ppp)
                ])

        return output
Example #2
    def __str__(self):
        numpy.set_printoptions(precision=4, threshold=6)

        x = self.x
        if self.x is not None:
            x = numpy.array2string(self.x)

        y = self.y
        if self.y is not None:
            y = numpy.array2string(self.y)

        yerr = self.yerr
        if self.yerr is not None:
            yerr = numpy.array2string(self.yerr)

        xerr = self.xerr
        if self.xerr is not None:
            xerr = numpy.array2string(self.xerr)
        
        return (('x      = %s\n' +
                 'y      = %s\n' +
                 'yerr   = %s\n' +
                 'xerr   = %s\n' +
                 'xlabel = %s\n' +
                 'ylabel = %s\n' +
                 'title  = %s\n' +
                 'plot_prefs = %s') %
                ( x,
                  y,
                  yerr,
                  xerr,
                  self.xlabel,
                  self.ylabel,
                  self.title,
                  self.plot_prefs))
Example #3
 def __str__(self):
     numpy.set_printoptions(precision=4, threshold=6)
     
     x0 = self.x0
     if self.x0 is not None:
         x0 = numpy.array2string(self.x0)
         
     x1 = self.x1
     if self.x1 is not None:
         x1 = numpy.array2string(self.x1)
         
     y = self.y
     if self.y is not None:
         y = numpy.array2string(self.y)
         
     return (('x0     = %s\n' +
              'x1     = %s\n' +
              'y      = %s\n' +
              'xlabel = %s\n' +
              'ylabel = %s\n' +
              'title  = %s\n' +
              'levels = %s\n' +
              'contour_prefs = %s') %
             ( x0,
               x1,
               y,
               self.xlabel,
               self.ylabel,
               self.title,
               self.levels,
               self.contour_prefs))
Example #4
    def evaluate(self, raster_plot_time_idx, fire_rate_time_idx):
        """ Displays output of the simulation.

        Calculates the firing rate of each population,
        creates a spike raster plot and a box plot of the
        firing rates.

        """
        if nest.Rank() == 0:
            print(
                'Interval to compute firing rates: %s ms'
                % np.array2string(fire_rate_time_idx)
                )
            fire_rate(
                self.data_path, 'spike_detector',
                fire_rate_time_idx[0], fire_rate_time_idx[1]
                )
            print(
                'Interval to plot spikes: %s ms'
                % np.array2string(raster_plot_time_idx)
                )
            plot_raster(
                self.data_path, 'spike_detector',
                raster_plot_time_idx[0], raster_plot_time_idx[1]
                )
            boxplot(self.net_dict, self.data_path)
Example #5
 def write_output(self):
     """ Write output file. """
     text = ["# Input file for profit.py"]
     text.append("a) {0} # Input table".format(self.table))
     text.append("b) {0} # PSF type".format(self.psffunct))
     text.append("c) {0} # PSF parameters".format(
                             np.array2string(self.psf, precision=3)[1:-1]))
     text.append("c1) {0} # PSF parameters err".format(
                         np.array2string(self.psferr, precision=3)[1:-1]))
     text.append("d) {0} # Convolution box".format(self.conv_box))
     text.append("e) {0} # Weights for fitting".format(
                                                       self.header["e"]))
     self.pfit = self.pfit.astype(np.float64)
     self.perr = self.perr.astype(np.float64)
     for idx, comp, comment in zip(self.idx, self.complist, \
                                   self.complist_comments):
         text.append("1) {0} @ {1} # Component type".format(comp, comment))
         for j, i in enumerate(idx):
             text.append("{0}) {1:.7f} {2} +/- {3:.5f} # {4}".format(
                         j+2, self.pfit[i], self.pfix[i], self.perr[i],
                         self.models[comp].comments[j]))
         text.append("\n")
     with open(self.outfile, "w") as f:
         f.write("\n".join(text))
     return
Example #6
def export_collada(mesh, file_obj=None):
    '''
    Export a mesh as collada, to filename
    '''
    import os, inspect
    from string import Template
    
    MODULE_PATH = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    # open in text mode so Template receives a str (a bytes template breaks substitute)
    with open(os.path.join(MODULE_PATH,
                           'templates',
                           'collada_template.dae')) as f:
        template = Template(f.read())

    # we bother setting this because np.array2string uses these printoptions 
    np.set_printoptions(threshold=np.inf, precision=5, linewidth=np.inf)

    replacement = dict()
    replacement['VERTEX']   = np.array2string(mesh.vertices.reshape(-1))[1:-1]
    replacement['FACES']    = np.array2string(mesh.faces.reshape(-1))[1:-1]
    replacement['NORMALS']  = np.array2string(mesh.vertex_normals.reshape(-1))[1:-1]
    replacement['VCOUNT']   = str(len(mesh.vertices))
    replacement['VCOUNTX3'] = str(len(mesh.vertices) * 3)
    replacement['FCOUNT']   = str(len(mesh.faces))

    export = template.substitute(replacement)
    return _write_export(export, file_obj)
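The comment above is worth unpacking: np.array2string falls back to the global print options for any argument it is not given explicitly, so np.set_printoptions silently changes its output. A minimal sketch of that interaction (the array and the exact spacing shown in the comments are illustrative and vary across numpy versions):

import numpy as np

a = np.arange(6) / 3.0

np.set_printoptions(precision=2)
print(np.array2string(a))               # e.g. [0.   0.33 0.67 1.   1.33 1.67]

# explicit keyword arguments override the globals
print(np.array2string(a, precision=4))  # e.g. [0.     0.3333 0.6667 1.     1.3333 1.6667]

np.set_printoptions(threshold=np.inf, precision=5, linewidth=np.inf)
print(np.array2string(a))               # one long line, 5 digits, never summarized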
Example #7
    def test_0d_arrays(self):
        unicode = type(u'')
        assert_equal(unicode(np.array(u'café', np.unicode_)), u'café')

        if sys.version_info[0] >= 3:
            assert_equal(repr(np.array('café', np.unicode_)),
                         "array('café', dtype='<U4')")
        else:
            assert_equal(repr(np.array(u'café', np.unicode_)),
                         "array(u'caf\\xe9', dtype='<U4')")
        assert_equal(str(np.array('test', np.str_)), 'test')

        a = np.zeros(1, dtype=[('a', '<i4', (3,))])
        assert_equal(str(a[0]), '([0, 0, 0],)')

        assert_equal(repr(np.datetime64('2005-02-25')[...]),
                     "array('2005-02-25', dtype='datetime64[D]')")

        assert_equal(repr(np.timedelta64('10', 'Y')[...]),
                     "array(10, dtype='timedelta64[Y]')")

        # repr of 0d arrays is affected by printoptions
        x = np.array(1)
        np.set_printoptions(formatter={'all':lambda x: "test"})
        assert_equal(repr(x), "array(test)")
        # str is unaffected
        assert_equal(str(x), "1")

        # check `style` arg raises
        assert_warns(DeprecationWarning, np.array2string,
                                         np.array(1.), style=repr)
        # but not in legacy mode
        np.array2string(np.array(1.), style=repr, legacy='1.13')
Example #8
    def __repr__(self):
        prefixstr = '    '

        if self._values.shape == ():
            v = [tuple([self._values[nm] for nm in self._values.dtype.names])]
            v = np.array(v, dtype=self._values.dtype)
        else:
            v = self._values

        names = self._values.dtype.names
        precision = np.get_printoptions()['precision']
        fstyle = functools.partial(_fstyle, precision)
        format_val = lambda val: np.array2string(val, style=fstyle)
        formatter = {
            'numpystr': lambda x: '({0})'.format(
                ', '.join(format_val(x[name]) for name in names))
        }

        if NUMPY_LT_1P7:
            arrstr = np.array2string(v, separator=', ',
                                     prefix=prefixstr)

        else:
            arrstr = np.array2string(v, formatter=formatter,
                                     separator=', ',
                                     prefix=prefixstr)

        if self._values.shape == ():
            arrstr = arrstr[1:-1]

        unitstr = ('in ' + self._unitstr) if self._unitstr else '[dimensionless]'
        return '<{0} ({1}) {2:s}\n{3}{4}>'.format(
            self.__class__.__name__, ', '.join(self.components),
            unitstr, prefixstr, arrstr)
Example #9
    def __repr__(self):
        prefixstr = "    "

        if self._values.shape == ():
            v = [tuple([self._values[nm] for nm in self._values.dtype.names])]
            v = np.array(v, dtype=self._values.dtype)
        else:
            v = self._values

        names = self._values.dtype.names
        precision = np.get_printoptions()["precision"]
        fstyle = functools.partial(_fstyle, precision)
        format_val = lambda val: np.array2string(val, style=fstyle)
        formatter = {"numpystr": lambda x: "({0})".format(", ".join(format_val(x[name]) for name in names))}

        if NUMPY_LT_1P7:
            arrstr = np.array2string(v, separator=", ", prefix=prefixstr)

        else:
            arrstr = np.array2string(v, formatter=formatter, separator=", ", prefix=prefixstr)

        if self._values.shape == ():
            arrstr = arrstr[1:-1]

        unitstr = ("in " + self._unitstr) if self._unitstr else "[dimensionless]"
        return "<{0} ({1}) {2:s}\n{3}{4}>".format(
            self.__class__.__name__, ", ".join(self.components), unitstr, prefixstr, arrstr
        )
Example #10
def plot(arr, max_arr=None):
    if max_arr is None:
        max_arr = arr
    max_val = max(abs(np.max(max_arr)), abs(np.min(max_arr)))
    print(np.array2string(arr,
                          formatter={'float_kind': lambda x: visual(x, max_val)},
                          max_line_width=5000))
Example #11
    def test_structure_format(self):
        dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
        x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
        assert_equal(np.array2string(x),
                "[('Sarah', [ 8.,  7.]) ('John', [ 6.,  7.])]")

        # for issue #5692
        A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
        A[5:].fill(np.datetime64('NaT'))
        assert_equal(np.array2string(A),
                "[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) " +
                "('1970-01-01T00:00:00',)\n ('1970-01-01T00:00:00',) " +
                "('1970-01-01T00:00:00',) ('NaT',) ('NaT',)\n " +
                "('NaT',) ('NaT',) ('NaT',)]")

        # See #8160
        struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
        assert_equal(np.array2string(struct_int),
                "[([  1,  -1],) ([123,   1],)]")
        struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
                dtype=[('B', 'i4', (2, 2))])
        assert_equal(np.array2string(struct_2dint),
                "[([[ 0,  1], [ 2,  3]],) ([[12,  0], [ 0,  0]],)]")

        # See #8172
        array_scalar = np.array(
                (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
        assert_equal(np.array2string(array_scalar), "( 1.,  2.12345679,  3.)")
Example #12
def main(argv):
    file_loc = os.path.dirname(os.path.relpath(__file__))
    with open(file_loc + '/../constants_runspec.json') as constants_file:
        constants = json.load(constants_file)
    data_file = open(file_loc + '/../data/' + argv[0])
    data = read_file(data_file, constants)

    time = range(0, constants['NUM_DAYS'])
    fig = plt.figure(1)
    for n in range(constants['NUM_RUNS']):
        plt.plot(time, data[n, :, constants['ADULT_SUSC']], 'r', time, data[n, :, constants['ADULT_SICK']], 'g', time, data[n, :, constants['ADULT_IMMUNE']], 'b', time, data[n, :, constants['ADULT_CARRIERS']], 'k')
    plt.legend(['Susceptible adults', 'Sick adults', 'Immune adults', 'Adult carriers'])

    stats_array = np.zeros((constants['NUM_RUNS'], 3), dtype=np.float64)
    np.seterr(divide='ignore')
    for n in range(constants['NUM_RUNS']):
        pop_sum = np.sum(data[n, :, :], 1)
        rate_carriers = np.nanmean(np.divide(data[n, :, constants['ADULT_CARRIERS']], pop_sum))
        ccr = np.nanmean(np.divide(data[n, :, constants['ADULT_SICK']], data[n, :, constants['ADULT_CARRIERS']]))
        max_sick = np.max(data[n, :, constants['ADULT_SICK']])
        stats_array[n, :] = [rate_carriers, ccr, max_sick]
    print('{0:22}|| {1:18} || {2:18} ||'.format('   Rate of carriers', 'Case-Carrier ratio', 'Top notation of sick'))
    print(np.array2string(stats_array, separator='||', formatter={'float_kind': lambda x: "%20f" % x}))

    fig = plt.figure(2)
    top_sick = np.sort(stats_array[:,2])
    plt.plot(top_sick)
    plt.show()
Example #13
    def __repr__(self):
        import flowvb.core._flow_vb_str

        # Add data dimensions to data dictionary
        opt = self.options.copy()
        opt.update({'num_obs': self.data.shape[0],
                    'num_features': self.data.shape[1]})

        # Build summary string
        str_summary = flowvb.core._flow_vb_str.str_summary_data
        str_summary += flowvb.core._flow_vb_str.str_summary_options_init_all

        if self.options['init_mean'] is not None:
            str_summary += flowvb.core._flow_vb_str.str_summary_init_mean
            opt['init_mean'] = np.array2string(opt['init_mean'])
        if self.options['init_covar'] is not None:
            str_summary += flowvb.core._flow_vb_str.str_summary_init_covar
            opt['init_covar'] = np.array2string(opt['init_covar'])
        if self.options['init_mixweights'] is not None:
            str_summary += flowvb.core._flow_vb_str.str_summary_init_mixweights
            opt['init_mixweights'] = np.array2string(opt['init_mixweights'])

        str_summary += flowvb.core._flow_vb_str.str_summary_optim_display

        return str_summary % opt
Example #14
File: noise.py Project: Solvi/pyhrf
    def sampleNextInternal(self, variables):
        #TODO : comment

        smplARp = variables[self.samplerEngine.I_NOISE_ARP]
        InvAutoCorrNoise = smplARp.InvAutoCorrNoise
        smplHRF = variables[self.samplerEngine.I_HRF]
        varXh = smplHRF.varXh
        smplDrift =  variables[self.samplerEngine.I_DRIFT]
        varMBYPl = smplDrift.varMBYPl
        smplNRL = variables[self.samplerEngine.I_NRLS]
        varNRLs = smplNRL.currentValue
        self.computeVarYTilde(varNRLs, varXh, varMBYPl)
#        self.varYtilde = variables[self.samplerEngine.I_NRLS].varYtilde

        for i in range(self.nbVox):
            varYtildeTdelta = np.dot(self.varYTilde[:,i],InvAutoCorrNoise[:,:,i])
            self.beta[i] = 0.5*np.dot(varYtildeTdelta,self.varYTilde[:,i])
        pyhrf.verbose(6,'betas apost :')
        pyhrf.verbose(6,np.array2string(self.beta,precision=3))
        pyhrf.verbose(6,'sigma2 ~betas/Ga(%1.3f,1)'
                      %(0.5*(self.ny + 1)))
        gammaSamples = np.random.gamma(0.5*(self.ny + 1), 1, self.nbVox)
        self.currentValue = np.divide(self.beta, gammaSamples)
        pyhrf.verbose(6, 'All noise vars :')
        pyhrf.verbose(6,
                      np.array2string(self.currentValue,precision=3))
        pyhrf.verbose(4, 'noise vars = %1.3f(%1.3f)'
                      %(self.currentValue.mean(), self.currentValue.std()))
Example #15
    def __str__(self):
        numpy.set_printoptions(precision=4, threshold=6)

        x = self.x
        if self.x is not None:
            x = numpy.array2string(self.x)

        y = self.y
        if self.y is not None:
            y = numpy.array2string(self.y)

        return (('x     = %s\n' +
                 'y     = %s\n' +
                 'min   = %s\n' +
                 'max   = %s\n' +
                 'nloop = %s\n' +
                 'delv  = %s\n' +
                 'fac   = %s\n' +
                 'log   = %s') %
                (x,
                 y,
                 self.min,
                 self.max,
                 self.nloop,
                 self.delv,
                 self.fac,
                 self.log))
Example #16
    def __str__(self):
        numpy.set_printoptions(precision=4, threshold=6)

        xlo = self.xlo
        if self.xlo is not None:
            xlo = numpy.array2string(numpy.asarray(self.xlo))

        xhi = self.xhi
        if self.xhi is not None:
            xhi = numpy.array2string(numpy.asarray(self.xhi))

        y = self.y
        if self.y is not None:
            y = numpy.array2string(numpy.asarray(self.y))
        
        return (('xlo    = %s\n' +
                 'xhi    = %s\n' +
                 'y      = %s\n' +
                 'xlabel = %s\n' +
                 'ylabel = %s\n' +
                 'title  = %s\n' +
                 'histo_prefs = %s') %
                ( xlo,
                  xhi,
                  y,
                  self.xlabel,
                  self.ylabel,
                  self.title,
                  self.histo_prefs))
Example #17
    def write_ascii_gz(self, out_file):
        """
        Works only in serial mode!
        """
        if self.mpi_size != 1:
            print("Error: only serial calculation supported.")
            return False

        np.set_printoptions(threshold=np.inf)

        with gzip.open(out_file, 'wt') as f_out:
            f_out.write("%d %d %d %d %d\n" % (self.natom, self.nspin, self.nao, self.nset_max, self.nshell_max))

            f_out.write(np.array2string(self.nset_info) + "\n")
            f_out.write(np.array2string(self.nshell_info) + "\n")
            f_out.write(np.array2string(self.nso_info) + "\n")

            for ispin in range(self.nspin):

                if self.nspin == 1:
                    n_el = 2*(self.i_homo_loc[ispin]+1)
                else:
                    n_el = self.i_homo_loc[ispin]+1

                f_out.write("%d %d %d %d\n" % (len(self.coef_array[ispin]), self.i_homo_cp2k[ispin], self.lfomo[ispin], n_el))

                evals_occs = np.hstack([self.evals_sel[ispin], self.occs_sel[ispin]])
                f_out.write(np.array2string(evals_occs) + "\n")

                for imo in range(len(self.coef_array[ispin])):
                    f_out.write(np.array2string(self.coef_array[ispin][imo]) + "\n")
Example #18
def distmat_to_txt( pdblist , distmat, filedir , name):
	# write out distmat in phylip compatible format
	outstr =' ' + str(len(pdblist)) + '\n'
	for i,pdb in enumerate(pdblist):
		if len(pdb) > 10:
			namestr = pdb[0:10]
		else:
			# pad to width 10 (a bare len(pdb) == 10 previously left namestr unset)
			namestr = pdb
			for pad in range(10 - len(pdb)):
				namestr += ' '
		outstr += namestr+ ' ' + np.array2string( distmat[i,:], formatter={'float_kind':lambda x: "%.2f" % x}).replace('[', '').replace(']', '')  + ' \n'
	print( outstr)
	handle = open(filedir + name + 'phylipmat.txt' , 'w')
	handle.write(outstr)
	handle.close()
	outstr = str(len(pdblist)) + '\n'
	for i,pdb in enumerate(pdblist):
		namestr = pdb.replace('.','').replace('_','')[0:20]
		outstr += namestr+ ' ' + np.array2string( distmat[i,:], formatter={'float_kind':lambda x: "%.2f" % x}).replace('[', '').replace(']', '').replace('\n', '')  + '\n'

	print( outstr)
	handle = open(filedir + name + 'fastmemat.txt' , 'w')
	handle.write(outstr)
	handle.close()
	return filedir + name + 'fastmemat.txt'
Example #19
    def sampleNextInternal(self, variables):
        # TODO : comment

        smplARp = variables[self.samplerEngine.I_NOISE_ARP]
        InvAutoCorrNoise = smplARp.InvAutoCorrNoise
        smplHRF = self.get_variable('hrf')
        varXh = smplHRF.varXh
        smplDrift = self.get_variable('drift')
        varMBYPl = smplDrift.varMBYPl
        smplNRL = self.get_variable('nrl')
        varNRLs = smplNRL.currentValue
        self.computeVarYTilde(varNRLs, varXh, varMBYPl)

        for i in range(self.nbVox):
            varYtildeTdelta = np.dot(
                self.varYTilde[:, i], InvAutoCorrNoise[:, :, i])
            self.beta[i] = 0.5 * np.dot(varYtildeTdelta, self.varYTilde[:, i])
        logger.debug('betas apost :')
        logger.debug(np.array2string(self.beta, precision=3))
        logger.debug('sigma2 ~betas/Ga(%1.3f,1)', 0.5 * (self.ny + 1))
        gammaSamples = np.random.gamma(0.5 * (self.ny + 1), 1, self.nbVox)
        self.currentValue = np.divide(self.beta, gammaSamples)
        logger.debug('All noise vars :')
        logger.debug(np.array2string(self.currentValue, precision=3))
        logger.info('noise vars = %1.3f(%1.3f)', self.currentValue.mean(),
                    self.currentValue.std())
Example #20
    def test_unexpected_kwarg(self):
        # ensure that an appropriate TypeError
        # is raised when array2string receives
        # an unexpected kwarg

        with assert_raises_regex(TypeError, 'nonsense'):
            np.array2string(np.array([1, 2, 3]),
                            nonsense=None)
Example #21
def test_array2string():
    """Basic test of array2string."""
    a = np.arange(3)
    assert_(np.array2string(a) == '[0 1 2]')
    assert_(np.array2string(a, max_line_width=4) == '[0 1\n 2]')
    stylestr = np.array2string(np.array(1.5),
                               style=lambda x: "Value in 0-D array: " + str(x))
    assert_(stylestr == 'Value in 0-D array: 1.5')
Example #22
def assert_frames_close(actual, expected, **kwargs):
    """
    Compare DataFrame items by column and
    raise AssertionError if any column is not equal.

    Ordering of columns is unimportant, items are compared only by label.
    NaN and infinite values are supported.

    Parameters
    ----------
    actual: pandas.DataFrame
    expected: pandas.DataFrame
    kwargs:

    Examples
    --------
    >>> assert_frames_close(pd.DataFrame(100, index=range(5), columns=range(3)),
    ...                   pd.DataFrame(100, index=range(5), columns=range(3)))

    >>> assert_frames_close(pd.DataFrame(100, index=range(5), columns=range(3)),
    ...                   pd.DataFrame(110, index=range(5), columns=range(3)),
    ...                   rtol=.2)

    >>> assert_frames_close(pd.DataFrame(100, index=range(5), columns=range(3)),
    ...                   pd.DataFrame(150, index=range(5), columns=range(3)),
    ...                   rtol=.2)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    AssertionError:
    ...

    References
    ----------
    Derived from: http://nbviewer.jupyter.org/gist/jiffyclub/ac2e7506428d5e1d587b
    """

    assert (isinstance(actual, pd.DataFrame) and
            isinstance(expected, pd.DataFrame)), \
        'Inputs must both be pandas DataFrames.'

    assert set(expected.columns) == set(actual.columns), \
        'test set columns must be equal to those in actual/observed set.'

    assert np.all(np.equal(expected.index.values, actual.index.values)), \
        'test set and actual set must share a common index, instead found ' \
        '%s vs %s' % (expected.index.values, actual.index.values)

    for col in expected.columns:
        try:
            assert_allclose(expected[col].values,
                            actual[col].values,
                            **kwargs)
        except AssertionError as e:
            assertion_details = 'Expected values: ' + np.array2string(expected[col].values, precision=2, separator=', ') + \
                '\nActual values:   ' + np.array2string(actual[col].values, precision=2, separator=',', suppress_small=True)
            raise AssertionError('Column: ' + str(col) + ' is not close.\n' + assertion_details)
Example #23
    def __str__(self):
        vals = self.modelvals
        if self.modelvals is not None:
            vals = array2string(asarray(self.modelvals), separator=",", precision=4, suppress_small=False)

        flux = self.flux
        if self.flux is not None:
            flux = array2string(asarray(self.flux), separator=",", precision=4, suppress_small=False)

        return "\n".join(["modelvals = %s" % vals, "flux = %s" % flux, ModelHistogram.__str__(self)])
Example #24
def matprint(A, **kwargs):
    imin = kwargs.get('imin', 0)
    imax = kwargs.get('imax', 4)
    jmin = kwargs.get('jmin', imin)
    jmax = kwargs.get('jmax', imax)
    label = kwargs.get('label', None)
    suppress_small = kwargs.get('suppress_small', True)
    if label:
        print("Matrix ", label)
    print(array2string(A[imin:imax, jmin:jmax], suppress_small=suppress_small))
    return
Example #25
 def save_estimates(self, file_name):
     with open(os.path.join('.','estimates_%s.txt' % file_name), 'w') as outfile:
         outfile.write("MU\n")
         outfile.write(np.array2string(self.mu, separator=',') + '\n')
         outfile.write("lambdas\n")
         outfile.write(np.array2string(self.lambdas, separator=',') + '\n')
         outfile.write("phi\n")
         outfile.write(np.array2string(self.phi, separator=',') + '\n')
         outfile.write("pi\n")
         outfile.write(np.array2string(self.pi, separator=',') + '\n')
Example #26
 def test_wide_element(self):
     a = np.array(['xxxxx'])
     assert_equal(
         np.array2string(a, max_line_width=5),
         "['xxxxx']"
     )
     assert_equal(
         np.array2string(a, max_line_width=5, legacy='1.13'),
         "[ 'xxxxx']"
     )
Example #27
def plot3d(step_first, step_last):
    u.shape = (maxn+1, Nz+1, Ny+1, Nx+1)
    startTime = time.time()
    for n in range(step_first, step_last):
        SCRIPT = PLOTSCRIPT3D.format(
            "png", OUTPUTDIR+"plot-t={:06.3f}.png".format(n*tau),
            "t = {:6.3f} ps".format(n*tau), 0, Nx, 0, Ny, u.min(), u.max(),
            "z = 0.0",
            np.array2string(u[n,0,:,:]).translate(trnstbl),
            "z = {}".format(int(Nz/2)/float(Nz)),
            np.array2string(u[n,int(Nz/2),:,:]).translate(trnstbl),
            "z = {}".format(1.0),
            np.array2string(u[n,Nz,:,:]).translate(trnstbl),
            "y = 0.0",
            np.array2string(u[n,:,0,:]).translate(trnstbl),
            "y = {}".format(int(Ny/2)/float(Ny)),
            np.array2string(u[n,:,int(Ny/2),:]).translate(trnstbl),
            "y = {}".format(1.0),
            np.array2string(u[n,:,Ny,:]).translate(trnstbl),
            "x = 0.0",
            np.array2string(u[n,:,:,0]).translate(trnstbl),
            "x = {}".format(int(Nx/2)/float(Nx)),
            np.array2string(u[n,:,:,int(Nx/2)]).translate(trnstbl),
            "x = {}".format(1.0),
            np.array2string(u[n,:,:,Nx]).translate(trnstbl))
        gnuplot.run(SCRIPT)
        print("Step: {} ({} ps / {} ps) {} seconds lasts".format(
            n, n*tau,  step_last*tau, time.time() - startTime))

    return
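Note that plot3d depends on a module-level trnstbl that the snippet does not define; presumably it strips the brackets np.array2string adds so gnuplot can read each block as bare whitespace-separated numbers. A sketch of such a table (an assumption, not the project's actual definition):

# hypothetical: map '[' and ']' to spaces so array2string output
# becomes a plain numeric matrix that gnuplot can parse
trnstbl = str.maketrans('[]', '  ')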
Example #28
    def __str__(self):
        vals = self.modelvals
        if self.modelvals is not None:
            vals = array2string(asarray(self.modelvals), separator=',', precision=4, suppress_small=False)

        flux = self.flux
        if self.flux is not None:
            flux = array2string(asarray(self.flux), separator=',', precision=4, suppress_small=False)

        return '\n'.join(['modelvals = %s' % vals,'flux = %s' % flux,
                          ModelHistogram.__str__(self)])
Example #29
 def test_refcount(self):
     # make sure we do not hold references to the array due to a recursive
     # closure (gh-10620)
     gc.disable()
     a = np.arange(2)
     r1 = sys.getrefcount(a)
     np.array2string(a)
     np.array2string(a)
     r2 = sys.getrefcount(a)
     gc.collect()
     gc.enable()
     assert_(r1 == r2)
Example #30
def G_to_text(G, N):
    ns = np.squeeze(G.num)
    ds = np.squeeze(G.den)
    numstr = np.array2string(ns,separator=',')
    denstr = np.array2string(ds,separator=',')
    num2 = 'num%i = %s' % (N, numstr)
    den2 = 'den%i = %s' % (N, denstr)
    Gstr = 'G%i = control.TransferFunction(num%i,den%i)' % \
               (N, N, N)
    append_line = 'G_list.append(G%i)' % N
    lines_out = [num2,den2,Gstr, append_line]
    return lines_out
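A small usage sketch for G_to_text, assuming the python-control package (control stores coefficients as floats, so they round-trip as [1., 2.] rather than [1, 2]):

import control

G = control.TransferFunction([1, 2], [1, 3, 2])
for line in G_to_text(G, 1):
    print(line)
# prints, roughly:
#   num1 = [1., 2.]
#   den1 = [1., 3., 2.]
#   G1 = control.TransferFunction(num1,den1)
#   G_list.append(G1)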
Example #31
            else:
                if game.turn in [1, 3, 5, 7, 9]:
                    print("\nPLAYER 1 TURN", game.turn)
                    player = 1
                    # Predict Move
                    #print("P1 Predict Move")
                    if game.turn == 1:
                        predicted_move = game.pick_random_legal_move(player)
                        step = predicted_move
                        #print("RANDOM PREDICT", step)
                    if game.turn > 1:
                        predicted_move = sess.run(Hypothesis, feed_dict={x_: game.obs_space})
                        step = np.argmax(predicted_move)
                       # print("NN PREDICT", step)

                    print("Hypothesis     ", np.array2string(np.asanyarray(predicted_move), max_line_width=np.inf),
                          "\n Predicted Move ", step)

                    #print("Game Status P1 ",game.game_status)

                    game.yrl_ = np.copy(game.allowed_total_action_space)
                    game.yrl_[np.where(game.yrl_ == 1)] = 2
                    game.yrl_[np.where(game.yrl_ == 0)] = 1
                    game.yrl_[np.where(game.yrl_ == 2)] = 0
                    game.yrl_ = np.reshape(game.yrl_, [1, 9])


                    if not game.check_if_move_legal(player,step):
                        game.yrl_[0][step] = 0
                        game.game_status=2
                        illegal_moves_made=illegal_moves_made+1
Example #32
        # Adaptive learning rate: decrease by 0.01 every 100 epochs
        # if (epoch % 100000 == 0):
        #     LR = LR * math.exp(-0.04*(epoch/100000))

        epoch += 1

    print("Epochs: ", epoch)
    print("Layer 1\nBias: \n", np.array2string(bh, separator=', '),
          "\nWeights: \n,", np.array2string(wh, separator=', '))
    print()
    print("Layer 2\nBias: \n", np.array2string(bo, separator=', '),
          "\nWeights: \n,", np.array2string(wo, separator=', '))


if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print('Stopped')
        print("Epochs: ", epoch)
        print("Layer 1\nBias: \n", np.array2string(bh, separator=', '),
              "\nWeights: \n,", np.array2string(wh, separator=', '))
        print()
        print("Layer 2\nBias: \n", np.array2string(bo, separator=', '),
              "\nWeights: \n,", np.array2string(wo, separator=', '))

        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
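An aside on the separator=', ' used when dumping the weights above: for a 1-D array it makes np.array2string emit valid Python list syntax, so the printed weights can be pasted straight back into a script. A quick sketch (spacing is approximate):

import numpy as np

w = np.array([0.5, -1.25, 2.0])
s = np.array2string(w, separator=', ')
print(s)                # e.g. [ 0.5 , -1.25,  2.  ]
w_roundtrip = eval(s)   # parses back to a Python list of floats (only eval trusted strings)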
Example #33
def validate(mval_loader, SM, eval_mode, GPU):
    tqdm.write("Validation...")
    submit = []
    gt = []
    total_vloss = 0
    total_vcorrects = 0
    total_vquery = 0
    val_sessions_iter = iter(mval_loader)

    for val_session in trange(len(val_sessions_iter),
                              desc='val-sessions',
                              position=2,
                              ascii=True):
        SM.eval()
        x, labels, y_mask, num_items, index = next(val_sessions_iter)
        # FIXED 13.Dec. SEPARATE LOGS. QUERY SHOULD NOT INCLUDE LOGS
        # Sample data for 'support' and 'query': ex) 15 items = 7 sup, 8 queries...
        num_support = num_items[:, 0].detach().numpy().flatten(
        )  # If num_items was odd number, query has one more item.
        num_query = num_items[:, 1].detach().numpy().flatten()
        batch_sz = num_items.shape[0]

        # x: the first 10 items out of 20 are support items left-padded with zeros. The last 10 are queries right-padded.
        x = x.permute(0, 2, 1)  # bx70*20

        x_feat = torch.zeros(batch_sz, 80, 20)
        x_feat[:, :70, :] = x.clone()
        x_feat[:, :41, 10:] = 0
        x_feat[:, 70, :10] = 1
        x_feat[:, 71:74, :10] = labels[:, :10, :].permute(0, 2, 1).clone()
        x_feat_sup = Variable(x_feat[:, :, :10]).cuda(GPU)
        x_feat_que = Variable(x_feat[:, :, 10:]).cuda(GPU)
        del x_feat
        # y
        y = labels[:, :, 1].clone()

        # y_mask
        y_mask_que = y_mask.clone()
        y_mask_que[:, :10] = 0

        y_hat = SM(x_feat_sup, x_feat_que)  # y_hat: b*20

        #        if USE_PRED_LABEL is True:
        #            # Predict
        #            li = 70 if USE_SUPLOG is True else 29 # the label's dimension indice
        #            _x = x[:,:,:11] # bx72*11
        #            for q in range(11,20):
        #                y_hat = SM(Variable(_x, requires_grad=False)) # will be bx11 at the first round
        #                # Append next features
        #                _x = torch.cat((_x, x[:,:,q].unsqueeze(2)), 2) # now bx72*12
        #                _x[:,li,q] = torch.sigmoid(y_hat[:,-1])
        #            y_hat = SM(Variable(_x, requires_grad=False)) # y_hat(final): bx20
        #            del _x
        #        else:
        #            y_hat = SM(x)

        # Calculate BCE loss: only the trailing query part is scored
        loss = F.binary_cross_entropy_with_logits(
            input=y_hat * y_mask_que.cuda(GPU),
            target=y.cuda(GPU) * y_mask_que.cuda(GPU))
        total_vloss += loss.item()

        # Decision
        y_prob = torch.sigmoid(
            y_hat * y_mask_que.cuda(GPU)).detach().cpu().numpy()  # bx20
        y_pred = (y_prob[:, 10:] > 0.5).astype(int)  # bx10
        y_numpy = labels[:, 10:, 1].numpy()  # bx10
        # Acc
        total_vcorrects += np.sum(
            (y_pred == y_numpy) * y_mask_que[:, 10:].numpy())
        total_vquery += np.sum(num_query)

        # Eval, Submission
        if eval_mode != 0:
            for b in np.arange(batch_sz):
                submit.append(y_pred[b, :num_query[b]].flatten())
                gt.append(y_numpy[b, :num_query[b]].flatten())

        if (val_session + 1) % 400 == 0:
            sample_sup = labels[0, (10 - num_support[0]):10,
                                1].long().numpy().flatten()
            sample_que = y_numpy[0, :num_query[0]].astype(int)
            sample_pred = y_pred[0, :num_query[0]]
            sample_prob = y_prob[0, 10:10 + num_query[0]]
            tqdm.write("S:" + np.array2string(sample_sup) + '\n' + "Q:" +
                       np.array2string(sample_que) + '\n' + "P:" +
                       np.array2string(sample_pred) + '\n' + "prob:" +
                       np.array2string(sample_prob))
            tqdm.write("val_session:{0:}  vloss:{1:.6f}  vacc:{2:.4f}".format(
                val_session, loss.item(), total_vcorrects / total_vquery))
        del loss, y_hat, x  # Restore GPU memory

    # Avg.Acc
    if eval_mode == 1:
        aacc = evaluate(submit, gt)
        tqdm.write("AACC={0:.6f}, FirstAcc={1:.6f}".format(aacc[0], aacc[1]))

    hist_vloss.append(total_vloss / val_session)
    hist_vacc.append(total_vcorrects / total_vquery)
    return submit
Example #34
    def learn_model(self):
        print "learning weights..."
        print "no of features are %s" % str(self.num_features)
        #new_stuff
        '''print 'train_images'
		print self.train_images.shape
		print self.train_images[0]
		
		print 'train_label_vector'
		print self.train_label_vector.shape
		print self.train_label_vector[0]'''

        for i in range(0, self.num_passes):
            Z = []
            A = [self.train_images]
            dbias = []
            DJDW = []
            k = -2
            dropout_u = []
            if self.dropout == True:
                for x in range(0, self.num_hlayers):
                    u = np.random.binomial(
                        1, 0.5,
                        (self.num_train_example, self.hidden_layer_dim)) / 0.5
                    dropout_u.append(u)

            for j in range(0, self.num_hlayers + 1):

                z = A[-1].dot(self.model_weights[j]) + self.model_bias[j]

                #print len(z)
                #print z

                Z.append(z)
                if self.activation == 'sigmoid':
                    a = 1 / (1 + np.exp(-z))
                elif self.activation == 'tanh':
                    if j != self.num_hlayers:
                        a = np.tanh(z)
                    else:
                        a = 1 / (1 + np.exp(-z))
                if j != 0 and j != self.num_hlayers:
                    if self.dropout == True:

                        a *= dropout_u[j - 1]
                #print a.shape
                #print a
                A.append(a)

            self.A = A
            self.Z = Z

            #print A[-1]

            print "Forward propagation complete, starting backprop"
            if self.activation == 'sigmoid':
                if self.cost_type == 'MSE':
                    delta = np.multiply(
                        -(self.train_label_vector - self.A[-1]),
                        self.sigmoidDerivative(self.Z[-1]))
                elif self.cost_type == 'cross':
                    delta = -(self.train_label_vector - self.A[-1])

                db_last = np.sum(delta, axis=0, keepdims=True)
                dbias.append(db_last)
                dJdW = np.dot(self.A[k].T, delta)

                print(dJdW.shape)
                with open('djdw2.txt', 'w') as grad_file:
                    for row in dJdW:
                        grad_file.write(np.array2string(row) + '\n')

                DJDW.append(dJdW)
                k -= 1
                for j in range(0, self.num_hlayers):

                    #dJdW2 = np.dot(self.a2.T, delta)
                    delta_b = np.dot(delta, self.model_bias[
                        k + 2].T) * self.sigmoidDerivative(self.Z[k + 1])
                    db = np.sum(delta_b, axis=0)
                    dbias.append(db)
                    delta = np.dot(delta, self.model_weights[
                        k + 2].T) * self.sigmoidDerivative(self.Z[k + 1])

                    dJdW = np.dot(self.A[k].T, delta)
                    DJDW.append(dJdW)
                    k -= 1
                    #print "one iteration done.."

            elif self.activation == 'tanh':
                if self.cost_type == 'MSE':
                    delta = np.multiply(-(self.train_label_vector - A[-1]),
                                        self.sigmoidDerivative(self.Z[-1]))
                    db_last = np.sum(delta, axis=0, keepdims=True)
                    dbias.append(db_last)
                    dJdW = np.dot(self.A[k].T, delta)
                    DJDW.append(dJdW)
                    k -= 1
                    for j in range(0, self.num_hlayers):

                        #dJdW2 = np.dot(self.a2.T, delta)
                        delta_b = np.dot(
                            delta, self.model_bias[k + 2].T) * self.tanh_deriv(
                                self.Z[k + 1])
                        db = np.sum(delta_b, axis=0)
                        dbias.append(db)
                        delta = np.dot(delta, self.model_weights[
                            k + 2].T) * self.tanh_deriv(self.Z[k + 1])

                        dJdW = np.dot(self.A[k].T, delta)
                        DJDW.append(dJdW)
                        k -= 1
                        #print "one iteration done.."

                elif self.cost_type == 'cross':
                    print "tanh not to be used with cross-entropy.."

            db = np.array(dbias)
            db = db[::-1]
            DJDW_v = np.array(DJDW)
            DJDW_v = DJDW_v[::-1]

            #do L2 regularisation.
            if self.reg_type == 'L2':
                print "L2 regularisation is set to true.."
                for t in range(0, len(self.model_weights)):
                    DJDW_v[t] += self.reg_lambda * self.model_weights[t]

            if self.reg_type == 'L1':
                print "L1 regularisation is set to true..."
                for u in range(0, len(self.model_weights)):
                    DJDW_v[u] += self.reg_lambda * np.sign(
                        self.model_weights[u])

            for k in range(0, len(self.model_weights)):
                self.model_weights[k] += -self.epsilon * DJDW_v[k]
                self.model_bias[k] += -self.epsilon * db[k]
            self.predict(i)
            accuracy = self.calculate_accuracy()
            #accuracy = 0
            train_cost, test_cost = self.computeCost()
            print "training cost after iteration: %s" % str(i + 1)
            print train_cost
            print "test cost after iteration: %s" % str(i + 1)
            print test_cost
            #plt.scatter((i+1),train_cost)
            self.train_error_data.append(((i + 1), train_cost))
            #plt.scatter((i+1), test_cost)
            self.test_error_data.append(((i + 1), test_cost))
            self.accuracy_data.append(((i + 1), accuracy))
Example #35
 def __repr__(self):
     matrix_repr = np.array2string(self.matrix, separator=',')
     return f"magicsquare('{self.name}', {matrix_repr}, {self.patterntype})"
Example #36
def string_from_2darray(A, language='python', presentation_type='f', digits=2):
    """string_from_2darray(A)

    This function assumes that A is one of these things:

        - a number (float or complex)
        - a 2D ndarray (float or complex)

    It returns A as a string.

    If language is 'python' and A is a 2D ndarray, the string looks like this:

        [[ ..., ... ], [ ..., ... ]]

    If language is 'matlab' and A is a 2D ndarray, the string looks like this:

        [ ... ... ; ... ... ]

    In either case, if A is not a 2D ndarray, the string is a single number,
    not wrapped in brackets.

    If presentation_type is 'sigfig', each number is formatted using the
    to_precision module to "digits" significant figures.

    Otherwise, each number is formatted as '{:.{digits}{presentation_type}}'.
    """

    # if A is a scalar
    if np.isscalar(A):
        if presentation_type == 'sigfig':
            return string_from_number_sigfig(A, digits=digits)
        else:
            return '{:.{digits}{presentation_type}}'.format(
                A, digits=digits, presentation_type=presentation_type)

    # if A is a 2D ndarray
    if language == 'python':
        if presentation_type == 'sigfig':
            formatter = {
                'float_kind': lambda x: to_precision.to_precision(x, digits),
                'complex_kind':
                lambda x: _string_from_complex_sigfig(x, digits)
            }
        else:
            formatter = {
                'float_kind':
                lambda x: '{:.{digits}{presentation_type}}'.format(
                    x, digits=digits, presentation_type=presentation_type),
                'complex_kind':
                lambda x: '{:.{digits}{presentation_type}}'.format(
                    x, digits=digits, presentation_type=presentation_type)
            }
        return np.array2string(A, formatter=formatter,
                               separator=', ').replace('\n', '')
    elif language == 'matlab':
        if presentation_type == 'sigfig':
            return numpy_to_matlab_sf(A, ndigits=digits)
        else:
            return numpy_to_matlab(A, ndigits=digits, wtype=presentation_type)
    else:
        raise Exception(
            'language "{:s}" must be either "python" or "matlab"'.format(
                language))
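A minimal usage sketch for string_from_2darray, sticking to the plain-Python branch (the 'sigfig' and 'matlab' paths need the external to_precision and numpy_to_matlab helpers, which are not shown here):

import numpy as np

A = np.array([[1.0, 2.5], [3.25, 4.0]])
print(string_from_2darray(A, language='python', presentation_type='f', digits=2))
# e.g. [[1.00, 2.50], [3.25, 4.00]]

# scalars come back as a bare number, not wrapped in brackets
print(string_from_2darray(3.14159, presentation_type='e', digits=3))  # 3.142e+00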
Example #37
def array2string(x):
  """
  Convert the numpy array to a string.
  """
  return np.array2string(x, formatter={"float": lambda f: "%-8.3f" % f})
Example #38
    x1 = [-1, -1, 1, -1, 1, -1, -1, 1]
    x2 = [-1, -1, -1, -1, -1, 1, -1, -1]
    x3 = [-1, 1, 1, -1, -1, 1, -1, 1]
    X = np.vstack((x1, x2, x3))

    x1d = [1, -1, 1, -1, 1, -1, -1, 1]
    x2d = [1, 1, -1, -1, -1, 1, -1, -1]
    x3d = [1, 1, 1, -1, 1, 1, -1, 1]
    Xd = np.vstack((x1d, x2d, x3d))

    net = Hopfield()

    net.train(X)

    trained_attractors = set([ np.array2string(x) for x in X ])

    # part 1
    for x,xd in zip(X,Xd):
        p = net.predict_sync(xd)
        print (np.all(p == x))
        print (np.array2string(p) in trained_attractors)
        print (len(net.past_energy))

    # part 2 find attractors
    attractors = net.get_attractors()

    print ("Attractors:", len(attractors))
    [ print (a) for a in attractors ]
    for i, x in enumerate(X):
        print (f"x{i+1} in attractors:",np.array2string(x) in attractors)
Example #39
 def _show_canvas(self):
     kopy = np.copy(self.canvas)
     kopy[self.position] = 5
     kopy = np.array2string(kopy, max_line_width=10000)
     return kopy
Example #40
 def __repr__(self):
     prefix = 'Operator('
     pad = len(prefix) * ' '
     return '{}{},\n{}input_dims={}, output_dims={})'.format(
         prefix, np.array2string(self.data, separator=', ', prefix=prefix),
         pad, self.input_dims(), self.output_dims())
Example #41
def Generate_Image_Data(font,
                        fontsize=32,
                        shape=(40, 40),
                        borderthickness=3,
                        translate=None,
                        rotate=None,
                        rotation_bound=[45, -45],
                        blur=None,
                        magnify=0,
                        magnify_bound=[101, 90],
                        stretch=0,
                        stretch_bound=[1.11, 0.9],
                        distort=None,
                        savepath="",
                        fontpath="",
                        imageshow=False,
                        detectblank=False,
                        allfont=False,
                        word="ALL",
                        save=True,
                        special=False):
    if allfont:
        font = [
            x for x in listdir(fontpath) if ".ttf" in x or ".otf" in x
            or ".ttc" in x or ".TTF" in x or ".OTF" in x or ".TTC" in x
        ]
        font = font[:-1]
        random.shuffle(font)
    wordlist = [
        "zero", "one", "two", "three", "four", "five", "six", "seven", "eight",
        "nine", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "ศูนย์ ",
        "หนึ่ง ", "สอง ", "สาม ", "สี่ ", "ห้า ", "หก ", "เจ็ด ", "แปด ",
        "เก้า "
    ]
    filename = {
        "zero": "zero",
        "one": "one",
        "two": "two",
        "three": "three",
        "four": "four",
        "five": "five",
        "six": "six",
        "seven": "seven",
        "eight": "eight",
        "nine": "nine",
        "0": "0",
        "1": "1",
        "2": "2",
        "3": "3",
        "4": "4",
        "5": "5",
        "6": "6",
        "7": "7",
        "8": "8",
        "9": "9",
        "ศูนย์ ": "ZeroTH",
        "หนึ่ง ": "OneTH",
        "สอง ": "TwoTH",
        "สาม ": "ThreeTH",
        "สี่ ": "FourTH",
        "ห้า ": "FiveTH",
        "หก ": "SixTH",
        "เจ็ด ": "SevenTH",
        "แปด ": "EightTH",
        "เก้า ": "NineTH"
    }

    stretch_value = np.arange(stretch_bound[1], stretch_bound[0],
                              0.05).tolist()
    magnify_value = np.arange(magnify_bound[1], magnify_bound[0], 5).tolist()
    magnify_value.remove(100)
    morph_value = [ipaddr.DILATE, ipaddr.ERODE]
    skeleton = [2, 3]

    if word is "EN":
        wordlist = wordlist[0:10]
    elif word is "NUM":
        wordlist = wordlist[10:20]
    elif word is "TH":
        wordlist = wordlist[20:]

    multiplicate = 0
    if translate is None:
        translate_quantify = 1
    else:
        translate_quantify = len(translate)
        multiplicate = 1
    if rotate == 0:
        rotate_quantify = 1
    else:
        rotate_quantify = rotate
        multiplicate = 1
    if blur is None:
        blur_quantify = 1
    else:
        blur_quantify = len(blur)
        multiplicate = 1
    if magnify == 0:
        magnify_quantify = 1
    else:
        magnify_quantify = magnify
        multiplicate = 1
    if stretch == 0:
        stretch_quantify = 1
    else:
        stretch_quantify = stretch * 2
        multiplicate = 1

    # print(len(font))
    # print([x for x in range(0,magnify_quantify) ])
    # print([x for x in range(0, stretch_quantify//2)])
    piccount = len(font) + len(
        font
    ) * multiplicate * translate_quantify * rotate_quantify * blur_quantify * magnify_quantify * (
        stretch_quantify + 1) * 3 * 2 * 9
    print("pic per num : " + str(piccount))
    for x in wordlist:
        write = ""
        skip = False
        n = 0
        for y in font:
            # print(y)
            img = ipaddr.font_to_image(fontpath + y, fontsize, 0, x)
            # cv2.imshow("suck",img)
            plate = ipaddr.get_plate(img, shape)
            extracted_word = plate[0].UnrotateWord
            # extracted_word=255-extracted_word
            # extracted_word = ipaddr.binarize(extracted_word, method=ipaddr.SAUVOLA_THRESHOLDING, value=29)
            ret, extracted_word = cv2.threshold(extracted_word, 200, 255, 0)
            # extracted_word = ipaddr.binarize(extracted_word, method=ipaddr.SAUVOLA_THRESHOLDING,value=29)
            if imageshow and not skip:
                cv2.imshow("original", extracted_word)
                key = cv2.waitKey(0)
                if key == ord('s'):
                    skip = True
            extracted_word_string = (extracted_word.ravel()) / 255
            extracted_word_string = np.array2string(
                extracted_word_string.astype(int),
                max_line_width=80000,
                separator=',')
            n += 1
            write += extracted_word_string[1:-1] + "\n"
            for z in range(0, magnify_quantify):
                if magnify == 0 or ((z == magnify_quantify - 1)
                                    and special == True):
                    magnify_img = extracted_word
                else:
                    magnify_img = ipaddr.magnifly(
                        extracted_word,
                        percentage=magnify_value[random.randint(
                            0,
                            len(magnify_value) - 1)])
                    # magnify_img=255-magnify_img
                    # magnify_img = ipaddr.binarize(magnify_img, method=ipaddr.ADAPTIVE_CONTRAST_THRESHOLDING,value=[15,-0.8])
                    # magnify_img=255-magnify_img
                    magnify_img_string = np.array2string(
                        ((magnify_img.ravel()) / 255).astype(int),
                        max_line_width=80000,
                        separator=',')
                    # n+=1
                    write += magnify_img_string[1:-1] + "\n"
                if imageshow and not skip:
                    cv2.imshow("magnify", magnify_img)
                    key = cv2.waitKey(0)
                    if key == ord('s'):
                        skip = True
                '''for m in skeleton:
                    if m > 2:
                        skel_img=ipaddr.zkeleton(magnify_img,1,m)
                        # skel_img=ip
                    else:
                        skel_img = magnify_img
                    skel_img_string =np.array2string(((skel_img.ravel()) / 255).astype(int), max_line_width=80000,
                                                             separator=',')
                    # n += 1
                    write += skel_img_string[1:-1] + "\n"
                    if imageshow and not skip:
                        cv2.imshow("skel", skel_img)
                        key = cv2.waitKey(0)
                        if key == ord('s'):
                            skip = True'''
                if 1:
                    skel_img = magnify_img

                    for c in morph_value:
                        prep = 255 - skel_img
                        morph_image = ipaddr.morph(prep, c, value=[2, 2])
                        # morph_image = ipaddr.binarize(morph_image, method=ipaddr.ADAPTIVE_CONTRAST_THRESHOLDING,
                        #                               value=[15, -0.8])
                        morph_image = 255 - morph_image
                        morph_image_string = np.array2string(
                            ((morph_image.ravel()) / 255).astype(int),
                            max_line_width=80000,
                            separator=',')
                        # n += 1
                        write += morph_image_string[1:-1] + "\n"
                        if imageshow and not skip:
                            cv2.imshow("morph", morph_image)
                            key = cv2.waitKey(0)
                            if key == ord('s'):
                                skip = True
                        for g in range(2, -3, -2):
                            for h in range(2, -3, -2):
                                tran_image = ipaddr.translate(
                                    morph_image, (g, h), [
                                        cv2.INTER_LINEAR,
                                        ipaddr.BORDER_CONSTANT, 255
                                    ])
                                tran_image_string = np.array2string(
                                    ((tran_image.ravel()) / 255).astype(int),
                                    max_line_width=80000,
                                    separator=',')
                                # n += 1
                                write += tran_image_string[1:-1] + "\n"
                                if imageshow and not skip:
                                    cv2.imshow("tran", tran_image)
                                    key = cv2.waitKey(0)
                                    if key == ord('s'):
                                        skip = True
                        #         cv2.imwrite(
                        #             savePath + x.split(".")[0] + "_" + "None" + "_" + "None" + "_" + "None" + "_" + str(
                        #                 z) + "_" + "None" + "_" + "trans" + str(g) + "l" + str(h) + "_" +
                        #             y + ".jpg",
                        #             tran_image)  # + wfilenamelist[y]
                # for k in range(0, stretch_quantify//2):
                #         if stretch == 0:
                #             stretch_img = magnify_img
                #         else:
                #             stretch_img = ipaddr.ztretch(magnify_img,
                #                                          percentage=round(random.uniform(stretch_bound[1], stretch_bound[0]),2),
                #                                          axis='horizontal')
                #
                #             if imageshow and not skip:
                #                 cv2.imshow("stretch", stretch_img)
                #                 key = cv2.waitKey(0)
                #                 if key == ord('s'):
                #                     skip = True
                #             stretch_img_string = np.array2string(((stretch_img.ravel()) / 255).astype(int), max_line_width=80000,
                #                                                  separator=',')
                #             write += stretch_img_string[1:-1] + "\n"
                #             stretch_img = ipaddr.ztretch(magnify_img,
                #                                          percentage=random.uniform(stretch_bound[1], stretch_bound[0]),
                #                                          axis='vertical')
                #             if imageshow and not skip:
                #                 cv2.imshow("stretch", stretch_img)
                #                 key = cv2.waitKey(0)
                #                 if key == ord('s'):
                #                     skip = True
                #             stretch_img_string = np.array2string(((stretch_img.ravel()) / 255).astype(int), max_line_width=80000,
                #                                                  separator=',')
                #             n+=2
                #             write += stretch_img_string[1:-1] + "\n"
            if n == len(font) * 0.2:
                print(n)
                if save:
                    open(
                        savepath + "dataset" + "_" + filename[x] + "_" +
                        "test" + '.txt', 'w').close()
                    file = open(
                        savepath + "dataset" + "_" + filename[x] + "_" +
                        "test" + '.txt', 'a')
                    file.write(write)
                    file.close()
                    write = ""
            elif n == len(font) * 0.4:
                print(n)
                if save:
                    with open(
                            savepath + "dataset" + "_" + filename[x] + "_" +
                            "validate" + '.txt', 'w') as file:
                        file.write(write)
                    write = ""
        if save:
            with open(
                    savepath + "dataset" + "_" + filename[x] + "_" +
                    "train" + '.txt', 'w') as file:
                file.write(write)
        print(filename[x])
        print(n)
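The files written above hold one flattened 0/1 image per line, comma-separated (see the commented-out array2string calls). A minimal reader sketch; the image shape here is an assumption:

import numpy as np

def load_dataset(path, shape=(28, 28)):
    # Assumed layout: one comma-separated, flattened binary image per line.
    data = np.loadtxt(path, delimiter=',', dtype=int)
    return data.reshape(-1, *shape)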
Example #42
0
    def check(self, verbose=True):
        """Check constraints object.

        Check that lambdas are greater than zero, and that necessary parameters
        are supplied. Optionally print summary of constraints.

        Parameters
        ----------
        verbose : bool
            Verbosity.
        """

        # Set defaults
        lambda_defaults = {"bcc": 0., "hsc": 0., "mhs": 0.}
        lambda_all = lambda_defaults
        if self.lambdas is not None:
            for k, v in self.lambdas.items():
                if k not in lambda_all:
                    raise ValueError(
                        "constraint_lambdas key not recognized - %s" % k)
                elif v is not None:
                    lambda_all[k] = float(v)
        self.lambdas = lambda_all

        params_defaults = {"hsc": None, "mhs": None}
        params_all = params_defaults
        if self.params is not None:
            for k, v in self.params.items():
                if k not in params_all:
                    raise ValueError('params key not recognized - %s' % k)
                elif v is not None:
                    if isinstance(v, int):
                        v = float(v)
                    params_all[k] = v
        self.params = params_all

        # Check constraints
        for k, v in self.lambdas.items():
            if v != lambda_defaults[k]:
                if v < 0:
                    raise ValueError("Lambdas must be >= 0. Lambda for"
                                     " %s is %g" % (k, v))
                if k in self.params and self.params[k] is None:
                    raise ValueError("Lambda for %s is supplied,"
                                     " but constraint is not" % k)
            elif k in self.params and not np.array_equal(
                    self.params[k], params_defaults[k]):
                print(self.params[k], type(self.params[k]))
                raise ValueError("Constraint for %s is supplied, but lambda is"
                                 " 0" % k)

        if (self.lambdas["hsc"] or self.lambdas["mhs"]) and self.ploidy == 1:
            raise ValueError("Homolog-separating constraint cannot be"
                             " applied to a haploid genome.")

        # Print constraints
        constraint_names = {
            "bcc": "bead chain connectivity",
            "hsc": "homolog-separating",
            "mhs": "multiscale homolog-separating"
        }
        lambda_to_print = {k: v for k, v in self.lambdas.items() if v != 0}
        if verbose and len(lambda_to_print) > 0:
            for constraint, lambda_val in lambda_to_print.items():
                print("CONSTRAINT: %s lambda = %.2g" %
                      (constraint_names[constraint], lambda_val),
                      flush=True)
                if constraint in self.params and constraint in ("hsc", "mhs"):
                    if self.params[constraint] is None:
                        print("            param = inferred", flush=True)
                    elif isinstance(self.params[constraint], np.ndarray):
                        label = "            param = "
                        print(label + np.array2string(
                            self.params[constraint],
                            formatter={'float_kind': lambda x: "%.3g" % x},
                            prefix=" " * len(label),
                            separator=", "))
                    elif isinstance(self.params[constraint], float):
                        print("            param = %.3g" %
                              self.params[constraint],
                              flush=True)
                    else:
                        print("            %s" % self.params[constraint],
                              flush=True)
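A usage sketch for check(); the Constraints container below is hypothetical (the original defines check as a method of its own constraints class), and the attribute names are taken from what the method reads:

import numpy as np

class Constraints:
    def __init__(self, lambdas=None, params=None, ploidy=2):
        self.lambdas = lambdas
        self.params = params
        self.ploidy = ploidy

Constraints.check = check  # assumes the method above is available as `check`

c = Constraints(lambdas={"bcc": 1.0, "hsc": 5.0},
                params={"hsc": np.array([1.5, 2.0])})
c.check(verbose=True)
# CONSTRAINT: bead chain connectivity lambda = 1
# CONSTRAINT: homolog-separating lambda = 5
#             param = [1.5, 2]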
Example #43
0
def main():
    # Trainset stats: 2072002577 items from 124950714 sessions
    print('Initializing dataloader...')
    mtrain_loader = SpotifyDataloader(
        config_fpath=args.config,
        mtrain_mode=True,
        data_sel=(0, 124050714),  # first 80% of the trainset for training
        batch_size=TR_BATCH_SZ,
        shuffle=True,
        seq_mode=True)  # seq_mode implemented

    mval_loader = SpotifyDataloader(
        config_fpath=args.config,
        mtrain_mode=True,  # True, because we use part of trainset as testset
        data_sel=(124050714, 124950714),  #(99965071, 124950714), # last 20% used as the test split
        batch_size=TS_BATCH_SZ,
        shuffle=False,
        seq_mode=True)

    # Init neural net
    SM = SeqModel().cuda(GPU)
    SM_optim = torch.optim.Adam(SM.parameters(), lr=LEARNING_RATE)
    SM_scheduler = StepLR(SM_optim, step_size=1, gamma=0.8)

    # Load checkpoint
    if args.load_continue_latest is None:
        START_EPOCH = 0
    else:
        latest_fpath = max(glob.iglob(MODEL_SAVE_PATH + "check*.pth"),
                           key=os.path.getctime)
        checkpoint = torch.load(latest_fpath,
                                map_location='cuda:{}'.format(GPU))
        tqdm.write("Loading saved model from '{0:}'... loss: {1:.6f}".format(
            latest_fpath, checkpoint['loss']))
        SM.load_state_dict(checkpoint['SM_state'])
        SM_optim.load_state_dict(checkpoint['SM_opt_state'])
        SM_scheduler.load_state_dict(checkpoint['SM_sch_state'])
        START_EPOCH = checkpoint['ep']

    # Train
    for epoch in trange(START_EPOCH,
                        EPOCHS,
                        desc='epochs',
                        position=0,
                        ascii=True):
        tqdm.write('Train...')
        tr_sessions_iter = iter(mtrain_loader)
        total_corrects = 0
        total_query = 0
        total_trloss = 0
        for session in trange(len(tr_sessions_iter),
                              desc='sessions',
                              position=1,
                              ascii=True):
            SM.train()
            x, labels, y_mask, num_items, index = next(
                tr_sessions_iter)  # FIXED 13.Dec. SEPARATE LOGS. QUERY SHOULD NOT INCLUDE LOGS

            # Sample data for 'support' and 'query': ex) 15 items = 7 sup, 8 queries...
            num_support = num_items[:, 0].detach().numpy().flatten(
            )  # If num_items was odd number, query has one more item.
            num_query = num_items[:, 1].detach().numpy().flatten()
            batch_sz = num_items.shape[0]

            # x: the first 10 items out of 20 are support items left-padded with zeros. The last 10 are queries right-padded.
            x = x.permute(0, 2, 1)  # bx70*20

            x_feat = torch.zeros(batch_sz, 80, 20)
            x_feat[:, :70, :] = x.clone()
            x_feat[:, :41, 10:] = 0
            x_feat[:, 70, :10] = 1
            x_feat[:, 71:74, :10] = labels[:, :10, :].permute(0, 2, 1).clone()
            x_feat_sup = Variable(x_feat[:, :, :10]).cuda(GPU)
            x_feat_que = Variable(x_feat[:, :, 10:]).cuda(GPU)
            del x_feat
            # y
            y = labels[:, :, 1].clone()

            # y_mask
            y_mask_que = y_mask.clone()
            y_mask_que[:, :10] = 0

            # Forward & update
            y_hat = SM(x_feat_sup, x_feat_que)  # y_hat: b*20

            # Calculate BCE loss
            loss = F.binary_cross_entropy_with_logits(
                input=y_hat * y_mask_que.cuda(GPU),
                target=y.cuda(GPU) * y_mask_que.cuda(GPU))
            total_trloss += loss.item()
            SM.zero_grad()
            loss.backward()
            # Gradient Clipping
            #torch.nn.utils.clip_grad_norm_(SM.parameters(), 0.5)
            SM_optim.step()

            # Decision
            y_prob = torch.sigmoid(
                y_hat * y_mask_que.cuda(GPU)).detach().cpu().numpy()  # bx20
            y_pred = (y_prob[:, 10:] > 0.5).astype(int)  # bx10; np.int is removed in modern numpy
            y_numpy = labels[:, 10:, 1].numpy()  # bx10
            # Acc
            total_corrects += np.sum(
                (y_pred == y_numpy) * y_mask_que[:, 10:].numpy())
            total_query += np.sum(num_query)

            # Restore GPU memory
            del loss, y_hat

            if (session + 1) % 500 == 0:
                hist_trloss.append(total_trloss / 900)
                hist_tracc.append(total_corrects / total_query)
                # Prepare display
                sample_sup = labels[0, (10 - num_support[0]):10,
                                    1].long().numpy().flatten()
                sample_que = y_numpy[0, :num_query[0]].astype(int)
                sample_pred = y_pred[0, :num_query[0]]
                sample_prob = y_prob[0, 10:10 + num_query[0]]

                tqdm.write("S:" + np.array2string(sample_sup) + '\n' + "Q:" +
                           np.array2string(sample_que) + '\n' + "P:" +
                           np.array2string(sample_pred) + '\n' + "prob:" +
                           np.array2string(sample_prob))
                tqdm.write(
                    "tr_session:{0:}  tr_loss:{1:.6f}  tr_acc:{2:.4f}".format(
                        session, hist_trloss[-1], hist_tracc[-1]))
                total_corrects = 0
                total_query = 0
                total_trloss = 0

            if (session + 1) % 30500 == 0:
                # Validation
                validate(mval_loader, SM, eval_mode=True, GPU=GPU)
                # Save
                torch.save(
                    {
                        'ep': epoch,
                        'sess': session,
                        'SM_state': SM.state_dict(),
                        'loss': hist_trloss[-1],
                        'hist_vacc': hist_vacc,
                        'hist_vloss': hist_vloss,
                        'hist_trloss': hist_trloss,
                        'SM_opt_state': SM_optim.state_dict(),
                        'SM_sch_state': SM_scheduler.state_dict()
                    }, MODEL_SAVE_PATH +
                    "check_{0:}_{1:}.pth".format(epoch, session))
        # Validation
        validate(mval_loader, SM, eval_mode=True, GPU=GPU)
        # Save
        torch.save(
            {
                'ep': epoch,
                'sess': session,
                'SM_state': SM.state_dict(),
                'loss': hist_trloss[-1],
                'hist_vacc': hist_vacc,
                'hist_vloss': hist_vloss,
                'hist_trloss': hist_trloss,
                'SM_opt_state': SM_optim.state_dict(),
                'SM_sch_state': SM_scheduler.state_dict()
            }, MODEL_SAVE_PATH + "check_{0:}_{1:}.pth".format(epoch, session))
        SM_scheduler.step()
Example #44
0
def print_array(arr, name="array"):
    print(f"Array {name}")
    print(f"\tShape: {arr.shape}, Max: {np.max(arr)}, Min: {np.min(arr)}")
    for line in np.array2string(arr).split("\n"):
        print(f"\t\t{line}")
    print()
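A quick self-contained call for reference (the numpy import is assumed by the snippet above):

import numpy as np

arr = np.linspace(0.0, 1.0, 6).reshape(2, 3)
print_array(arr, name="demo")
# Array demo
#         Shape: (2, 3), Max: 1.0, Min: 0.0
# ...then the array itself, printed line by line with a double indent.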
Example #45
0
def predictModel():
    data = request.get_json()
    prediction = np.array2string(model.predict(data))

    return jsonify(results=prediction)
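The route above omits its Flask scaffolding. A minimal sketch of the missing wiring; the route path, model file, and use of joblib are assumptions, not from the source:

from flask import Flask, request, jsonify
import joblib
import numpy as np

app = Flask(__name__)
model = joblib.load("model.pkl")  # assumed: any estimator with .predict()

@app.route("/predict", methods=["POST"])
def predictModel():
    data = request.get_json()
    prediction = np.array2string(model.predict(data))
    return jsonify(results=prediction)

if __name__ == "__main__":
    app.run()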
Example #46
0
 def _arr2str(self, x):
     return np.array2string(x, precision=4, suppress_small=True)
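A standalone illustration of what those two options do:

import numpy as np

x = np.array([3.14159265, 1.5e-12])
print(np.array2string(x, precision=4, suppress_small=True))
# [3.1416 0.    ]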
Example #47
0
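main() below references helpers and globals that the excerpt omits (sigmoid, sigmoid_der, training_set, targets, and the weight/bias arrays). A stand-in set with assumed shapes, so the loop can actually run:

import math
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_der(z):
    s = sigmoid(z)
    return s * (1.0 - s)

# Hypothetical sizes; the original SET_SIZE and layer widths are unknown.
rng = np.random.default_rng(0)
training_set = rng.random((16, 10))                       # [SET_SIZE x FEATURES]
targets = rng.integers(0, 2, size=(16, 8)).astype(float)  # [SET_SIZE x 8]
wh = rng.standard_normal((10, 12))  # hidden-layer weights
bh = np.zeros(12)                   # hidden-layer bias
wo = rng.standard_normal((12, 8))   # output-layer weights
bo = np.zeros(8)                    # output-layer bias
epoch = 0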
def main():
    global wh
    global wo
    global bo
    global bh
    global epoch

    cost = math.inf

    error = 1000.0

    desired_error = 0.1

    LR = 8e-4

    error_cost = []

    # Training
    while epoch < 10000000:

        zh = np.dot(training_set, wh) + bh
        ah = sigmoid(zh)  # [SET_SIZE x HIDDEN_LAYER_NODES]

        zo = np.dot(ah, wo) + bo
        ao = sigmoid(zo)  # [SET_SIZE x 8]

        dcost_dzo = targets - ao  # [SET_SIZE x 8]
        dzo_dwo = ah
        dcost_wo = np.dot(dzo_dwo.T, dcost_dzo)

        dcost_bo = dcost_dzo

        dzo_dah = wo
        dcost_dah = np.dot(dcost_dzo, dzo_dah.T)
        dah_dzh = sigmoid_der(zh)
        dzh_dwh = training_set
        dcost_wh = np.dot(dzh_dwh.T, dah_dzh * dcost_dah)

        dcost_bh = dcost_dah * dah_dzh

        wh += LR * dcost_wh
        bh += LR * dcost_bh.sum(axis=0)

        wo += LR * dcost_wo
        bo += LR * dcost_bo.sum(axis=0)

        if epoch % 500 == 0 and cost > 1e-5:
            cost = np.sum((targets - ao)**2) / len(targets)
            print('Cost (MSE): ', cost, "\tLR: ", LR)
            error_cost.append(cost)

            if cost < desired_error: break

        # Adaptive learning rate: decrease LR every 100000 epochs
        # if (epoch % 100000 == 0):
        #     LR = LR * math.exp(-0.04*(epoch/100000))

        epoch += 1

    print("Epochs: ", epoch)
    print("Layer 1\nBias: \n", np.array2string(bh, separator=', '),
          "\nWeights: \n,", np.array2string(wh, separator=', '))
    print()
    print("Layer 2\nBias: \n", np.array2string(bo, separator=', '),
          "\nWeights: \n,", np.array2string(wo, separator=', '))
Example #48
0
#annealing_time
annealing_time = input("Annealing time [20]: ")
if annealing_time == "":
	annealing_time = 20
annealing_time = int(annealing_time)

#chain_strength
chain_strength = input("chain strength [10]: ")
if chain_strength == "":
	chain_strength = 10
chain_strength = int(chain_strength)

print("\n")
print("num_reads_times_hundred = ",num_reads_times_hundred,"; annealing time = ", annealing_time,"; chain_strength = ", chain_strength)
print("\n")

for read in range(1,num_reads_times_hundred+1):
	sampler = EmbeddingComposite(DWaveSampler())
	responses.append(sampler.sample_qubo(qubo, num_reads=reads_per_request, annealing_time=annealing_time, chain_strength=chain_strength))
	#print('Response ',read,' from the D-Wave:\n', responses[read], '\n')
	print('Saved result from request ',read,' in results.txt','   ',read*reads_per_request,' from ',num_reads_times_hundred*reads_per_request)

	with open('results.txt','w') as file:
		file.write('numreads = %f; annealing_time = %d; chain_strength = %s\n' % ((num_reads_times_hundred*100), annealing_time,chain_strength))
		for response in responses:
			for sample, energy, num_occurrences, cbf in response.record:
				file.write('%f\t%g\t%d\t%s\n' % (energy,cbf, num_occurrences, np.array2string(sample, max_line_width=None).replace('\n','')))
				#file.write('\n')
print('Saved all results in results.txt')
Example #49
0
def write_traj(iiwaNo, iiwa_start_trans, iiwa_end_quat):

    pack_path = exo.Tools.parsePath('{stentgraft_sewing_planning}/resources/')
    iHee = np.eye(4, 4)
    for i in range(4):
        iHee[0, i] = iiwa_start_trans[i]
        iHee[1, i] = iiwa_start_trans[i + 4]
        iHee[2, i] = iiwa_start_trans[i + 8]
    print(iHee)
    if (iiwaNo == 0):
        cHi0 = np.loadtxt(pack_path + 'iiwa02CameraTransFile.txt')
        cHee0 = np.matmul(cHi0, iHee)
        print(cHee0)
        iiwa_start_quat = quaternion(cHee0[0:3, 0:3])
        cHee = cHee0

    if (iiwaNo == 1):
        cHi1 = np.loadtxt(pack_path + 'iiwa12CameraTransFile.txt')
        cHee1 = np.matmul(cHi1, iHee)
        iiwa_start_quat = quaternion(cHee1[0:3, 0:3])
        cHee = cHee1

    print(iiwaNo)
    iiwa_traj = np.zeros(shape=(2, 8))

    #iiwa_traj[0, 1] = iiwa_start_trans[3]
    #iiwa_traj[0, 2] = iiwa_start_trans[7]
    #iiwa_traj[0, 3] = iiwa_start_trans[11]
    iiwa_traj[0, 1] = cHee[0, 3]
    iiwa_traj[0, 2] = cHee[1, 3]
    iiwa_traj[0, 3] = cHee[2, 3]
    iiwa_traj[0, 4] = iiwa_start_quat[1]
    iiwa_traj[0, 5] = iiwa_start_quat[2]
    iiwa_traj[0, 6] = iiwa_start_quat[3]
    iiwa_traj[0, 7] = iiwa_start_quat[0]

    iiwa_traj[1, 1] = iiwa_end_quat[0]
    iiwa_traj[1, 2] = iiwa_end_quat[1]
    iiwa_traj[1, 3] = iiwa_end_quat[2]
    iiwa_traj[1, 4] = iiwa_end_quat[3]
    iiwa_traj[1, 5] = iiwa_end_quat[4]
    iiwa_traj[1, 6] = iiwa_end_quat[5]
    iiwa_traj[1, 7] = iiwa_end_quat[6]

    #dt = 10.0 / (length - 1.0)
    dt = 10
    for i in range(0, 2):
        iiwa_traj[i, 0] = i * dt

    np.set_printoptions(threshold=np.inf)  # threshold='nan' is rejected by modern numpy

    # `length` is undefined in this excerpt; the trajectory's row count
    # is what the '<rows>\t<cols>' header needs.
    tmp = '1\n' + str(iiwa_traj.shape[0]) + '\t8\n'
    iiwa_traj = np.array2string(iiwa_traj,
                                separator='\t',
                                max_line_width=np.inf)

    iiwa_traj = iiwa_traj.replace('[', '')
    iiwa_traj = iiwa_traj.replace(']', '')
    iiwa_traj = iiwa_traj.replace(' ', '')
    pack_path = exo.Tools.parsePath('{stentgraft_sewing_planning}/resources/')
    text_file = open(pack_path + 'iiwa_' + str(iiwaNo) + '.traj', "w")
    text_file.write(tmp + iiwa_traj)
    text_file.close()
Example #50
0
def main():
    ### 1) least-squares fit to the data
    x = np.array([
        0.2, 0.4, 0.6, 0.8, 1., 1.2, 1.4, 1.6, 1.8, 2., 2.2, 2.4, 2.6, 2.8, 3.,
        3.2, 3.4, 3.6, 3.8
    ])
    y = gv.gvar([
        '0.38(20)', '2.89(20)', '0.85(20)', '0.59(20)', '2.88(20)', '1.44(20)',
        '0.73(20)', '1.23(20)', '1.68(20)', '1.36(20)', '1.51(20)', '1.73(20)',
        '2.16(20)', '1.85(20)', '2.00(20)', '2.11(20)', '2.75(20)', '0.86(20)',
        '2.73(20)'
    ])
    prior = make_prior()
    fit = lsqfit.nonlinear_fit(data=(x, y),
                               prior=prior,
                               fcn=fitfcn,
                               extend=True)
    if LSQFIT_ONLY:
        sys.stdout = tee.tee(STDOUT, open('case-outliers-lsq.out', 'w'))
    elif not MULTI_W:
        sys.stdout = tee.tee(STDOUT, open('case-outliers.out', 'w'))
    print(fit)

    # plot data
    plt.errorbar(x, gv.mean(y), gv.sdev(y), fmt='o', c='b')

    # plot fit function
    xline = np.linspace(x[0], x[-1], 100)
    yline = fitfcn(xline, fit.p)
    plt.plot(xline, gv.mean(yline), 'k:')
    yp = gv.mean(yline) + gv.sdev(yline)
    ym = gv.mean(yline) - gv.sdev(yline)
    plt.fill_between(xline, yp, ym, color='0.8')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.savefig('case-outliers1.png', bbox_inches='tight')
    if LSQFIT_ONLY:
        return

    ### 2) Bayesian integral with modified PDF
    pdf = ModifiedPDF(data=(x, y), fcn=fitfcn, prior=prior)

    # integrator for expectation values with modified PDF
    expval = lsqfit.BayesIntegrator(fit, pdf=pdf)

    # adapt integrator to pdf
    expval(neval=1000, nitn=15)

    # evaluate expectation value of g(p)
    def g(p):
        w = 0.5 + 0.5 * p['2w-1']
        c = p['c']
        return dict(w=[w, w**2], mean=c, outer=np.outer(c, c))

    results = expval(g, neval=1000, nitn=15, adapt=False)
    print(results.summary())
    # expval.map.show_grid(15)

    if MULTI_W:
        sys.stdout = tee.tee(STDOUT, open('case-outliers-multi.out', 'w'))

    # parameters c[i]
    mean = results['mean']
    cov = results['outer'] - np.outer(mean, mean)
    c = mean + gv.gvar(np.zeros(mean.shape), gv.mean(cov))
    print('c =', c)
    print(
        'corr(c) =',
        np.array2string(gv.evalcorr(c), prefix=10 * ' '),
        '\n',
    )

    # parameter w
    wmean, w2mean = results['w']
    wsdev = gv.mean(w2mean - wmean**2)**0.5
    w = wmean + gv.gvar(np.zeros(np.shape(wmean)), wsdev)
    print('w =', w, '\n')

    # Bayes Factor
    print('logBF =', np.log(expval.norm))
    sys.stdout = STDOUT

    if MULTI_W:
        return

    # add new fit to plot
    yline = fitfcn(xline, dict(c=c))
    plt.plot(xline, gv.mean(yline), 'r--')
    yp = gv.mean(yline) + gv.sdev(yline)
    ym = gv.mean(yline) - gv.sdev(yline)
    plt.fill_between(xline, yp, ym, color='r', alpha=0.2)
    plt.savefig('case-outliers2.png', bbox_inches='tight')
Example #51
0
# The reads for imgcars and imgwrit are missing from this excerpt; the two
# paths below are assumptions inferred from the output names written later.
imgcars = cv2.imread("../img/carpeople.jpg", 0)
imgwrit = cv2.imread("../img/handwriting.jpg", 0)
imgcat = cv2.imread("../img/cat.jpg", 0)
imgiris = cv2.imread("../img/iris.jpg", 0)

imgcars = imgcars[20:400, 100:800]

imgcars = cv2.resize(imgcars, (400, 200), interpolation=cv2.INTER_CUBIC)
imgwrit = cv2.resize(imgwrit, (400, 200), interpolation=cv2.INTER_CUBIC)
imgcat = cv2.resize(imgcat, (400, 200), interpolation=cv2.INTER_CUBIC)
imgiris = cv2.resize(imgiris, (400, 200), interpolation=cv2.INTER_CUBIC)

cv2.imwrite("../img/gcarpeople.jpg", imgcars)
cv2.imwrite("../img/ghandwriting.jpg", imgwrit)
cv2.imwrite("../img/gcat.jpg", imgcat)
cv2.imwrite("../img/giris.jpg", imgiris)

f = open("../img/gcarpeople.txt", "w")
f.write(np.array2string(imgcars))
f.close()

f = open("../img/ghandwriting.txt", "w")
f.write(np.array2string(imgwrit))
f.close()

f = open("../img/gcat.txt", "w")
f.write(np.array2string(imgcat))
f.close()

f = open("../img/giris.txt", "w")
f.write(np.array2string(imgiris))
f.close()
Example #52
0
 def __repr__(self):
     prefixstr = '<' + self.__class__.__name__ + ' '
     arrstr = np.array2string(self.filled_data[:].value, separator=',',
                              prefix=prefixstr)
     return '{0}{1}{2:s}>'.format(prefixstr, arrstr, self._unitstr)
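In the __repr__ above, the prefix argument only affects wrapping: numpy indents continuation lines by the prefix length so a wrapped array stays aligned after the class name. A standalone illustration:

import numpy as np

prefixstr = '<Demo '
arrstr = np.array2string(np.arange(12.0), separator=',',
                         prefix=prefixstr, max_line_width=40)
print(prefixstr + arrstr + '>')
# Continuation lines are indented by len(prefixstr),
# so they line up under the opening bracket.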
Example #53
0
def write_tape5_irradiation(irr_type,
                            irr_time,
                            irr_value,
                            outfile="TAPE5.INP",
                            decay_nlb=(1, 2, 3),
                            xsfpy_nlb=(204, 205, 206),
                            cut_off=1E-10,
                            out_table_nes=(False, False, True),
                            out_table_laf=(True, True, True),
                            out_table_num=None):
    """Writes an irradiation TAPE5 file.

    Parameters
    ----------
    irr_type : str
        Flag that determines whether this is a constant power "IRP"
        irradiation or a constant flux "IRF" irradiation calculation.
    irr_time : float
        Irradiation time duration in days.
    irr_value : float 
        Magnitude of the irradiation. If irr_type = "IRP", then
        this is a power.  If irr_type = "IRF", then this is a flux. 
    outfile : str or file-like object
        Path or file to write the tape5 to.
    decay_nlb : length 3 sequence
        Three tuple of library numbers from the tape9 file decay data, eg (1, 2, 3).
    xsfpy_nlb : length 3 sequence
        Three tuple of library numbers from the tape9 file for cross section and fission
        product yields, eg (204, 205, 206).
    cut_off : float, optional
        Cut-off concentration, below which results are not recorded.
    out_table_nes :  length 3 sequence of bools, optional
        Specifies which type of output tables should be printed by ORIGEN.  The fields 
        represent (Nuclide, Element, Summary).  The default value of (False, False, True) 
        only prints the summary tables. 
    out_table_laf :  length 3 sequence of bools, optional 
        Specifies whether to print the activation products (l), actinides (a), and 
        fission products (f).  By default all three are printed.
    out_table_num : sequence of ints or None
        Specifies which tables, by number, to print according to the rules given by 
        out_table_nes and out_table_laf.  For example the list [10, 5] would print 
        tables 5 and 10.  There are 24 tables available. If None, then all tables 
        are printed.   
    """
    if irr_type not in ["IRP", "IRF"]:
        raise TypeError("Irradiation type must be either 'IRP' or 'IRF'.")

    # Make template fill-value dictionary
    tape5_kw = {
        'CUT_OFF': "{0:.3E}".format(cut_off),
        'DECAY_NLB1': decay_nlb[0],
        'DECAY_NLB2': decay_nlb[1],
        'DECAY_NLB3': decay_nlb[2],
        'XSFPY_NLB1': xsfpy_nlb[0],
        'XSFPY_NLB2': xsfpy_nlb[1],
        'XSFPY_NLB3': xsfpy_nlb[2],
        'irr_type': irr_type,
        'irr_time': '{0:.10E}'.format(irr_time),
        'irr_value': '{0:.10E}'.format(irr_value),
    }

    no_print_string = np.array2string(8 * np.ones(24, dtype=int))[1:-1]

    # Activation Product Print String
    if out_table_laf[0]:
        tape5_kw['optl'] = _out_table_string(out_table_nes, out_table_num)
    else:
        tape5_kw['optl'] = no_print_string

    # Actinide Print String
    if out_table_laf[1]:
        tape5_kw['opta'] = _out_table_string(out_table_nes, out_table_num)
    else:
        tape5_kw['opta'] = no_print_string

    # Fission Product Print String
    if out_table_laf[2]:
        tape5_kw['optf'] = _out_table_string(out_table_nes, out_table_num)
    else:
        tape5_kw['optf'] = no_print_string

    # Fill the template and write it to a file
    tape5 = _tape5_irradiation_template.format(**tape5_kw)

    opened_here = False
    if isinstance(outfile, str):  # basestring is Python 2-only
        outfile = open(outfile, 'w')
        opened_here = True

    outfile.write(tape5)

    if opened_here:
        outfile.close()
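A hedged usage sketch; it assumes the surrounding module (with _tape5_irradiation_template and _out_table_string) is importable, and the values are illustrative only:

# 30-day constant-power irradiation; units follow the ORIGEN template.
write_tape5_irradiation("IRP",
                        irr_time=30.0,
                        irr_value=40.0,
                        outfile="TAPE5.INP")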
Example #54
0
import pandas as pd
import numpy as np

file = open("2019_Random_Forest_values_domestic.txt", "w+")
for i in range(15):
    data = pd.read_excel("/home/karthik/Cauvery Study/dataset-domestic.xlsx",
                         i)
    X = data.iloc[:, [0, 1, 2, 3, 4, 5]].values
    y = data.iloc[:, -1].values
    y = y.reshape(-1, 1)
    X1 = data.iloc[:, [0, 1, 2, 3, 4, 6]].values
    #from sklearn.model_selection import train_test_split
    #X_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.1,random_state=0)

    from sklearn.ensemble import RandomForestRegressor
    regressor = RandomForestRegressor(n_estimators=1000)
    regressor.fit(X, y.ravel())  # ravel() avoids sklearn's column-vector warning

    y_pred = regressor.predict(X1).astype(int)
    y = y.reshape(1, -1)
    print(f"Village : {i}\ny = {y.mean()} \ny_pred : {y_pred.mean()}")
    file.write(
        str(i + 1) + "\t" + np.array2string(y) + "\t" +
        np.array2string(y_pred) + "\n")
file.close()
Example #55
0
            ax.get_position().y0, 0.02,
            ax.get_position().height
        ])
        fig.colorbar(subfig, cax=cax)
        plt.draw()
        plt.pause(1.0 / 10.0)

print('Time used to calculate vs emulate gradients: {} vs {}'.format(
    *t_used.tolist()))
# save to file
import pandas as pd
savepath = folder + '/DNN/summary'
if not os.path.exists(savepath): os.makedirs(savepath)
file = os.path.join(savepath, 'dif-' + ctime + '.txt')
np.savetxt(file, dif)
con_str = (np.array2string(np.array(node_sizes), separator=',')
           .replace('[', '').replace(']', '')
           if 'node_sizes' in locals() or 'node_sizes' in globals()
           else str(depth))
act_str = ','.join([
    val.__name__
    if type(val).__name__ == 'function' else val.name if callable(val) else val
    for val in activations.values()
])
dif_fun_sumry = [dif[:, 0].min(), np.median(dif[:, 0]), dif[:, 0].max()]
dif_fun_str = np.array2string(np.array(dif_fun_sumry),
                              precision=2,
                              separator=',').replace('[', '').replace(']', '')
# alternative: formatter={'float': '{: 0.2e}'.format}
dif_grad_sumry = [dif[:, 1].min(), np.median(dif[:, 1]), dif[:, 1].max()]
dif_grad_str = np.array2string(np.array(dif_grad_sumry),
                               precision=2,
                               separator=',').replace('[', '').replace(']', '')
Example #56
0
def min_numerical_convertible_type(string, check_accuracy=True):
    """
    Parse the string and return the smallest numerical type to use for a safe
    conversion.

    :param str string: Representation of a float/integer with text
    :param bool check_accuracy: If true, a warning is pushed on the logger
        in case there is a loss of accuracy.
    :raise ValueError: When the string is not a numerical value
    :return: A numpy numerical type
    """
    if string == "":
        raise ValueError("Not a numerical value")
    match = _parse_numeric_value.match(string)
    if match is None:
        raise ValueError("Not a numerical value")
    number, decimal, exponent = match.groups()

    if decimal is None and exponent is None:
        # It's an integer
        # TODO: We could find the int type without converting the number
        value = int(string)
        return numpy.min_scalar_type(value).type

    # Try floating-point
    try:
        value = _biggest_float(string)
    except ValueError:
        raise ValueError("Not a numerical value")

    if number is None:
        number = ""
    if decimal is None:
        decimal = ""
    if exponent is None:
        exponent = "0"

    nb_precision_digits = int(exponent) - len(decimal) - 1
    precision = _biggest_float(10)**nb_precision_digits * 2.5
    previous_type = _biggest_float
    for numpy_type in _float_types:
        if numpy_type == _biggest_float:
            # value was already cast using the biggest type
            continue
        reduced_value = numpy_type(value)
        if not numpy.isfinite(reduced_value):
            break
        # numpy.isclose is not used: its atol is not accurate enough here
        diff = value - reduced_value
        # numpy 1.8.2 appears to do the subtraction using float64...
        # we lose precision here
        diff = numpy.abs(diff)
        if diff > precision:
            break
        previous_type = numpy_type

    # It's the smallest float type that fits with enough precision
    numpy_type = previous_type

    if check_accuracy and numpy_type == _biggest_float:
        # Check the precision using the original string
        expected = number + decimal
        # This formats the number without Python conversion
        try:
            result = numpy.array2string(value,
                                        precision=len(number) + len(decimal),
                                        floatmode="fixed")
        except TypeError:
            # numpy 1.8.2 does not have the floatmode argument
            _logger.warning(
                "Not able to check accuracy of the conversion of '%s' using %s",
                string, _biggest_float)
            return numpy_type

        result = result.replace(".", "").replace("-", "")
        if not result.startswith(expected):
            _logger.warning(
                "Not able to convert '%s' using %s without losing precision",
                string, _biggest_float)

    return numpy_type
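The function depends on module-level helpers the excerpt omits. A plausible stand-in set; the regex and the type ordering (biggest to smallest, which the loop's bookkeeping requires) are assumptions reconstructed from how they are used, not the library's actual definitions:

import logging
import re

import numpy

_logger = logging.getLogger(__name__)

# Assumed groups: (integer digits, decimal digits without the dot, exponent digits)
_parse_numeric_value = re.compile(
    r"^\s*[-+]?(\d+)?(?:\.(\d*))?(?:[eE]([-+]?\d+))?\s*$")

_biggest_float = numpy.longdouble
_float_types = (numpy.longdouble, numpy.float64, numpy.float32, numpy.float16)

print(min_numerical_convertible_type("1000.5"))  # e.g. <class 'numpy.float16'>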
Example #57
0
def print_grid(grid):
    print(np.array2string(grid, formatter={'float_kind': '{0:2.0f}'.format}))
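A minimal call for reference:

import numpy as np

print_grid(np.arange(6, dtype=float).reshape(2, 3))
# [[ 0  1  2]
#  [ 3  4  5]]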
Example #58
0
Mlist = [M01, M12, M23, M34, M45, M56, M67]
Slist = [[0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 1], [1, 0, 0, 0, -1, 0],
         [0, -0.089159, -0.089159, -0.089159, -0.10915, 0.005491],
         [0, 0, 0, 0, 0.81725, 0], [0, 0, 0.425, 0.81725, 0, 0.81725]]
###########################################
thetalist = np.array([0, pi / 6, pi / 4, pi / 3, pi / 2, (2 * pi / 3)]).T
dthetalist = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2]).T
ddthetalist = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]).T
g = np.array([0, 0, -9.81]).T
F_tip = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]).T
taulist = np.array([0.0128, -41.1477, -3.7809, 0.0323, 0.037, 0.1034]).T
#joint_forces=mr.InverseDynamics(thetalist,joint_speed,joint_accelration,g,F_tip,Mlist,Glist,Slist)

Mass_Matrix = mr.MassMatrix(thetalist, Mlist, Glist, Slist)
print("\nQuestion 1:\n",
      np.array2string(np.around(Mass_Matrix, decimals=2), separator=','),
      sep='')
c = mr.VelQuadraticForces(thetalist, dthetalist, Mlist, Glist, Slist)
print("\nQuestion 2:\n",
      np.array2string(np.around(c, decimals=2), separator=','),
      sep='')
grav = mr.GravityForces(thetalist, g, Mlist, Glist, Slist)
print("\nQuestion 3:\n",
      np.array2string(np.around(grav, decimals=2), separator=','),
      sep='')
JTFtip = mr.EndEffectorForces(thetalist, F_tip, Mlist, Glist, Slist)
print("\nQuestion 4:\n",
      np.array2string(np.around(JTFtip, decimals=2), separator=','),
      sep='')
ddthetalist = mr.ForwardDynamics(thetalist, dthetalist, taulist, g, F_tip,
                                 Mlist, Glist, Slist)
Example #59
0
# Note: `sm` (statsmodels) and `file` are used below but their setup is
# missing from this excerpt; the lines here are labeled assumptions.
import numpy as np
import pandas as pd
import statsmodels.api as sm

file = open("backward_elimination_results.txt", "w")  # assumed output path

Sl = 0.05
data = pd.read_excel("/home/karthik/Cauvery Study/dataset_whole.xlsx")
X = data.iloc[:, :-1].values
y = data.iloc[:, -1].values

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.05)

col_train = len(X_train[:])
X2 = np.append(arr=np.ones((col_train, 1)).astype(int), values=X_train, axis=1)
k = [0, 1, 2, 3, 4, 5, 6]
X4 = X2[:, k]
col_test = len(X_test[:])
X3 = np.append(arr=np.ones((col_test, 1)).astype(int), values=X_test, axis=1)

num = len(X_train[0])
for i in range(0, num):
    regressor_OLS = sm.OLS(y_train, X4).fit()
    maxvar = max(regressor_OLS.pvalues.astype(float))
    if maxvar > Sl:
        for j in range(0, num - i):
            if regressor_OLS.pvalues[j].astype(float) == maxvar:
                k.pop(j)
                X4 = X2[:, k]

regressor_OLS = sm.OLS(y_train, X4).fit()
X_real = X3[:, k]
y_back = regressor_OLS.predict(X_real).astype(int)

file.write(np.array2string(y_test) + "\n" + np.array2string(y_back))
file.close()
Example #60
0
        print('\n')

        # output file with all multipliers, not just maxima:
        h5_filename = stat_path + "multiplier_Re%i_a%i.h5" % (
            Rej[j], int(ai[i] * 1000))
        f2 = h5py.File(h5_filename, "w")
        dset = f2.create_dataset('CFL', data=CFL, dtype='f8')
        dset = f2.create_dataset('Nz', data=Nz, dtype='f8')
        dset = f2.create_dataset('H', data=H, dtype='f8')
        dset = f2.create_dataset('multR', data=np.real(Fmult), dtype='f8')
        dset = f2.create_dataset('multI', data=np.imag(Fmult), dtype='f8')

        # email with all multipliers, not just maxima:
        if email_flag == 1:
            Fmult = np.array2string(Fmult,
                                    precision=6,
                                    separator=',',
                                    suppress_small=True)
            Fmult_Psi = np.array2string(Fmult_Psi,
                                        precision=6,
                                        separator=',',
                                        suppress_small=True)
            message = """\
            Subject: data k = %.3f Re = %.1f

            Nz = %i\n
            CFL = %.3f\n
            maximum modulus zeta = %.3f\n 
            maximum modulus psi = %.3f\n zeta multiplier:\n""" % (
                ai[i], Rej[j], Nz, CFL, M[j, i], MP[j, i])
            message = message + Fmult + """\n psi multiplier:\n""" + Fmult_Psi
            context = ssl.create_default_context()