Example #1
    def _get_angle_parameters(self, system, atom1, atom2, atom3):
        """
        Utility function to retrieve harmonic angle parameters for
        a given set of atoms in a system

        Arguments
        ---------
        system : simtk.openmm.System
            the system with parameters
        atom1 : int
            the index of the first atom
        atom2 : int
            the index of the second atom
        atom3 : int
            the index of the third atom

        Returns
        -------
        theta0 : float
            equilibrium bond angle
        k_eq : float
            angle spring constant
        """
        list_of_angle_atoms = [atom1, atom2, atom3]
        #compare sorted lists of atoms
        forces = {system.getForce(index).__class__.__name__ : system.getForce(index) for index in range(system.getNumForces())}
        angle_force = forces['HarmonicAngleForce']
        for angle_index in range(angle_force.getNumAngles()):
            parameters = angle_force.getAngleParameters(angle_index)
            #the first three "parameters" are atom indices
            atoms = parameters[:3]
            if np.all(np.equal(list_of_angle_atoms, atoms)) or np.all(np.equal(list_of_angle_atoms[::-1], atoms)):
                return parameters[3], parameters[4]
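
A minimal standalone sketch (made-up atom indices, no OpenMM system) of the forward/reverse triplet match used above:

import numpy as np

angle_atoms = [4, 7, 9]   # atom indices stored on a hypothetical angle term
query = [9, 7, 4]         # the same angle, listed in reverse order
match = (np.all(np.equal(query, angle_atoms)) or
         np.all(np.equal(query[::-1], angle_atoms)))
print(match)  # True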
Example #2
    def testLoadSave(self):
        """Plot with an image: test MaskToolsWidget operations"""
        self.plot.addImage(numpy.arange(1024**2).reshape(1024, 1024),
                           legend='test')
        self.qapp.processEvents()

        # Draw a polygon mask
        toolButton = getQToolButtonFromAction(self.maskWidget.polygonAction)
        self.assertIsNot(toolButton, None)
        self.mouseClick(toolButton, qt.Qt.LeftButton)
        self._drawPolygon()

        ref_mask = self.maskWidget.getSelectionMask()
        self.assertFalse(numpy.all(numpy.equal(ref_mask, 0)))

        with temp_dir() as tmp:
            success = self.maskWidget.save(
                os.path.join(tmp, 'mask.npy'), 'npy')
            self.assertTrue(success)

            self.maskWidget.resetSelectionMask()
            self.assertTrue(
                numpy.all(numpy.equal(self.maskWidget.getSelectionMask(), 0)))

            result = self.maskWidget.load(os.path.join(tmp, 'mask.npy'))
            self.assertTrue(result)
            self.assertTrue(numpy.all(numpy.equal(
                self.maskWidget.getSelectionMask(), ref_mask)))
Example #3
    def __init__(self, stretch='linear', exponent=5, vmid=None, vmin=None,
                 vmax=None, clip=False):
        '''
        Initialize an APLpyNormalize instance.

        Optional Keyword Arguments:

            *vmin*: [ None | float ]
                Minimum pixel value to use for the scaling.

            *vmax*: [ None | float ]
                Maximum pixel value to use for the scaling.

            *stretch*: [ 'linear' | 'log' | 'sqrt' | 'arcsinh' | 'power' ]
                The stretch function to use (default is 'linear').

            *vmid*: [ None | float ]
                Mid-pixel value used for the log and arcsinh stretches. If
                set to None, a default value is picked.

            *exponent*: [ float ]
                if self.stretch is set to 'power', this is the exponent to use.

            *clip*: [ True | False ]
                If clip is True and the given value falls outside the range,
                the returned value will be 0 or 1, whichever is closer.
        '''

        if vmax < vmin:
            raise Exception("vmax should be larger than vmin")

        # Call original initialization routine
        Normalize.__init__(self, vmin=vmin, vmax=vmax, clip=clip)

        # Save parameters
        self.stretch = stretch
        self.exponent = exponent

        if stretch == 'power' and np.equal(self.exponent, None):
            raise Exception("For stretch=='power', an exponent should be specified")

        if np.equal(vmid, None):
            if stretch == 'log':
                if vmin > 0:
                    self.midpoint = vmax / vmin
                else:
                    raise Exception("When using a log stretch, if vmin <= 0, then vmid has to be specified")
            elif stretch == 'arcsinh':
                self.midpoint = -1./30.
            else:
                self.midpoint = None
        else:
            if stretch == 'log':
                if vmin < vmid:
                    raise Exception("When using a log stretch, vmin should be larger than vmid")
                self.midpoint = (vmax - vmid) / (vmin - vmid)
            elif stretch == 'arcsinh':
                self.midpoint = (vmid - vmin) / (vmax - vmin)
            else:
                self.midpoint = None
    def test_SolveUsingSpline_3D(self):
        xKnotPointsShouldBe = numpy.array([0.607, 0.607, 0.607, 3.017, 3.017, 3.017])
        yKnotPointsShouldBe = numpy.array([1.984, 1.984, 1.984, 3.153, 3.153, 3.153])
        coefficientsShouldBe = numpy.array(
            [2.33418963, 1.80079612, 5.07902936, 0.54445029, 1.04110843, 2.14180324, 0.26992805, 0.39148852, 0.8177307]
        )
        testEvaluationShouldBe = numpy.array([0.76020577997])
        model = pyeq2.Models_3D.Spline.Spline(inSmoothingFactor=1.0, inXOrder=2, inYOrder=2)
        pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(DataForUnitTests.asciiDataInColumns_3D, model, False)
        fittedParameters = pyeq2.solverService().SolveUsingSpline(model)

        # example of later using the saved spline knot points and coefficients
        unFittedSpline = scipy.interpolate.fitpack2.SmoothBivariateSpline(
            model.dataCache.allDataCacheDictionary["X"],
            model.dataCache.allDataCacheDictionary["Y"],
            model.dataCache.allDataCacheDictionary["DependentData"],
            s=model.smoothingFactor,
            kx=model.xOrder,
            ky=model.yOrder,
        )
        unFittedSpline.tck = fittedParameters
        testEvaluation = unFittedSpline.ev(2.5, 2.5)

        self.assertTrue(numpy.allclose(testEvaluation, testEvaluationShouldBe, rtol=1.0e-10, atol=1.0e-300))
        self.assertTrue(numpy.equal(fittedParameters[0], xKnotPointsShouldBe).all())
        self.assertTrue(numpy.equal(fittedParameters[1], yKnotPointsShouldBe).all())
        self.assertTrue(numpy.allclose(fittedParameters[2], coefficientsShouldBe, rtol=1.0e-06, atol=1.0e-300))
Example #5
 def _get_ind_under_point(self, event):
     'get the index of the vertex under point if within epsilon tolerance'
     try:
         x, y = zip(*self._poly.xy)
         
         # display coords
         xt, yt = self._poly.get_transform().numerix_x_y(x, y)
         d = np.sqrt((xt-event.x)**2 + (yt-event.y)**2)
         indseq = np.nonzero(np.equal(d, np.amin(d)))
         ind = indseq[0]
     
         if d[ind]>=self._epsilon:
             ind = None
     
         return ind
     except:
         # display coords
         xy = np.asarray(self._poly.xy)
         xyt = self._poly.get_transform().transform(xy)
         xt, yt = xyt[:, 0], xyt[:, 1]
         d = np.sqrt((xt-event.x)**2 + (yt-event.y)**2)
         indseq = np.nonzero(np.equal(d, np.amin(d)))[0]
         ind = indseq[0]
         
         if d[ind]>=self._epsilon:
             ind = None
         
         return ind
Example #6
def make_strictly_feasible(x, lb, ub, rstep=1e-10):
    """Shift a point to the interior of a feasible region.
    
    Each element of the returned vector is at least at a relative distance
    `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used.
    """
    x_new = x.copy()

    active = find_active_constraints(x, lb, ub, rstep)
    lower_mask = np.equal(active, -1)
    upper_mask = np.equal(active, 1)

    if rstep == 0:
        x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask])
        x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask])
    else:
        x_new[lower_mask] = (lb[lower_mask] +
                             rstep * np.maximum(1, np.abs(lb[lower_mask])))
        x_new[upper_mask] = (ub[upper_mask] -
                             rstep * np.maximum(1, np.abs(ub[upper_mask])))

    tight_bounds = (x_new < lb) | (x_new > ub)
    x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])

    return x_new
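
The masking step above can be isolated into a minimal sketch with a made-up `active` array (find_active_constraints itself is not shown here); -1 marks a lower-bound hit, 1 an upper-bound hit, 0 a free variable:

import numpy as np

active = np.array([-1, 0, 1])
lb = np.array([0.0, 0.0, 0.0])
ub = np.array([1.0, 1.0, 1.0])
x_new = np.array([0.0, 0.5, 1.0])
rstep = 1e-10

lower_mask = np.equal(active, -1)
upper_mask = np.equal(active, 1)
x_new[lower_mask] = lb[lower_mask] + rstep * np.maximum(1, np.abs(lb[lower_mask]))
x_new[upper_mask] = ub[upper_mask] - rstep * np.maximum(1, np.abs(ub[upper_mask]))
print(x_new)  # approximately [1e-10, 0.5, 1 - 1e-10]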
 def test_manual_bounds(self, cuda=False):
     device = torch.device("cuda") if cuda else torch.device("cpu")
     for dtype in (torch.float, torch.double):
         # get a test module
         train_x = torch.tensor([[1.0, 2.0, 3.0]], device=device, dtype=dtype)
         train_y = torch.tensor([4.0], device=device, dtype=dtype)
         likelihood = GaussianLikelihood()
         model = ExactGP(train_x, train_y, likelihood)
         model.covar_module = RBFKernel(ard_num_dims=3)
         model.mean_module = ConstantMean()
         model.to(device=device, dtype=dtype)
         mll = ExactMarginalLogLikelihood(likelihood, model)
         # test the basic case
         x, pdict, bounds = module_to_array(
             module=mll, bounds={"model.covar_module.raw_lengthscale": (0.1, None)}
         )
         self.assertTrue(np.array_equal(x, np.zeros(5)))
         expected_sizes = {
             "likelihood.noise_covar.raw_noise": torch.Size([1]),
             "model.covar_module.raw_lengthscale": torch.Size([1, 3]),
             "model.mean_module.constant": torch.Size([1]),
         }
         self.assertEqual(set(pdict.keys()), set(expected_sizes.keys()))
         for pname, val in pdict.items():
             self.assertEqual(val.dtype, dtype)
             self.assertEqual(val.shape, expected_sizes[pname])
             self.assertEqual(val.device.type, device.type)
         lower_exp = np.full_like(x, 0.1)
         for p in ("likelihood.noise_covar.raw_noise", "model.mean_module.constant"):
             lower_exp[_get_index(pdict, p)] = -np.inf
         self.assertTrue(np.equal(bounds[0], lower_exp).all())
         self.assertTrue(np.equal(bounds[1], np.full_like(x, np.inf)).all())
Example #8
    def f(self, it, ir, A, Z):
        """Get fraction of a specified ion in a zone.

        :param it: The temporal index
        :param ir: The spatial index
        :param A: The ion mass you're interested in (scalar)
        :param Z: The ion atomic number you're interested in (scalar)
        :returns: Fraction of that ion in a zone
        """
        it, ir = self.__internalIndex__(it, ir)

        # scalars:
        if np.isscalar(it) and np.isscalar(ir):
            Azone = self.IonA(it, ir)
            Zzone = self.IonZ(it, ir)
            for i in range(len(Azone)):
                if A == Azone[i] and Z == Zzone[i]:
                    return self.IonF(it,ir)[i]
            return 0.

        # Truth values, for if a given entry corresponds to the ion we want
        shape = (it[1]-it[0], ir[1]-ir[0], len(self.IonARaw[0][0]))
        testA = np.ones(shape, dtype=float) * A
        testZ = np.ones(shape, dtype=float) * Z
        truthA = np.equal(testA, self.IonARaw)
        truthZ = np.equal(testZ, self.IonZRaw)
        return np.sum(self.IonFRaw*truthA*truthZ, axis=2)
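
The broadcast truth-mask reduction at the end of f can be sketched without the simulation object, using made-up per-zone ion arrays:

import numpy as np

IonA = np.array([[1, 4, 12], [1, 4, 12]])        # hypothetical (n_zones, n_ions)
IonZ = np.array([[1, 2, 6], [1, 2, 6]])
IonF = np.array([[0.5, 0.3, 0.2], [0.1, 0.8, 0.1]])

A, Z = 4, 2                                      # looking for helium-4
truthA = np.equal(IonA, A)
truthZ = np.equal(IonZ, Z)
print(np.sum(IonF * truthA * truthZ, axis=1))    # [0.3 0.8]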
def merge_img_array(arr_1, nband_1, arr_2, nband_2):
    """
    Merge images from different arrays of 2D images (e.g. one has 7 bands, the other has 2;
    both have 500 temporal images, so we merge into a list of 500 temporal images with 9 bands).
    The array arr_1 is the main one.
    """
    m = arr_1.shape[0]
    n = arr_1.shape[1]
    l1 = arr_1.shape[2]
    l2 = arr_2.shape[2]
    merged_arr = np.empty((m, n, l1+l2))
    
    
    for i in range(int(l1/nband_1)):
        position_1 = i*nband_1 + i*nband_2
        position_2 = position_1 + nband_1
        merged_arr[:, :, position_1 : position_1+nband_1] = arr_1[:, :, i*nband_1 : i*nband_1+nband_1]
        merged_arr[:, :, position_2 : position_2+nband_2] = arr_2[:, :, i*nband_2 : i*nband_2+nband_2]
        
    # Check before done
    clause_1 = np.all(np.equal(merged_arr[:, :, 0:nband_1], arr_1[:, :, 0:nband_1]))
    clause_2 = np.all(np.equal(merged_arr[:, :, -nband_2:], arr_2[:, :, -nband_2:]))
    if clause_1 and clause_2:
        #print('done')
        pass
    else:
        raise ValueError('A very specific bad thing happened. Maybe the number of given bands is wrong?')
    return merged_arr
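
A hypothetical usage of merge_img_array, with two temporal frames, 3 bands in the first array and 2 in the second (shapes are illustrative only):

import numpy as np

a1 = np.arange(2 * 2 * 6).reshape(2, 2, 6)   # 2 frames x 3 bands
a2 = np.arange(2 * 2 * 4).reshape(2, 2, 4)   # 2 frames x 2 bands
merged = merge_img_array(a1, 3, a2, 2)
print(merged.shape)                          # (2, 2, 10), bands interleaved per frame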
def colon(r1, inc, r2):
    """
      MATLAB's colon operator, although here inc is required (no default step).

    """

    s = np.sign(inc)

    if s == 0:
        return_value = np.zeros(1)
    elif s == 1:
        n = ((r2 - r1) + 2 * np.spacing(r2 - r1)) // inc
        return_value = np.linspace(r1, r1 + inc * n, int(n) + 1)
    else:  # s == -1:
        # NOTE: I think this is slightly off as we start on the wrong end
        # r1 should be exact, not r2
        n = ((r1 - r2) + 2 * np.spacing(r1 - r2)) // np.abs(inc)
        temp = np.linspace(r2, r2 + np.abs(inc) * n, int(n) + 1)
        return_value = temp[::-1]

    # If the start and steps are whole numbers, we should cast as int
    if(np.equal(np.mod(r1, 1), 0) and
       np.equal(np.mod(s, 1), 0) and
       np.equal(np.mod(r2, 1), 0)):
        return return_value.astype(int)
    else:
        return return_value
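
Hypothetical calls to colon (assuming the int(n) cast above): with whole-number endpoints the result is cast to integers, mirroring MATLAB's 1:5.

print(colon(1, 1, 5))        # [1 2 3 4 5]
print(colon(0.5, 0.5, 2.5))  # [0.5 1.  1.5 2.  2.5]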
    def applyMorphologicalCleaning(self, image):
        """
        Applies a variety of morphological operations to improve the detection
        of worms in the image.
        Takes 0.030 s on MUSSORGSKY for a typical frame region
        Takes 0.030 s in MATLAB too
        """
        # start with worm == 1
        image = image.copy()
        segmentation.clear_border(image)  # remove objects at edge (worm == 1)
        # fix defects in the thresholding by closing with a worm-width disk
        # worm == 1
        wormSE = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                           (self.wormDiskRadius + 1,
                                            self.wormDiskRadius + 1))
        imcl = cv2.morphologyEx(np.uint8(image), cv2.MORPH_CLOSE, wormSE)
        imcl = np.equal(imcl, 1)
        # fix defects by filling holes
        imholes = ndimage.binary_fill_holes(imcl)
        imcl = np.logical_or(imholes, imcl)
        # fix barely touching regions
        # majority with worm pixels == 1 (median filter same?)
        imcl = nf.median_filter(imcl, footprint=[[1, 1, 1],
                                                 [1, 0, 1],
                                                 [1, 1, 1]])
        # diag with worm pixels == 0
        imcl = np.logical_not(bwdiagfill(np.logical_not(imcl)))
        # open with worm pixels == 1
        openSE = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
        imcl = cv2.morphologyEx(np.uint8(imcl), cv2.MORPH_OPEN, openSE)
        return np.equal(imcl, 1)
Example #12
def load_flatfile(table, nlines):
    RD_PATH = os.path.join(RD_DATAROOT,"{0}_{1}.npy".format(table,nlines))
    
    times = [utcnow()]
    with open(RD_PATH, 'rb') as f:
        all_seqs = np.load(f)

    times+=[utcnow()]
    print("loaded {0} records in {1} seconds".format(all_seqs.shape, times[1] - times[0]))
    
    tests = []
    for e in re.compile(">", re.M).split(ltests.strip()):
        if not e: continue
        match = re.compile( "(?P<id>.*)\n(?P<guide>\S{20})\s*(?P<nrg>\S{3})",re.M).search(e)
        tests.append(match.groupdict())

    tests_array = np.array([translate(e["guide"]) for e in tests])
    times +=[utcnow()]
    min_matches = 12
    sims_1 = np.greater_equal(np.sum(np.equal(all_seqs - tests_array[np.newaxis, :, :], 0)), min_matches)
    times +=[utcnow()]
    matches_1 = np.nonzero(sims_1)
    times+= [utcnow()]
    print("computed one sims (matches) in {0} ({1})".format(times[-2] - times[-3],
                                                            times[-1] - times[-2]))
Example #13
    def error_type_voxelWise(self, reference):
        '''Returns a dictionary with the tagged error type
        for each region from both the reference and this label.'''
        
        self.data = np.where(self.data>0, 1, 0)
        reference.data = np.where(reference.data>0, 1, 0)
        
        interSet = self.intersection(reference).get(1, dict()).get(1, set())
        interArray = np.asarray(list(interSet))
        stats = {'reference': dict(), 'self': dict()}

        sIdx = np.where(self.data>0)
        rIdx = np.where(reference.data>0)
        
        sIdxArray = np.transpose(np.asarray(sIdx))
        rIdxArray = np.transpose(np.asarray(rIdx))
            
        for s in sIdxArray:
            if interArray.any():
                if np.equal(s, interArray).all(axis=1).any():
                    stats['self'][str(s)] = {'type': 'TP', 'regions': 1}
                else:
                    stats['self'][str(s)] = {'type': 'FP'}
            else:
                stats['self'][str(s)] = {'type': 'FP'}

        for r in rIdxArray:
            if interArray.any():
                if not np.equal(r, interArray).all(axis=1).any():
                    stats['reference'][str(r)] = {'type': 'FN'}
            else:
                stats['reference'][str(r)] = {'type': 'FN'}

        return stats
Example #14
def calc_stats_class(image, mask = None, index = None):
    ''' Helper function for computing the statistics of each category. '''
    import spectral
    import numpy
    from numpy import zeros, transpose, compress, indices, reshape, not_equal, mean, std
    from spectral.io import typecode
    
    typechar = typecode(image)
    (nrows, ncols, B) = image.shape
    (nr,nc) = mask.shape
    mask_i = numpy.equal(mask, index)
    mask_array = mask.reshape(nr*nc)
    mask_index = numpy.equal(mask_array, index)
    nSamples = sum(mask_index.ravel())
    
    
    inds = transpose(indices((nrows, ncols)), (1, 2, 0))
    inds = reshape(inds, (nrows * ncols, 2))
    inds = compress(not_equal(mask_i.ravel(), 0), inds, 0).astype('h')

    vector=zeros((inds.shape[0], B), 'd')
 
    for i in range(inds.shape[0]):
        x = image[inds[i][0], inds[i][1]]
        vector[i] = x     
    media = mean(vector,axis=0)
    desv = std(vector, axis=0)
    return(media,desv)
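
The same per-class statistics can be sketched with plain boolean indexing on a made-up image and mask (no spectral dependency):

import numpy as np

image = np.random.rand(4, 5, 3)                # (nrows, ncols, bands)
mask = np.tile([0, 1, 2, 1, 0], (4, 1))        # class label per pixel
index = 1

class_pixels = image[np.equal(mask, index)]    # (n_samples, bands)
media = class_pixels.mean(axis=0)
desv = class_pixels.std(axis=0)
print(media.shape, desv.shape)                 # (3,) (3,)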
Example #15
def rgb_to_hsv( r,g,b ):

    maxc = numpy.maximum(r,numpy.maximum(g,b))
    minc = numpy.minimum(r,numpy.minimum(g,b))

    v = maxc

    minc_eq_maxc = numpy.equal(minc,maxc)

    # compute the difference, but reset zeros to ones to avoid divide by zeros later.
    ones = numpy.ones((r.shape[0],r.shape[1]))
    maxc_minus_minc = numpy.choose( minc_eq_maxc, (maxc-minc,ones) )

    s = (maxc-minc) / numpy.maximum(ones,maxc)
    rc = (maxc-r) / maxc_minus_minc
    gc = (maxc-g) / maxc_minus_minc
    bc = (maxc-b) / maxc_minus_minc

    maxc_is_r = numpy.equal(maxc,r)
    maxc_is_g = numpy.equal(maxc,g)
    maxc_is_b = numpy.equal(maxc,b)

    h = numpy.zeros((r.shape[0],r.shape[1]))
    h = numpy.choose( maxc_is_b, (h,4.0+gc-rc) )
    h = numpy.choose( maxc_is_g, (h,2.0+rc-bc) )
    h = numpy.choose( maxc_is_r, (h,bc-gc) )

    h = numpy.mod(h/6.0,1.0)

    hsv = numpy.asarray([h,s,v])
    
    return hsv
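
A hypothetical sanity check: a pure-red 2x2 image should map to hue 0, saturation 1, value 1.

import numpy

r = numpy.ones((2, 2))
g = numpy.zeros((2, 2))
b = numpy.zeros((2, 2))
h, s, v = rgb_to_hsv(r, g, b)
print(h[0, 0], s[0, 0], v[0, 0])  # 0.0 1.0 1.0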
Example #16
    def test_detector_imxpad_s140(self):
        """
        The masked image has a masked ring around 1.5deg with value
        -10 without mask the pixels should be at -10 ; with mask they
        are at 0
        """
        imxpad = detector_factory("imxpad_s140")

        # check that the cartesian coordinates are cached
        self.assertEqual(hasattr(imxpad, 'COORDINATES'), False)
        y, x = imxpad.calc_cartesian_positions()
        self.assertEqual(hasattr(imxpad, 'COORDINATES'), True)

        # now check that the cached values are identical for each
        # method call
        y1, x1 = imxpad.calc_cartesian_positions()
        self.assertEqual(numpy.all(numpy.equal(y1, y)), True)
        self.assertEqual(numpy.all(numpy.equal(x1, x)), True)

        # check that a few pixel positions are ok.
        self.assertAlmostEqual(y[0], 130e-6 / 2.)
        self.assertAlmostEqual(y[1], y[0] + 130e-6)
        self.assertAlmostEqual(y[119], y[118] + 130e-6 * 3.5 / 2.)

        self.assertAlmostEqual(x[0], 130e-6 / 2.)
        self.assertAlmostEqual(x[1], x[0] + 130e-6)
        self.assertAlmostEqual(x[79], x[78] + 130e-6 * 3.5 / 2.)
Example #17
 def test_support(self):
     self.assertIsInstance(self.f0.support, Domain)
     self.assertIsInstance(self.f1.support, Domain)
     self.assertIsInstance(self.f2.support, Domain)
     self.assertEqual(self.f0.support.size, 0)
     self.assertTrue(np.equal(self.f1.support,[-1,1]).all())
     self.assertTrue(np.equal(self.f2.support,[-1,2]).all())
Example #18
def parseArgs(data, targetClass, otherClass = None, **args) :
    '''parse arguments for a feature scoring function'''

    if 'feature' in args :
        feature = args['feature']
    else :
        feature = None
    if 'Y' in args :
        Y = args['Y']
        if otherClass is None :
            otherI = numpy.nonzero(numpy.not_equal(Y, targetClass))[0]
        else :
            otherI = numpy.nonzero(numpy.equal(Y, otherClass))[0]
        targetClassSize = numpy.sum(numpy.equal(Y, targetClass))
    else :
        Y = None
        if otherClass is None :
            otherI = numpy.nonzero(numpy.not_equal(data.labels.Y, targetClass))[0]
        else :
            otherI = data.labels.classes[otherClass]
        targetClassSize = len(data.labels.classes[targetClass])
    
    otherClassSize = len(otherI)

    return Y, targetClassSize, otherClassSize, otherI, feature
Example #19
def golub(data, targetClass, otherClass, **args) :
    '''The Golub feature score:
    s = (mu1 - mu2) / sqrt(sigma1^2 + sigma2^2)
    '''

    if 'Y' in args :
        Y = args['Y']
        targetClassSize = numpy.sum(numpy.equal(Y, targetClass))
        otherClassSize = numpy.sum(numpy.equal(Y, otherClass))        
    else :
        Y = None
        targetClassSize = data.labels.classSize[targetClass] 
        otherClassSize = data.labels.classSize[otherClass]
    
    m1 = numpy.array(featureMean(data, targetClass, Y))
    m2 = numpy.array(featureMean(data, otherClass, Y))
    s1 = numpy.array(featureStd(data, targetClass, Y))
    s2 = numpy.array(featureStd(data, otherClass, Y))

    s = numpy.sqrt(s1**2 + s2**2)
    m = (m1 + m2) / 2.0

    # perfect features will have s[i] = 0, so need to take care of that:
    numpy.putmask(s, numpy.equal(s, 0), m)
    # features that are zero will still have s[i] = 0 so :
    numpy.putmask(s, numpy.equal(s, 0) ,1)
    
    g = (m1 - m2) / s
    
    return g
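
The zero-denominator guard in golub can be isolated into a small numpy sketch with made-up class means and standard deviations:

import numpy

m1 = numpy.array([1.0, 2.0, 0.0])
m2 = numpy.array([0.5, 2.0, 0.0])
s1 = numpy.array([0.1, 0.0, 0.0])
s2 = numpy.array([0.1, 0.0, 0.0])

s = numpy.sqrt(s1 ** 2 + s2 ** 2)
m = (m1 + m2) / 2.0
numpy.putmask(s, numpy.equal(s, 0), m)  # perfect features: fall back to the mean
numpy.putmask(s, numpy.equal(s, 0), 1)  # all-zero features: avoid dividing by zero
print((m1 - m2) / s)                    # approximately [3.54 0.   0.  ]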
Example #20
def issorted(pot, variables=None):
    """Check whether the variables in the Potential are sorted.

    Parameters :
        pot : Potential :
            The target potential.

        variables : sequence[n_variables, ] or np.ndarray[n_variables, ], optional, default : None :
            The sorted sequence of variables to be compared with.

    Returns :
        issorted : boolean :
            True if pot is sorted, False otherwise.

    Raises :
        None

    Notes :
        If variables is not None, compare the variables of the pot with the
        given variables.
    """
    if variables is None:
        variables = np.array([])
    else:
        variables = np.array(variables)
    originVar = pot.variables
    if np.size(variables) == 0:
        return np.equal(np.sort(originVar), originVar)
    else:
        return np.equal(originVar, variables)
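
A minimal usage sketch, assuming only that a Potential exposes a .variables array (a namedtuple stands in for it here):

import numpy as np
from collections import namedtuple

FakePotential = namedtuple('FakePotential', ['variables'])
pot = FakePotential(variables=np.array([0, 1, 2]))

print(issorted(pot))                       # [ True  True  True]
print(issorted(pot, variables=[0, 2, 1]))  # [ True False False]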
Example #21
    def test_detector_imxpad_s140(self):
        """
        The masked image has a masked ring around 1.5deg with value
        -10 without mask the pixels should be at -10 ; with mask they
        are at 0
        """
        imxpad = detector_factory("imxpad_s140")

        # check that the cartesian coordinates are cached
        self.assertEqual(hasattr(imxpad, '_pixel_edges'), True)
        self.assertEqual(imxpad._pixel_edges, None)
        y, x, z = imxpad.calc_cartesian_positions()
        self.assertEqual(imxpad._pixel_edges is None, False)

        # now check that the cached values are identical for each
        # method call
        y1, x1, z1 = imxpad.calc_cartesian_positions()
        self.assertEqual(numpy.all(numpy.equal(y1, y)), True)
        self.assertEqual(numpy.all(numpy.equal(x1, x)), True)
        self.assertEqual(z, None)
        self.assertEqual(z1, None)
        # check that a few pixel positions are ok.
        self.assertAlmostEqual(y[0, 0], 1 * 130e-6 / 2.)
        self.assertAlmostEqual(y[3, 0], y[2, 0] + 130e-6)
        self.assertAlmostEqual(y[119, 0], y[118, 0] + 130e-6 * 3.5 / 2.)

        self.assertAlmostEqual(x[0, 0], 1 * 130e-6 / 2.)
        self.assertAlmostEqual(x[0, 3], x[0, 2] + 130e-6)
        self.assertAlmostEqual(x[0, 79], x[0, 78] + 130e-6 * 3.5 / 2.)
Example #22
    def fit(self, X, y):
        # TODO: check X,y
        self.classes = np.unique(y)
        # calculate class prior probabilities: P(y=ck)
        if self.class_prior is None:
            class_num = len(self.classes)
            if not self.fit_prior:
                self.class_prior = [1.0 / class_num for _ in range(class_num)]  # uniform prior
            else:
                self.class_prior = []
                sample_num = float(len(y))
                for c in self.classes:
                    c_num = np.sum(np.equal(y, c))
                    self.class_prior.append(
                        (c_num + self.alpha) / (sample_num + class_num * self.alpha))

        # calculate Conditional Probability: P( xj | y=ck )
        self.conditional_prob = {}  # like { c0:{ x0:{ value0:0.2, value1:0.8 }, x1:{} }, c1:{...} }
        for c in self.classes:
            self.conditional_prob[c] = {}
            for i in range(len(X[0])):  # for each feature
                feature = X[np.equal(y, c)][:, i]
                self.conditional_prob[c][i] = self._calculate_feature_prob(feature)
        return self
def long_test_2():
    # Set up the market
    bids = np.arange(1, 2, .10)
    offers = np.arange(1.1, 2.1, .10)
    bid_vols = 2000000 * np.ones(10)
    offer_vols = 1000000 * np.ones(10)
    mean_spread = 0.1
    mean_range = 0.0
    # Set up signals for single long trade
    signals  = [1,0,0,0,0,0,0,0,0,-1]
    # TEST 2
    # Test enter and exit with one buy and sell - no position carry, trade only on signal = False
    # Test for opportunistic profit
    # NB - NO CARRY will average closing prices which again distorts pnl in monotonic markets like the test sets so closing pnl will be exaggerated here
    (pnl, position_deltas, position_running, closing_position, closing_pnl, ignored_signals, m2m_pnl) = simulate2.execute_aggressive(ts, bids, offers, bid_vols, offer_vols, signals, currency_pair, signal_window_time=1, min_window_signals=1, min_profit_prct=0.0001, carry_position = False, default_trade_size = 1, max_position=5, fill_function=None, cut_long = -(mean_spread+mean_range)*2, cut_short= -(mean_spread+mean_range)*2, usd_transaction_cost= 0, trade_file='/tmp/testoutshit', take_profit_pct=0.0001)
    # Profit here is convoluted since the price array is artificial and changes very quickly so when averaging the price to close the position the true price is distorted - i.e. first 4 offers average to 1.30
    assert(round(sum(pnl),2) == 0.35)

    pos_deltas_test = [ 1.,  0., -1.,  0.,  0.,  0.,  0.,  0.,  0., 1.]
    pos_deltas_out = np.equal(position_deltas, pos_deltas_test)
    assert(pos_deltas_out.all() == True)
    assert(sum(position_deltas) + closing_position == 0)

    pos_run_test  = [ 1.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.]
    pos_run_out = np.equal(position_running, pos_run_test)
    assert(pos_run_out.all() == True)
    

    assert(round(closing_pnl,2) == 0.25)
    assert(closing_position == -1.)
def mix_test1():
    signals  = [1,1,1,0,-1,0,-1,0,1,-1]
    mean_spread = 1
    mean_range = 1
    # Reset the market 
    bids = np.arange(1, 2, .10)
    offers = np.arange(1.1, 2.1, .10)
    bid_vols = 2000000 * np.ones(10)
    offer_vols = 1000000 * np.ones(10)

    # TEST 7
    # No take profit - trade only on signals 
    # No cuts - set spread and range high 
    # DO carry position - eliminates averaging problem of closing trade - pnl more transparent
    # No min profit - trade on all signals - not just profitable ones
    # This should trade on ALL signals regardless of profitability and never cut 
    (pnl, position_deltas, position_running, closing_position, closing_pnl, ignored_signals, m2m_pnl) = simulate2.execute_aggressive(ts, bids, offers, bid_vols, offer_vols, signals, currency_pair, signal_window_time=1, min_window_signals=1, min_profit_prct=None, carry_position = True, default_trade_size = 1, max_position=5, fill_function=None, cut_long = -(mean_spread+mean_range)*2, cut_short= -(mean_spread+mean_range)*2, usd_transaction_cost= 0, trade_file='/tmp/testoutshit', take_profit_pct=None)

    assert(round(sum(pnl), 2) == .15)

    pos_deltas_test = [ 1.,  1.,  1.,  0., -3.,  0., -1.,  0.,  1., -1.] 
    pos_deltas_out = np.equal(position_deltas, pos_deltas_test)
    assert(pos_deltas_out.all() == True)

    # End with position since carry_position is True here
    pos_run_test  = [ 1.,  2.,  3.,  3.,  0.,  0., -1.,  -1.,  0., -1.]
    pos_run_out = np.equal(position_running, pos_run_test)
    assert(pos_run_out.all() == True)

    assert(closing_position == -1.)
    assert(closing_pnl == 0)
def mix_test4():
    signals  = [1,1,1,0,-1,0,-1,0,1,-1]
    # Reset the market 
    bids = np.arange(1, 2, .10)
    offers = np.arange(1.1, 2.1, .10)
    bid_vols = 2000000 * np.ones(10)
    offer_vols = 1000000 * np.ones(10)
    mean_spread = 0
    mean_range = 0.0001
    # TEST 10
    # Allow take profit - fully exit profitable position
    # Allow cuts 
    mean_spread = 0
    mean_range = 0.0001
    # DO NOT carry position - close avg price of long/short against avg of past market
    # No min profit - trade on all signals - not just profitable ones
    (pnl, position_deltas, position_running, closing_position, closing_pnl, ignored_signals, m2m_pnl) = simulate2.execute_aggressive(ts, bids, offers, bid_vols, offer_vols, signals, currency_pair, signal_window_time=1, min_window_signals=1, min_profit_prct=None, carry_position = True, default_trade_size = 1, max_position=5, fill_function=None, cut_long = -(mean_spread+mean_range)*2, cut_short= -(mean_spread+mean_range)*2, usd_transaction_cost= 0, trade_file='/tmp/testoutshit', take_profit_pct=0.0001)

    assert(round(sum(pnl), 2) == 0.02)

    pos_deltas_test = [ 1.,  1.,  1., -3., -1.,  1., -1.,  1.,  1., -1.] 
    pos_deltas_out = np.equal(position_deltas, pos_deltas_test)
    assert(pos_deltas_out.all() == True)

    pos_run_test = [ 1.,  2.,  3.,  0., -1.,  0., -1.,  0.,  1.,  0.] 
    pos_run_out = np.equal(position_running, pos_run_test)
    assert(pos_run_out.all() == True)

    assert(closing_position == 0.)
    assert(closing_pnl == 0.)
def short_test2():
    # Set up the market
    bids = np.arange(2, 1, -.10)
    offers = np.arange(2.1, 1.1, -.10)
    bid_vols = 2000000 * np.ones(10)
    offer_vols = 1000000 * np.ones(10)
    mean_spread = 0.1
    mean_range = 0.0
    # Start with short and end with long
    signals = [-1,0,0,0,0,0,0,0,0,1]
    # TEST 5
    # Test short trade first - no carry position, trade only on signal = False
    # Short and take profit - then trade last frame long and close position with short (no carry)
    # NB - NO CARRY will average closing prices which again distorts pnl in monotonic markets like the test sets so closing pnl will be exaggerated here
    (pnl, position_deltas, position_running, closing_position, closing_pnl, ignored_signals, m2m_pnl) = simulate2.execute_aggressive(ts, bids, offers, bid_vols, offer_vols, signals, currency_pair, signal_window_time=1, min_window_signals=1, min_profit_prct=0.0001, carry_position = False, default_trade_size = 1, max_position=5, fill_function=None, cut_long = -(mean_spread+mean_range)*2, cut_short= -(mean_spread+mean_range)*2, usd_transaction_cost= 0, trade_file='/tmp/testoutshit', take_profit_pct=0.0001)
    assert(round(sum(pnl), 2) == 0.35)

    pos_deltas_test = [ -1.,  0., 1.,  0.,  0.,  0.,  0.,  0.,  0., -1.]
    pos_deltas_out = np.equal(position_deltas, pos_deltas_test)
    assert(pos_deltas_out.all() == True)

    pos_run_test  = [ -1.,  -1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.]
    pos_run_out = np.equal(position_running, pos_run_test)
    assert(pos_run_out.all() == True)

    assert(round(closing_pnl,2) == 0.25)
    assert(closing_position == 1.)
def short_test3():
    # Set up the market
    # Cut level is offer[6]
    offers = [ 2.1,  2. ,  1.9,  1.8,  1.7,  1.6,  2.5 ,  1.4,  1.3,  1.2]
    bids = [ 2. ,  1.9,  1.8,  1.7,  1.6,  1.5,  1.4,  1.3,  1.2,  1.1]
    bid_vols = 2000000 * np.ones(10)
    offer_vols = 1000000 * np.ones(10)
    mean_spread = 0.1
    mean_range = 0.0
    # Start with short and end with long
    signals = [-1,0,0,0,0,0,0,0,0,1]
    # TEST 6
    # Test short trade with cutoff
    # NB - NO CARRY will average closing prices which again distorts pnl in monotonic markets like the test sets so closing pnl will be exaggerated here
    mean_spread = 0.0
    mean_range = 0.0001
    (pnl, position_deltas, position_running, closing_position, closing_pnl, ignored_signals, m2m_pnl) = simulate2.execute_aggressive(ts, bids, offers, bid_vols, offer_vols, signals, currency_pair, signal_window_time=1, min_window_signals=1, min_profit_prct=0.0001, carry_position = False, default_trade_size = 1, max_position=5, fill_function=None, cut_long = -(mean_spread+mean_range)*2, cut_short= -(mean_spread+mean_range)*2, usd_transaction_cost= 0, trade_file='/tmp/testoutshit', take_profit_pct=None)
    assert(round(sum(pnl), 2) == -0.55)

    pos_deltas_test = [-1.,  0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.,  -1.]
    pos_deltas_out = np.equal(position_deltas, pos_deltas_test)
    assert(pos_deltas_out.all() == True)

    pos_run_test  = [ -1.,  -1.,  -1.,  -1.,  -1.,  -1.,  0.,  0.,  0.,  0.]
    pos_run_out = np.equal(position_running, pos_run_test)
    assert(pos_run_out.all() == True)

    assert(round(closing_pnl,2) == -0.05)
    assert(closing_position == 1.)
def make_features_in_layers(board):
  feature_layers = [] # These are effectively 'colours' for the CNN
  #print(board)
  
  #print("Board mask")
  mask     = np.greater( board[:, :], 0 )*1.
  feature_layers.append( mask.astype('float32') )
  
  # This works out whether each cell is the same as the cell 'above it'
  for shift_down in [1,2,3,4,5,]:
    #print("\n'DOWN' by %d:" % (shift_down,))
    sameness = np.zeros_like(board, dtype='float32')
    
    sameness[:,:-shift_down] = np.equal( board[:, :-shift_down], board[:, shift_down:] )*1.
    #print(sameness)

    feature_layers.append( sameness )
  
  # This works out whether each cell is the same as the cell a few columns 'to the left of it'
  for shift_right in [1,2,3,]:
    #print("\n'RIGHT' by %d:" % (shift_right,))
    sameness = np.zeros_like(board, dtype='float32')
    
    sameness[:-shift_right,:] = np.equal( board[:-shift_right, :], board[shift_right:, :] )*1.
    #print(sameness)

    feature_layers.append( sameness )
  
  stacked = np.dstack( feature_layers )
  return np.rollaxis( stacked, 2, 0 )
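
A hypothetical call to make_features_in_layers: a small 4x6 board of colour ids yields 1 + 5 + 3 = 9 feature layers.

import numpy as np

board = np.random.randint(0, 4, (4, 6))
features = make_features_in_layers(board)
print(features.shape)  # (9, 4, 6)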
def short_test1():
    # Set up the market
    bids = np.arange(2, 1, -.10)
    offers = np.arange(2.1, 1.1, -.10)
    bid_vols = 2000000 * np.ones(10)
    offer_vols = 1000000 * np.ones(10)
    mean_spread = 0.1
    mean_range = 0.0
    # Start with short and end with long
    signals = [-1,0,0,0,0,0,0,0,0,1]

    # TEST 4
    # Test short trade first - no carry position, trade only on signal = True
    (pnl, position_deltas, position_running, closing_position, closing_pnl, ignored_signals, m2m_pnl) = simulate2.execute_aggressive(ts, bids, offers, bid_vols, offer_vols, signals, currency_pair, signal_window_time=1, min_window_signals=1, min_profit_prct=0.0001, carry_position = False, default_trade_size = 1, max_position=5, fill_function=None, cut_long = -(mean_spread+mean_range)*2, cut_short= -(mean_spread+mean_range)*2, usd_transaction_cost= 0, trade_file='/tmp/testoutshit', take_profit_pct=None)
    assert(round(sum(pnl), 2) == 0.8)

    pos_deltas_test = [ -1.,  0., 0.,  0.,  0.,  0.,  0.,  0.,  0., 1.]
    pos_deltas_out = np.equal(position_deltas, pos_deltas_test)
    assert(pos_deltas_out.all() == True)

    pos_run_test  = [ -1.,  -1.,  -1.,  -1.,  -1.,  -1.,  -1.,  -1.,  -1.,  0.]
    pos_run_out = np.equal(position_running, pos_run_test)
    assert(pos_run_out.all() == True)

    assert(round(closing_pnl,2) == 0.0)
    assert(round(closing_position,2) == 0.0)
Example #30
def find_intersection(x, tr_bounds, lb, ub):
    """Find intersection of trust-region bounds and initial bounds.

    Returns
    -------
    lb_total, ub_total : ndarray with shape of x
        Lower and upper bounds of the intersection region.
    orig_l, orig_u : ndarray of bool with shape of x
        True means that an original bound is taken as a corresponding bound
        in the intersection region.
    tr_l, tr_u : ndarray of bool with shape of x
        True means that a trust-region bound is taken as a corresponding bound
        in the intersection region.
    """
    lb_centered = lb - x
    ub_centered = ub - x

    lb_total = np.maximum(lb_centered, -tr_bounds)
    ub_total = np.minimum(ub_centered, tr_bounds)

    orig_l = np.equal(lb_total, lb_centered)
    orig_u = np.equal(ub_total, ub_centered)

    tr_l = np.equal(lb_total, -tr_bounds)
    tr_u = np.equal(ub_total, tr_bounds)

    return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u
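
A hypothetical call to find_intersection: a point with trust-region radius 1 per coordinate, clipped against the box [0, 2] x [0, 2].

import numpy as np

x = np.array([0.5, 1.5])
tr_bounds = np.array([1.0, 1.0])
lb = np.zeros(2)
ub = np.full(2, 2.0)

lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection(x, tr_bounds, lb, ub)
print(lb_total)       # [-0.5 -1. ]
print(ub_total)       # [ 1.   0.5]
print(orig_l, tr_u)   # [ True False] [ True False]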
Example #31
                    Z[i] *
                    (lambdaa[i] - lambdaa_old[i]) * np.dot(X[i], X[j])) - (
                        Z[j] *
                        (lambdaa[j] - lambdaa_old[j]) * np.dot(X[j], X[j]))

                if (0 < lambdaa[i] and lambdaa[i] < C):
                    bias = b[i]
                elif (0 < lambdaa[j] and lambdaa[j] < C):
                    bias = b[j]
                else:
                    bias = (b[i] + b[j]) / 2

    print("lambdaa values ", lambdaa)
    print("b is", b)

    if (np.equal(lambdaa, lambdaa_old).all()):
        print("Breaking at pass", passes,
              "as both lambda and lambda old are same")
        break
    else:
        continue

w_i = 0.0
w_j = 0.0
for count in range(0, 6):
    w_i += lambdaa[count] * Z[count] * X[count][0]
    w_j += lambdaa[count] * Z[count] * X[count][1]
print("w1 is", w_i, " w2 is ", w_j)
print(Z)
from matplotlib import pyplot as plt
color = ['r' if c == -1. else 'g' for c in Z]
Example #32
def assign_particle_data(ds, pdata, bbox):
    """
    Assign particle data to the grids using MatchPointsToGrids. This
    will overwrite any existing particle data, so be careful!
    """

    particle_index_fields = [
        f"particle_position_{ax}" for ax in ds.coordinates.axis_order
    ]
    for ptype in ds.particle_types_raw:
        check_fields = [(ptype, pi_field)
                        for pi_field in particle_index_fields]
        check_fields.append((ptype, "particle_position"))
        if all(f not in pdata for f in check_fields):
            pdata_ftype = {}
            for f in [k for k in sorted(pdata)]:
                if not hasattr(pdata[f], "shape"):
                    continue
                if f == "number_of_particles":
                    continue
                mylog.debug("Reassigning '%s' to ('%s','%s')", f, ptype, f)
                pdata_ftype[ptype, f] = pdata.pop(f)
            pdata_ftype.update(pdata)
            pdata = pdata_ftype

    # Note: what we need to do here is a bit tricky.  Because occasionally this
    # gets called before we properly handle the field detection, we cannot use
    # any information about the index.  Fortunately for us, we can generate
    # most of the GridTree utilizing information we already have from the
    # stream handler.

    if len(ds.stream_handler.fields) > 1:
        pdata.pop("number_of_particles", None)
        num_grids = len(ds.stream_handler.fields)
        parent_ids = ds.stream_handler.parent_ids
        num_children = np.zeros(num_grids, dtype="int64")
        # We're going to do this the slow way
        mask = np.empty(num_grids, dtype="bool")
        for i in range(num_grids):
            np.equal(parent_ids, i, mask)
            num_children[i] = mask.sum()
        levels = ds.stream_handler.levels.astype("int64").ravel()
        grid_tree = GridTree(
            num_grids,
            ds.stream_handler.left_edges,
            ds.stream_handler.right_edges,
            ds.stream_handler.dimensions,
            ds.stream_handler.parent_ids,
            levels,
            num_children,
        )

        grid_pdata = []
        for _ in range(num_grids):
            grid = {"number_of_particles": 0}
            grid_pdata.append(grid)
            particle_index_fields = [
                f"particle_position_{ax}" for ax in ds.coordinates.axis_order
            ]

        for ptype in ds.particle_types_raw:
            if (ptype, "particle_position_x") in pdata:
                # we call them x, y, z even though they may be different field names
                x, y, z = (pdata[ptype, pi_field]
                           for pi_field in particle_index_fields)
            elif (ptype, "particle_position") in pdata:
                x, y, z = pdata[ptype, "particle_position"].T
            else:
                raise KeyError(
                    "Cannot decompose particle data without position fields!")
            pts = MatchPointsToGrids(grid_tree, len(x), x, y, z)
            particle_grid_inds = pts.find_points_in_tree()
            (assigned_particles, ) = (particle_grid_inds >= 0).nonzero()
            num_particles = particle_grid_inds.size
            num_unassigned = num_particles - assigned_particles.size
            if num_unassigned > 0:
                eps = np.finfo(x.dtype).eps
                s = np.array([
                    [x.min() - eps, x.max() + eps],
                    [y.min() - eps, y.max() + eps],
                    [z.min() - eps, z.max() + eps],
                ])
                sug_bbox = [
                    [min(bbox[0, 0], s[0, 0]),
                     max(bbox[0, 1], s[0, 1])],
                    [min(bbox[1, 0], s[1, 0]),
                     max(bbox[1, 1], s[1, 1])],
                    [min(bbox[2, 0], s[2, 0]),
                     max(bbox[2, 1], s[2, 1])],
                ]
                mylog.warning(
                    "Discarding %s particles (out of %s) that are outside "
                    "bounding box. Set bbox=%s to avoid this in the future.",
                    num_unassigned,
                    num_particles,
                    sug_bbox,
                )
                particle_grid_inds = particle_grid_inds[assigned_particles]
                x = x[assigned_particles]
                y = y[assigned_particles]
                z = z[assigned_particles]
            idxs = np.argsort(particle_grid_inds)
            particle_grid_count = np.bincount(
                particle_grid_inds.astype("intp"), minlength=num_grids)
            particle_indices = np.zeros(num_grids + 1, dtype="int64")
            if num_grids > 1:
                np.add.accumulate(particle_grid_count.squeeze(),
                                  out=particle_indices[1:])
            else:
                particle_indices[1] = particle_grid_count.squeeze()
            for i, pcount in enumerate(particle_grid_count):
                grid_pdata[i]["number_of_particles"] += pcount
                start = particle_indices[i]
                end = particle_indices[i + 1]
                for key in pdata.keys():
                    if key[0] == ptype:
                        grid_pdata[i][key] = pdata[key][idxs][start:end]

    else:
        grid_pdata = [pdata]

    for pd, gi in zip(grid_pdata, sorted(ds.stream_handler.fields)):
        ds.stream_handler.fields[gi].update(pd)
        ds.stream_handler.particle_types.update(set_particle_types(pd))
        npart = ds.stream_handler.fields[gi].pop("number_of_particles", 0)
        ds.stream_handler.particle_count[gi] = npart
    def _step_agent(self, i_agent, action: Optional[RailEnvActions] = None):
        """
        Performs a step, applying the step, start and stop penalties to a single agent, in the following sub steps:
        - malfunction
        - action handling if at the beginning of cell
        - movement

        Parameters
        ----------
        i_agent : int
        action : Optional[RailEnvActions]

        """
        agent = self.agents[i_agent]
        if agent.status in [RailAgentStatus.DONE, RailAgentStatus.DONE_REMOVED]:  # this agent has already completed...
            return

        # agent gets active by a MOVE_* action and if the initial cell is free
        if agent.status == RailAgentStatus.READY_TO_DEPART:
            if action in [RailEnvActions.MOVE_LEFT, RailEnvActions.MOVE_RIGHT,
                          RailEnvActions.MOVE_FORWARD] and self.cell_free(agent.initial_position):
                agent.status = RailAgentStatus.ACTIVE
                self._set_agent_to_initial_position(agent, agent.initial_position)
                self.rewards_dict[i_agent] += self.step_penalty * agent.speed_data['speed']
                return
            else:
                # TODO: Here we need to check for the departure time in future releases with full schedules
                self.rewards_dict[i_agent] += self.step_penalty * agent.speed_data['speed']
                return

        agent.old_direction = agent.direction
        agent.old_position = agent.position

        # if agent is broken, actions are ignored and agent does not move.
        # full step penalty in this case
        if agent.malfunction_data['malfunction'] > 0:
            self.rewards_dict[i_agent] += self.step_penalty * agent.speed_data['speed']
            return

        # Is the agent at the beginning of the cell? Then, it can take an action.
        # As long as the agent is malfunctioning or stopped at the beginning of the cell,
        # different actions may be taken!
        if np.isclose(agent.speed_data['position_fraction'], 0.0, rtol=1e-03):
            # No action has been supplied for this agent -> set DO_NOTHING as default
            if action is None:
                action = RailEnvActions.DO_NOTHING

            if action < 0 or action > len(RailEnvActions):
                print('ERROR: illegal action=', action,
                      'for agent with index=', i_agent,
                      '"DO NOTHING" will be executed instead')
                action = RailEnvActions.DO_NOTHING

            if action == RailEnvActions.DO_NOTHING and agent.moving:
                # Keep moving
                action = RailEnvActions.MOVE_FORWARD

            if action == RailEnvActions.STOP_MOVING and agent.moving:
                # Only allow halting an agent on entering new cells.
                agent.moving = False
                self.rewards_dict[i_agent] += self.stop_penalty

            if not agent.moving and not (
                    action == RailEnvActions.DO_NOTHING or
                    action == RailEnvActions.STOP_MOVING):
                # Allow agent to start with any forward or direction action
                agent.moving = True
                self.rewards_dict[i_agent] += self.start_penalty

            # Store the action if action is moving
            # If not moving, the action will be stored when the agent starts moving again.
            if agent.moving:
                _action_stored = False
                _, new_cell_valid, new_direction, new_position, transition_valid = \
                    self._check_action_on_agent(action, agent)

                if all([new_cell_valid, transition_valid]):
                    agent.speed_data['transition_action_on_cellexit'] = action
                    _action_stored = True
                else:
                    # But, if the chosen invalid action was LEFT/RIGHT, and the agent is moving,
                    # try to keep moving forward!
                    if (action == RailEnvActions.MOVE_LEFT or action == RailEnvActions.MOVE_RIGHT):
                        _, new_cell_valid, new_direction, new_position, transition_valid = \
                            self._check_action_on_agent(RailEnvActions.MOVE_FORWARD, agent)

                        if all([new_cell_valid, transition_valid]):
                            agent.speed_data['transition_action_on_cellexit'] = RailEnvActions.MOVE_FORWARD
                            _action_stored = True

                if not _action_stored:
                    # If the agent cannot move due to an invalid transition, we set its state to not moving
                    self.rewards_dict[i_agent] += self.invalid_action_penalty
                    self.rewards_dict[i_agent] += self.stop_penalty
                    agent.moving = False

        # Now perform a movement.
        # If agent.moving, increment the position_fraction by the speed of the agent
        # If the new position fraction is >= 1, reset to 0, and perform the stored
        #   transition_action_on_cellexit if the cell is free.
        if agent.moving:
            agent.speed_data['position_fraction'] += agent.speed_data['speed']
            if agent.speed_data['position_fraction'] > 1.0 or np.isclose(agent.speed_data['position_fraction'], 1.0,
                                                                  rtol=1e-03):
                # Perform stored action to transition to the next cell as soon as cell is free
                # Notice that we've already checked new_cell_valid and transition valid when we stored the action,
                # so we only have to check cell_free now!

                # cell and transition validity was checked when we stored transition_action_on_cellexit!
                cell_free, new_cell_valid, new_direction, new_position, transition_valid = self._check_action_on_agent(
                    agent.speed_data['transition_action_on_cellexit'], agent)

                # N.B. validity of new_cell and transition should have been verified before the action was stored!
                assert new_cell_valid
                assert transition_valid
                if cell_free:
                    self._move_agent_to_new_position(agent, new_position)
                    agent.direction = new_direction
                    agent.speed_data['position_fraction'] = 0.0

            # has the agent reached its target?
            if np.equal(agent.position, agent.target).all():
                agent.status = RailAgentStatus.DONE
                self.dones[i_agent] = True
                self.active_agents.remove(i_agent)
                agent.moving = False
                self._remove_agent_from_scene(agent)
            else:
                self.rewards_dict[i_agent] += self.step_penalty * agent.speed_data['speed']
        else:
            # step penalty if not moving (stopped now or before)
            self.rewards_dict[i_agent] += self.step_penalty * agent.speed_data['speed']
Example #34
def test_flip():
    # test assertion for invalid flip_ratio
    with pytest.raises(AssertionError):
        transform = dict(type='RandomFlip', flip_ratio=1.5)
        build_from_cfg(transform, PIPELINES)
    # test assertion for 0 <= sum(flip_ratio) <= 1
    with pytest.raises(AssertionError):
        transform = dict(type='RandomFlip',
                         flip_ratio=[0.7, 0.8],
                         direction=['horizontal', 'vertical'])
        build_from_cfg(transform, PIPELINES)

    # test assertion for mismatch between number of flip_ratio and direction
    with pytest.raises(AssertionError):
        transform = dict(type='RandomFlip', flip_ratio=[0.4, 0.5])
        build_from_cfg(transform, PIPELINES)

    # test assertion for invalid direction
    with pytest.raises(AssertionError):
        transform = dict(type='RandomFlip',
                         flip_ratio=1.,
                         direction='horizonta')
        build_from_cfg(transform, PIPELINES)

    transform = dict(type='RandomFlip', flip_ratio=1.)
    flip_module = build_from_cfg(transform, PIPELINES)

    results = dict()
    img = mmcv.imread(osp.join(osp.dirname(__file__), '../data/color.jpg'),
                      'color')
    original_img = copy.deepcopy(img)
    results['img'] = img
    results['img2'] = copy.deepcopy(img)
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    # Set initial values for default meta_keys
    results['pad_shape'] = img.shape
    results['scale_factor'] = 1.0
    results['img_fields'] = ['img', 'img2']

    results = flip_module(results)
    assert np.equal(results['img'], results['img2']).all()

    flip_module = build_from_cfg(transform, PIPELINES)
    results = flip_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert np.equal(original_img, results['img']).all()

    # test flip_ratio is float, direction is list
    transform = dict(type='RandomFlip',
                     flip_ratio=0.9,
                     direction=['horizontal', 'vertical', 'diagonal'])
    flip_module = build_from_cfg(transform, PIPELINES)

    results = dict()
    img = mmcv.imread(osp.join(osp.dirname(__file__), '../data/color.jpg'),
                      'color')
    original_img = copy.deepcopy(img)
    results['img'] = img
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    # Set initial values for default meta_keys
    results['pad_shape'] = img.shape
    results['scale_factor'] = 1.0
    results['img_fields'] = ['img']
    results = flip_module(results)
    if results['flip']:
        assert np.array_equal(
            mmcv.imflip(original_img, results['flip_direction']),
            results['img'])
    else:
        assert np.array_equal(original_img, results['img'])

    # test flip_ratio is list, direction is list
    transform = dict(type='RandomFlip',
                     flip_ratio=[0.3, 0.3, 0.2],
                     direction=['horizontal', 'vertical', 'diagonal'])
    flip_module = build_from_cfg(transform, PIPELINES)

    results = dict()
    img = mmcv.imread(osp.join(osp.dirname(__file__), '../data/color.jpg'),
                      'color')
    original_img = copy.deepcopy(img)
    results['img'] = img
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    # Set initial values for default meta_keys
    results['pad_shape'] = img.shape
    results['scale_factor'] = 1.0
    results['img_fields'] = ['img']
    results = flip_module(results)
    if results['flip']:
        assert np.array_equal(
            mmcv.imflip(original_img, results['flip_direction']),
            results['img'])
    else:
        assert np.array_equal(original_img, results['img'])
Example #35
def test_resize():
    # test assertion if img_scale is a list
    with pytest.raises(AssertionError):
        transform = dict(type='Resize', img_scale=[1333, 800], keep_ratio=True)
        build_from_cfg(transform, PIPELINES)

    # test assertion if len(img_scale) != 1 while ratio_range is not None
    with pytest.raises(AssertionError):
        transform = dict(type='Resize',
                         img_scale=[(1333, 800), (1333, 600)],
                         ratio_range=(0.9, 1.1),
                         keep_ratio=True)
        build_from_cfg(transform, PIPELINES)

    # test assertion for invalid multiscale_mode
    with pytest.raises(AssertionError):
        transform = dict(type='Resize',
                         img_scale=[(1333, 800), (1333, 600)],
                         keep_ratio=True,
                         multiscale_mode='2333')
        build_from_cfg(transform, PIPELINES)

    # test assertion if both scale and scale_factor are set
    with pytest.raises(AssertionError):
        results = dict(img_prefix=osp.join(osp.dirname(__file__), '../data'),
                       img_info=dict(filename='color.jpg'))
        load = dict(type='LoadImageFromFile')
        load = build_from_cfg(load, PIPELINES)
        transform = dict(type='Resize', img_scale=(1333, 800), keep_ratio=True)
        transform = build_from_cfg(transform, PIPELINES)
        results = load(results)
        results['scale'] = (1333, 800)
        results['scale_factor'] = 1.0
        results = transform(results)

    transform = dict(type='Resize', img_scale=(1333, 800), keep_ratio=True)
    resize_module = build_from_cfg(transform, PIPELINES)

    results = dict()
    img = mmcv.imread(osp.join(osp.dirname(__file__), '../data/color.jpg'),
                      'color')
    results['img'] = img
    results['img2'] = copy.deepcopy(img)
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    # Set initial values for default meta_keys
    results['pad_shape'] = img.shape
    results['img_fields'] = ['img', 'img2']

    results = resize_module(results)
    assert np.equal(results['img'], results['img2']).all()

    results.pop('scale')
    results.pop('scale_factor')
    transform = dict(type='Resize',
                     img_scale=(1280, 800),
                     multiscale_mode='value',
                     keep_ratio=False)
    resize_module = build_from_cfg(transform, PIPELINES)
    results = resize_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (800, 1280, 3)
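
# With keep_ratio=True, the scale factor is presumably chosen so the rescaled image
# fits inside img_scale without changing the aspect ratio. A small sketch of that
# computation (an assumption about the expected behaviour, not a call into mmcv):
def rescale_size(old_size, scale):
    """Return (new_w, new_h, scale_factor) so the long/short edges fit max(scale)/min(scale)."""
    w, h = old_size
    max_long_edge, max_short_edge = max(scale), min(scale)
    scale_factor = min(max_long_edge / max(h, w), max_short_edge / min(h, w))
    return int(w * scale_factor + 0.5), int(h * scale_factor + 0.5), scale_factor

# Example: a 500x300 image with img_scale=(1333, 800) -> (1333, 800, ~2.67)
print(rescale_size((500, 300), (1333, 800)))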
def create_new_automaton(d_function_data: Dict[Any, Any],
                         l_t_2d_cells: Any) -> DotMap:
    funcs_str = d_function_data['funcs_str']
    l_func_str_sorted = d_function_data['l_func_str_sorted']
    frame = d_function_data['frame']
    frame_wrap = d_function_data['frame_wrap']

    # sys.exit()

    # TODO: create some random functions too!

    l_func, func_inv, func_rng, start_seed = prepare_functions(
        funcs_str=funcs_str, frame=frame)

    # path_images = PATH_ROOT_DIR + 'images/'

    # file_name = 'nature_trees_river_84761_800x600.jpg'
    # image_path = path_images + file_name

    # img = Image.open(image_path)
    # pix = np.array(img)

    pix_height = 100
    pix_width = 150
    pix_orig = np.random.randint(0,
                                 255, (pix_height, pix_width, 3),
                                 dtype=np.uint8)

    def extract_bits_from_pix(pix: np.ndarray) -> np.ndarray:
        pix_bits = np.array([(channel >> i) & 0x1
                             for channel in pix.transpose(2, 0, 1)
                             for i in range(7, -1, -1)],
                            dtype=np.uint8)
        return pix_bits
        # return pix_bits.transpose(1, 2, 0)

    # def extract_rgb_from_pix(pix : np.ndarray) -> np.ndarray:
    #     return pix.transpose(2, 0, 1)

    pix_bits = extract_bits_from_pix(pix_orig)

    pix = pix_bits[0] * 255
    # pix = pix_bits[:1]

    h, w = pix_bits.shape[1:]
    # h, w = pix_bits.shape[:2]
    # sys.exit()

    # folder_name = 'test_other'
    folder_name_suffix = '{}_{}'.format(
        datetime.strftime(datetime.now(), '%Y-%m-%d_%H:%M:%S'),
        ''.join(np.random.choice(list(string.hexdigits), (8, ))).upper(),
    )
    folder_name = f'test_other_{folder_name_suffix}'
    dir_path_images = os.path.join(DIR_PATH_SAVE_IMAGES, folder_name)
    # dir_path_images = os.path.join(TEMP_DIR, 'save_images/{}/'.format(file_name.split('.')[0]))
    mkdirs(dir_path_images)

    dir_path_combined_images = os.path.join(DIR_PATH_SAVE_IMAGES,
                                            'combined_images')
    mkdirs(dir_path_combined_images)

    dir_path_combined_images_xor = os.path.join(DIR_PATH_SAVE_IMAGES,
                                                'combined_images_xor')
    mkdirs(dir_path_combined_images_xor)

    file_path_funcs = os.path.join(dir_path_images, 'test_functions_python.py')
    with open(file_path_funcs, 'w') as f:
        f.write(funcs_str)

    arr_bits = pix_bits[:1]
    # arr_bits = np.array(
    #     [[list(itertools.chain(*[list(map(int, bin(b)[2:].zfill(8))) for b in v])) for v in row] for row in pix],
    #     dtype=np.uint8,
    # ).transpose(2, 0, 1)

    amount_bit_automaton = arr_bits.shape[0]

    l_bit_automaton = [
        BitAutomaton().init_vals(h=h,
                                 w=w,
                                 frame=frame,
                                 frame_wrap=frame_wrap,
                                 l_func=l_func,
                                 func_inv=func_inv,
                                 func_rng=func_rng)
        for _ in range(0, amount_bit_automaton)
    ]

    for bit_automaton, bits in zip(l_bit_automaton, arr_bits):
        bit_automaton.set_field(bits.astype(bool))

    def convert_bit_field_to_pix_1_bit(l_bit_automaton):
        assert len(l_bit_automaton) == 1  # 1 bits

        arr = (l_bit_automaton[0] << 0).astype(np.uint8) * 255
        return arr

    def convert_bit_field_to_pix_8_bit(l_bit_automaton):
        assert len(l_bit_automaton) == 8  # 8 bits

        arr = np.sum([
            bit_automaton << j
            for j, bit_automaton in zip(range(7, -1, -1), l_bit_automaton)
        ],
                     axis=0).astype(np.uint8)
        return arr

    def convert_bit_field_to_pix_24_bit(l_bit_automaton):
        assert len(l_bit_automaton) == 24  # 24 bits

        arr = np.array([
            np.sum([
                bit_automaton << j for j, bit_automaton in zip(
                    range(7, -1, -1), l_bit_automaton[8 * i:8 * (i + 1)])
            ],
                   axis=0) for i in range(0, 3)
        ],
                       dtype=np.uint8)
        return arr.transpose(1, 2, 0)

    convert_bit_field_to_pix = convert_bit_field_to_pix_1_bit
    # convert_bit_field_to_pix = convert_bit_field_to_pix_8_bit
    # convert_bit_field_to_pix = convert_bit_field_to_pix_24_bit

    pix2 = convert_bit_field_to_pix(l_bit_automaton[:1])
    assert np.all(pix2 == pix)

    print("i: {}".format(0))
    file_path = os.path.join(dir_path_images, '{:04}.png'.format(0))
    Image.fromarray(pix2).save(file_path)

    l_pix = [pix2]

    # cols = 5
    # rows = 10

    cols = 10
    rows = 18
    iterations_amount = cols * rows
    l_func_nr = list(range(0, len(l_func)))
    amount_function_mod = len(l_func_nr)
    rng = func_rng(seed=start_seed)
    # for i in range(1, 100):
    for i in range(1, iterations_amount):
        func_nr = l_func_nr[next(rng) % amount_function_mod]
        print("i: {}, func_nr: {}".format(i, func_nr))

        for bit_automaton in l_bit_automaton:
            # bit_automaton.execute_func(5)
            bit_automaton.execute_func(func_nr)
        pix2 = convert_bit_field_to_pix(l_bit_automaton)
        file_path = os.path.join(dir_path_images, '{:04}.png'.format(i))
        # Image.fromarray(pix2).save(file_path)

        l_pix.append(pix2)

    arr_pixs = np.array(l_pix) // 255

    # find all unique v_2d_cells
    s_2d_cells = set()
    for t_2d_cells in l_t_2d_cells:
        for v_2d_cells in t_2d_cells:
            if v_2d_cells not in s_2d_cells:
                s_2d_cells.add(v_2d_cells)

    # create all possible arr_pixs_roll arrays
    d_arr_pixs_roll = {}
    for dy, dx in s_2d_cells:
        d_arr_pixs_roll[(dy, dx)] = np.roll(np.roll(arr_pixs, dy, 1), dx, 2)

    # get from all the cols * rows images the cluster_roll sum
    l_sum_pix_cluster_roll = []
    arr_pix_temp = np.zeros((rows * cols, pix_height, pix_width),
                            dtype=np.uint8)
    for t_2d_cells in l_t_2d_cells:
        arr_pix_temp[:] = 1
        for v_2d_cells in t_2d_cells:
            arr_pix_temp &= d_arr_pixs_roll[v_2d_cells]
        l_sum_pix_cluster_roll.append(np.sum(np.sum(arr_pix_temp, 2), 1))

    arr_sum_pix_cluster_roll = np.array(l_sum_pix_cluster_roll).T

    arr_historic_ranges = np.zeros((arr_pixs.shape[0], 2), dtype=np.int_)
    for i, pix in enumerate(arr_pixs, 0):
        sum_0 = np.sum(np.equal(pix, 0))
        sum_1 = np.sum(np.equal(pix, 1))
        arr_historic_ranges[i] = [sum_0, sum_1]

    # TODO: find _dynamic in other files too! Correct this in every file!
    dm_obj = DotMap(_dynamic=None)
    dm_obj['cols'] = cols
    dm_obj['rows'] = rows
    dm_obj['pix_height'] = pix_height
    dm_obj['pix_width'] = pix_width

    dm_obj['l_t_2d_cells'] = l_t_2d_cells
    dm_obj['l_sum_pix_cluster_roll'] = l_sum_pix_cluster_roll
    dm_obj['arr_sum_pix_cluster_roll'] = arr_sum_pix_cluster_roll

    dm_obj['frame'] = frame
    dm_obj['frame_wrap'] = frame_wrap
    # dm_obj['l_bit_automaton'] = l_bit_automaton
    dm_obj['arr_pixs'] = arr_pixs
    dm_obj['arr_historic_ranges'] = arr_historic_ranges
    dm_obj['func_str'] = l_func_str_sorted[0]
    dm_obj['l_func_str_sorted'] = l_func_str_sorted
    dm_obj['func_str_hash'] = hashlib.sha512(
        dm_obj['func_str'].encode()).hexdigest()
    dm_obj['_version'] = utils_cluster.__version__
    dm_obj['d_function_data'] = d_function_data

    with gzip.open(
            os.path.join(dir_path_images, utils_cluster.dm_obj_file_name),
            'wb') as f:
        dill.dump(dm_obj, f)

    def combine_all_pix(l_pix, w_space_horizontal=10, h_space_vertical=10):
        h_space_horizontal = l_pix[0].shape[0]
        arr_space_horizontal = np.zeros(
            (h_space_horizontal, w_space_horizontal), dtype=np.uint8) + 0x80

        def combine_l_pix_horizontal(
                l_pix_part: List[np.ndarray]) -> np.ndarray:
            l = [l_pix_part[0]]
            for pix in l_pix_part[1:]:
                l.append(arr_space_horizontal)
                l.append(pix)
            return np.hstack(l)

        l_pix_horizontal = [
            combine_l_pix_horizontal(l_pix[cols * i:cols * (i + 1)])
            for i in range(0, rows)
        ]

        w_space_vertical = l_pix_horizontal[0].shape[1]
        # h_space_vertical = 10
        arr_space_vertical = np.zeros(
            (h_space_vertical, w_space_vertical), dtype=np.uint8) + 0x80

        def combine_l_pix_vertical(l_pix_part: List[np.ndarray]) -> np.ndarray:
            l = [l_pix_part[0]]
            for pix in l_pix_part[1:]:
                l.append(arr_space_vertical)
                l.append(pix)
            return np.vstack(l)

        pix_vertical = combine_l_pix_vertical(l_pix_horizontal)

        return pix_vertical

    # TODO: make this into separate functions too!
    pix_combine = combine_all_pix(l_pix=l_pix)
    file_path_combine = os.path.join(dir_path_combined_images,
                                     '{}.png'.format(folder_name))
    Image.fromarray(pix_combine).save(file_path_combine)

    l_pix_xor = [np.zeros(l_pix[0].shape, dtype=np.uint8) + 0x40] + [
        pix1 ^ pix2 for pix1, pix2 in zip(l_pix[:-1], l_pix[1:])
    ]
    pix_combine_xor = combine_all_pix(l_pix=l_pix_xor)
    file_path_combine_xor = os.path.join(dir_path_combined_images_xor,
                                         '{}.png'.format(folder_name))
    Image.fromarray(pix_combine_xor).save(file_path_combine_xor)

    return dm_obj
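
# The bit-plane handling above (extract_bits_from_pix and the
# convert_bit_field_to_pix_* helpers) is easier to see on a tiny array.
# A standalone round-trip sketch, independent of the function above:
import numpy as np

rng = np.random.RandomState(0)
pix_small = rng.randint(0, 256, (2, 3), dtype=np.uint8)  # a tiny single-channel image

# Split into 8 bit planes, most significant bit first.
bit_planes = np.array([(pix_small >> i) & 0x1 for i in range(7, -1, -1)], dtype=np.uint8)

# Recombine the planes into the original uint8 values.
restored = np.sum([plane << j for j, plane in zip(range(7, -1, -1), bit_planes)],
                  axis=0).astype(np.uint8)

assert np.array_equal(pix_small, restored)
print(bit_planes[0] * 255)  # the MSB plane scaled to 0/255, as used for the 1-bit automaton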
def test_createbias():
    bias = []
    dd = Data([np.random.random((100, 100))], [1], [None])
    CCDBias._createbias(dd, bias)
    assert np.all(np.equal(bias, [dd.data()]))
Exemple #38
0
def training(file_name):
    # Random seed for reproducible result.
    np.random.seed(0)
    tf.set_random_seed(54)

    # Create folders.
    if not os.path.isdir(SAVE_DIR):
        os.makedirs(SAVE_DIR)
    if not os.path.isdir(CSV_DIR):
        os.makedirs(CSV_DIR)
    if not os.path.isdir(FIGURE_DIR):
        os.makedirs(FIGURE_DIR)
    if not os.path.isdir(FIGURE_DIR + "Average Loss/"):
        os.makedirs(FIGURE_DIR + "Average Loss/")
    if not os.path.isdir(FIGURE_DIR + "Reconstruction/"):
        os.makedirs(FIGURE_DIR + "Reconstruction/")
    if not os.path.isdir(FIGURE_DIR + "Latent Representation/"):
        os.makedirs(FIGURE_DIR + "Latent Representation/")

    # Load data.
    mnist = tf.keras.datasets.mnist
    (training_images, training_labels), (test_images,
                                         test_labels) = mnist.load_data()

    # Normalize and reshape training images.
    training_images = training_images / 255.0
    training_images = np.reshape(training_images,
                                 [-1, INPUT_LENGTH * INPUT_WIDTH])
    training_length = np.shape(training_images)[0]
    num_training_batch = training_length // BATCH_SIZE

    # Normalize and reshape test images.
    test_images = test_images / 255.0
    test_images = np.reshape(test_images, [-1, INPUT_LENGTH * INPUT_WIDTH])
    test_length = np.shape(test_images)[0]
    num_test_batch = test_length // BATCH_SIZE

    # Load the model.
    model = SteinVAE_Model()
    model.steinvae()

    with tf.Session() as sess:
        # Initialize variables.
        sess.run(tf.global_variables_initializer())

        list_training_recons_loss = []
        list_training_kl_divergence = []
        list_test_recons_loss = []
        list_test_kl_divergence = []

        for epoch in range(EPOCH):
            # Shuffle the training data.
            random_index = np.array(range(training_length))
            np.random.shuffle(random_index)
            random_training_images = training_images[random_index]

            training_recons_loss = 0
            training_kl_divergence = 0
            test_recons_loss = 0
            test_kl_divergence = 0

            # Training.
            for i in range(num_training_batch):
                images = random_training_images[i * BATCH_SIZE:(i + 1) *
                                                BATCH_SIZE]
                [_, reconstruction_loss,
                 kl_divergence] = sess.run([
                     model.train_op, model.reconstruction_loss,
                     model.kl_divergence
                 ],
                                           feed_dict={model.Inputs: images})
                training_recons_loss += reconstruction_loss
                training_kl_divergence += kl_divergence

            # Validation.
            for i in range(num_test_batch):
                images = test_images[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
                reconstruction_loss, kl_divergence = sess.run(
                    [model.reconstruction_loss, model.kl_divergence],
                    feed_dict={model.Inputs: images})
                test_recons_loss += reconstruction_loss
                test_kl_divergence += kl_divergence

            training_recons_loss /= num_training_batch
            training_kl_divergence /= num_training_batch
            test_recons_loss /= num_test_batch
            test_kl_divergence /= num_test_batch
            list_training_recons_loss.append(training_recons_loss)
            list_training_kl_divergence.append(training_kl_divergence)
            list_test_recons_loss.append(test_recons_loss)
            list_test_kl_divergence.append(test_kl_divergence)

            print("Epoch ",
                  format(epoch, "03d"),
                  ": Training Loss = [",
                  format(training_recons_loss, ".8f"),
                  ", ",
                  format(training_kl_divergence, ".8f"),
                  "], Test Loss = [",
                  format(test_recons_loss, ".8f"),
                  ", ",
                  format(test_kl_divergence, ".8f"),
                  "]",
                  sep="")

        # Save the parameters.
        saver = tf.train.Saver()
        saver.save(sess, SAVE_DIR + file_name)

        # Store data in the csv file.
        with open(CSV_DIR + file_name + ".csv", "w") as f:
            fieldnames = [
                "Epoch", "Training Reconstruction Loss",
                "Training KL Divergence", "Test Reconstruction Loss",
                "Test KL Divergence"
            ]
            writer = csv.DictWriter(f,
                                    fieldnames=fieldnames,
                                    lineterminator="\n")
            writer.writeheader()
            for epoch in range(EPOCH):
                content = {
                    "Epoch":
                    epoch,
                    "Training Reconstruction Loss":
                    list_training_recons_loss[epoch],
                    "Training KL Divergence":
                    list_training_kl_divergence[epoch],
                    "Test Reconstruction Loss":
                    list_test_recons_loss[epoch],
                    "Test KL Divergence":
                    list_test_kl_divergence[epoch]
                }
                writer.writerow(content)

        # Plot the average loss.
        list_epoch = list(range(EPOCH))

        f, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
        ax[0].plot(list_epoch,
                   list_training_recons_loss,
                   "r-",
                   label="Training")
        ax[0].plot(list_epoch, list_test_recons_loss, "b-", label="Test")
        ax[0].set_title("Reconstruction Loss")
        ax[0].set_xlabel("Epoch")
        ax[0].set_ylabel("Loss")
        ax[0].legend(loc="upper right")
        ax[0].grid()
        ax[1].plot(list_epoch,
                   list_training_kl_divergence,
                   "r-",
                   label="Training")
        ax[1].plot(list_epoch, list_test_kl_divergence, "b-", label="Test")
        ax[1].set_title("KL Divergence")
        ax[1].set_xlabel("Epoch")
        ax[1].set_ylabel("Loss")
        ax[1].legend(loc="lower right")
        ax[1].grid()
        f.tight_layout()

        f.savefig(FIGURE_DIR + "Average Loss/" + file_name + ".png")
        plt.close(f)

        # Test.
        [z, outputs] = sess.run([model.z, model.outputs],
                                feed_dict={model.Inputs: test_images})

        # Plot reconstruction images.
        list_index = [
            np.where(np.equal(test_labels, i))[0] for i in range(N_CLASS)
        ]
        random_index = [
            np.random.choice(list_index[i], 2, replace=False)
            for i in range(N_CLASS)
        ]
        random_index = np.transpose(random_index, (1, 0))
        random_index = np.reshape(random_index, (-1, ))
        imageio.mimsave(FIGURE_DIR + "Reconstruction/" + file_name + ".gif", [
            plot_reconstruction_image(test_images[index], outputs[index])
            for index in random_index
        ],
                        fps=1)

        # Plot the latent representation.
        f, ax = plt.subplots(nrows=1, ncols=1, figsize=(6, 5))
        ax.set_title("Latent Representation")
        cmap = plt.get_cmap("jet", N_CLASS)
        scatter = ax.scatter(z[:, 0], z[:, 1], c=test_labels, cmap=cmap)
        cbar = plt.colorbar(scatter)
        loc = (np.arange(0, N_CLASS) + 0.5) * (N_CLASS - 1) / N_CLASS
        cbar.set_ticks(loc)
        cbar.ax.set_yticklabels(np.arange(0, N_CLASS))
        cbar.set_label("Class Label")
        ax.grid()

        f.savefig(FIGURE_DIR + "Latent Representation/" + file_name + ".png")
        plt.close(f)
    tf.contrib.keras.backend.clear_session()
Exemple #39
0
def pos_f(index, val):
    while True:
        x = random.randrange(data['trimap'][index].shape[0])
        y = random.randrange(data['trimap'][index].shape[1])
        if (np.equal(data['trimap'][index][x][y], np.array(val)).all()):
            return x, y
Exemple #40
0
    num_chips = len(c_img)
    print("\tNum Chips: ", num_chips)

    # For each image chip (i in range(num_chips))
    for i in range(num_chips):

        print("\t\tChip #: ", i)
        print("\t\t\tNum Targets in Chip: ", len(c_cls[i]))
        #print c_cls[i]
        #print c_box[i]

        # Get rid of very small boxes that are artifacts of chipping
        final_boxes = []
        final_classes = []

        if (c_cls[i][0] == 0) and (np.equal(c_box[i], np.array([0, 0, 0, 0
                                                                ])).all()):
            print("\t\t\tEmpty chip! Box: ", c_box[i])
        else:
            for j, box in enumerate(c_box[i]):
                xMin, yMin, xMax, yMax = box
                box_area = (xMax - xMin) * (yMax - yMin)
                if box_area > BOX_AREA_THRESH:
                    # Excludes odd error cases
                    if c_cls[i][j] in class_names_LUT.keys():
                        final_boxes.append(box.tolist())
                        final_classes.append(c_cls[i][j])
                else:
                    print("\t\t\tThrowing away small box... Area: ", box_area)
            #print(final_boxes)
            #print(final_classes)
            assert (len(final_boxes) == len(final_classes))
Exemple #41
0
 def __cmp__(self, other):
     if self.rank != other.rank:
         return 1
     else:
         return not Numeric.logical_and.reduce(
             Numeric.equal(self.array, other.array).ravel())
Exemple #42
0
def accuracy(truth, prediction):
    comparison = np.equal(truth, prediction)
    acc = sum(comparison) / truth.shape[0]
    return acc
def _no_gpu(config, train, validation):
    tf.reset_default_graph()
    with tf.Session() as sess:
        learning_rate = config.LR
        print('build model...')
        opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
        x = tf.placeholder(tf.float32, [None, 9, 40, 1])
        y = tf.placeholder(tf.float32, [None, config.N_SPEAKER])
        model = CTDnn(config, x, y)
        pred = model.prediction
        loss = model.loss
        feature = model.feature
        train_op = opt.minimize(loss)
        vectors = dict()
        print("done...")
        print('run train op...')
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        for epoch in range(config.MAX_STEP):
            start_time = time.time()
            avg_loss = 0.0
            total_batch = int(train.num_examples / config.BATCH_SIZE) - 1
            print('\n---------------------')
            print('Epoch:%d, lr:%.4f, total_batch=%d' %
                  (epoch, config.LR, total_batch))
            feature_ = None
            ys = None
            for batch_id in range(total_batch):
                batch_x, batch_y = train.next_batch
                batch_x = batch_x.reshape(-1, 9, 40, 1)
                _, _loss, batch_feature = sess.run([train_op, loss, feature],
                                                   feed_dict={
                                                       x: batch_x,
                                                       y: batch_y
                                                   })

                avg_loss += _loss
                if feature_ is None:
                    feature_ = batch_feature
                else:
                    feature_ = np.concatenate((feature_, batch_feature), 0)
                if ys is None:
                    ys = batch_y
                else:
                    ys = np.concatenate((ys, batch_y), 0)
                print("batch_%d  batch_loss=%.4f" % (batch_id, _loss),
                      end='\r')
            print('\n')
            train.reset_batch_counter()
            for spkr in range(config.N_SPEAKER):
                if len(feature_[np.argmax(ys, 1) == spkr]):
                    vector = np.mean(feature_[np.argmax(ys, 1) == spkr],
                                     axis=0)
                    if spkr in vectors.keys():
                        vector = (vectors[spkr] + vector) / 2
                    else:
                        vector = vector
                    vectors[spkr] = vector
                else:
                    if spkr not in vectors.keys():
                        vectors[spkr] = np.zeros(400, dtype=np.float32)
            avg_loss /= total_batch
            print('Train loss:%.4f' % (avg_loss))
            total_batch = int(validation.num_examples / config.BATCH_SIZE) - 1
            preds = None
            feature_ = None
            ys = None
            for batch_idx in range(total_batch):
                print("validation in batch_%d..." % batch_idx, end='\r')
                batch_x, batch_y = validation.next_batch
                batch_y, batch_pred, batch_feature = sess.run(
                    [y, pred, feature], feed_dict={
                        x: batch_x,
                        y: batch_y
                    })
                if preds is None:
                    preds = batch_pred
                else:
                    preds = np.concatenate((preds, batch_pred), 0)
                if feature_ is None:
                    feature_ = batch_feature
                else:
                    feature_ = np.concatenate((feature_, batch_feature), 0)
                if ys is None:
                    ys = batch_y
                else:
                    ys = np.concatenate((ys, batch_y), 0)
            validation.reset_batch_counter()
            vec_preds = []
            for sample in range(feature_.shape[0]):
                score = -100
                pred = -1
                for spkr in vectors.keys():
                    if cosine(vectors[spkr], feature_[sample]) > score:
                        score = cosine(vectors[spkr], feature_[sample])
                        pred = int(spkr)
                vec_preds.append(pred)
            correct_pred = np.equal(np.argmax(ys, 1), vec_preds)
            val_accuracy = np.mean(np.array(correct_pred, dtype='float'))
            print('Val Accuracy: %0.4f%%' % (100.0 * val_accuracy))
            stop_time = time.time()
            elapsed_time = stop_time - start_time
            saver.save(sess=sess,
                       save_path=os.path.join(model._save_path,
                                              model._name + ".ckpt"))
            print('Cost time: ' + str(elapsed_time) + ' sec.')
        print('training done.')
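
# The validation loop above scores each utterance-level feature against per-speaker
# mean vectors with cosine similarity. A compact, self-contained sketch of that
# scoring step on toy data (names here are illustrative, not this module's API):
import numpy as np

def cosine_similarity(a, b):
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

def predict_speakers(features, speaker_vectors):
    """Assign each feature row to the enrolled speaker with the highest cosine score."""
    preds = []
    for feat in features:
        scores = {spkr: cosine_similarity(vec, feat)
                  for spkr, vec in speaker_vectors.items()}
        preds.append(max(scores, key=scores.get))
    return np.array(preds)

speaker_vectors = {0: np.array([1.0, 0.0]), 1: np.array([0.0, 1.0])}
features = np.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]])
print(predict_speakers(features, speaker_vectors))  # -> [0 1 0]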
Exemple #44
0
 def __init__(self, X, Y):
     self.data = X
     self.target = Y
     # count non-zero entries per sequence (i.e. length excluding zero padding)
     self.length = [np.sum(1 - np.equal(x, 0)) for x in X]
def calculate_accuracy(actual, predicted):
    actual = np.argmax(actual, 1)
    predicted = np.argmax(predicted, 1)
    return (100 * np.sum(np.equal(predicted, actual)) / predicted.shape[0])
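
# calculate_accuracy above expects one-hot labels and per-class scores of the same
# shape; a quick usage sketch (assuming numpy is imported as np):
actual = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])   # one-hot labels
predicted = np.array([[0.1, 0.8, 0.1],                 # softmax-like scores
                      [0.3, 0.4, 0.3],
                      [0.2, 0.1, 0.7]])
print(calculate_accuracy(actual, predicted))  # 2 of 3 argmax matches -> ~66.67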
def _multi_gpu(config, train, validation, debug_mode=False):
    tf.reset_default_graph()
    with tf.Session() as sess:
        with tf.device('/cpu:0'):
            learning_rate = config.LR
            # opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
            opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
            print('build model...')
            print('build model on gpu tower...')
            models = []
            for gpu_id in range(config.N_GPU):
                with tf.device('/gpu:%d' % gpu_id):
                    print('tower:%d...' % gpu_id)
                    with tf.name_scope('tower_%d' % gpu_id):
                        with tf.variable_scope('cpu_variables',
                                               reuse=tf.AUTO_REUSE):
                            x = tf.placeholder(tf.float32, [None, 9, 40, 1])
                            y = tf.placeholder(tf.float32,
                                               [None, train.spkr_num])
                            model = CTDnn(config, x, y)
                            pred = model.prediction
                            feature = model.feature
                            loss = model.loss
                            grads = opt.compute_gradients(loss)
                            models.append((x, y, pred, loss, grads, feature))
            print('build model on gpu tower done.')

            print('reduce model on cpu...')
            tower_x, tower_y, tower_preds, tower_losses, tower_grads, tower_feature = zip(
                *models)
            aver_loss_op = tf.reduce_mean(tower_losses)
            apply_gradient_op = opt.apply_gradients(
                average_gradients(tower_grads))
            get_feature = tf.reshape(tf.stack(tower_feature, 0), [-1, 400])

            all_y = tf.reshape(tf.stack(tower_y, 0), [-1, config.N_SPEAKER])

            all_pred = tf.reshape(tf.stack(tower_preds, 0),
                                  [-1, config.N_SPEAKER])

            vectors = dict()

            print('reduce model on cpu done.')

            print('run train op...')
            sess.run(tf.global_variables_initializer())
            if debug_mode:
                sess = debug.LocalCLIDebugWrapperSession(sess=sess)
            saver = tf.train.Saver()

            for epoch in range(config.MAX_STEP):
                start_time = time.time()
                payload_per_gpu = int(config.BATCH_SIZE // config.N_GPU)
                if config.BATCH_SIZE % config.N_GPU:
                    print("Warning: Batch size can't to be divisible of N_GPU")
                total_batch = int(train.num_examples / config.BATCH_SIZE) - 1
                avg_loss = 0.0
                print('\n---------------------')
                print('Epoch:%d, lr:%.4f, total_batch:%d' %
                      (epoch, config.LR, total_batch))
                feature_ = None
                ys = None

                # temp log
                for batch_idx in range(total_batch):
                    batch_x, batch_y = train.next_batch
                    batch_x = batch_x.reshape(-1, 9, 40, 1)
                    # Below line is required for big data set
                    # batch_y = np.array((np.eye(config.N_SPEAKER)[np.array(batch_y).reshape(-1)]), dtype=np.float32)
                    inp_dict = dict()
                    # print("data part done...")
                    inp_dict = feed_all_gpu(inp_dict, models, payload_per_gpu,
                                            batch_x, batch_y)
                    _, _loss, batch_feature = sess.run(
                        [apply_gradient_op, aver_loss_op, get_feature],
                        inp_dict)
                    # print("train part done...")
                    avg_loss += _loss
                    if ys is None:
                        ys = batch_y
                    else:
                        ys = np.concatenate((ys, batch_y), 0)
                    if feature_ is None:
                        feature_ = batch_feature
                    else:
                        feature_ = np.concatenate((feature_, batch_feature), 0)
                    print("batch_%d, batch_loss=%.4f, payload_per_gpu=%d" %
                          (batch_idx, _loss, payload_per_gpu),
                          end='\r')
                print("\n")
                train.reset_batch_counter()
                for spkr in range(config.N_SPEAKER):
                    if len(feature_[np.argmax(ys, 1) == spkr]):
                        vector = np.mean(feature_[np.argmax(ys, 1) == spkr],
                                         axis=0)
                        if spkr in vectors.keys():
                            vector = (vectors[spkr] + vector) / 2
                        else:
                            vector = vector
                        vectors[spkr] = vector
                    else:
                        if spkr not in vectors.keys():
                            vectors[spkr] = np.zeros(400, dtype=np.float32)
                # print("vector part done....")
                avg_loss /= total_batch
                print('Train loss:%.4f' % (avg_loss))

                val_payload_per_gpu = int(config.BATCH_SIZE // config.N_GPU)
                if config.BATCH_SIZE % config.N_GPU:
                    print("Warning: Batch size can't to be divisible of N_GPU")

                total_batch = int(
                    validation.num_examples / config.BATCH_SIZE) - 1
                preds = None
                ys = None
                feature_ = None
                for batch_idx in range(total_batch):
                    batch_x, batch_y = validation.next_batch
                    batch_x = batch_x.reshape(-1, 9, 40, 1)
                    # Below line is required for big data set
                    # batch_y = np.array((np.eye(config.N_SPEAKER)[np.array(batch_y).reshape(-1)]), dtype=np.float32)
                    inp_dict = feed_all_gpu({}, models, val_payload_per_gpu,
                                            batch_x, batch_y)

                    batch_pred, batch_y_, batch_feature = sess.run(
                        [all_pred, all_y, get_feature], inp_dict)
                    if preds is None:
                        preds = batch_pred
                    else:
                        preds = np.concatenate((preds, batch_pred), 0)
                    if feature_ is None:
                        feature_ = batch_feature
                    else:
                        feature_ = np.concatenate((feature_, batch_feature), 0)
                    if ys is None:
                        ys = batch_y_
                    else:
                        ys = np.concatenate((ys, batch_y_), 0)

                validation.reset_batch_counter()
                vec_preds = []
                for sample in range(feature_.shape[0]):
                    score = -100
                    pred = -1
                    for spkr in vectors.keys():
                        if cosine(vectors[spkr], feature_[sample]) > score:
                            score = cosine(vectors[spkr], feature_[sample])
                            pred = int(spkr)
                    vec_preds.append(pred)
                correct_pred = np.equal(np.argmax(ys, 1), vec_preds)
                val_accuracy = np.mean(np.array(correct_pred, dtype='float'))
                print('Val Accuracy: %0.4f%%' % (100.0 * val_accuracy))
                saver.save(sess=sess,
                           save_path=os.path.join(model._save_path,
                                                  model._name + ".ckpt"))
                stop_time = time.time()
                elapsed_time = stop_time - start_time
                print('Cost time: ' + str(elapsed_time) + ' sec.')
            print('training done.')
Exemple #47
0
def compute_accuracy(predictions, y):
    """Computes the accuracy of predictions against the gold labels, y."""
    predictions, y = predictions.to('cpu'), y.to('cpu')
    return np.mean(np.equal(predictions.numpy(), y.numpy()))
Exemple #48
0
    def __init__(self,
                 loss_factory,
                 X,
                 penalty_structure=None,
                 group_weights={},
                 elastic_net=iq(0, 0, 0, 0),
                 alpha=0.,
                 intercept=True,
                 positive_part=None,
                 unpenalized=None,
                 lagrange_proportion=0.05,
                 nstep=100,
                 scale=True,
                 center=True):

        self.loss_factory = loss_factory

        self.scale = scale
        self.center = center

        # for group lasso weights, if implied by penalty_structure
        self.group_weights = group_weights

        # normalize X, adding intercept if needed
        self.intercept = intercept
        p = X.shape[1]
        if self.intercept:
            self.penalty_structure = np.ones(p + 1) * L1_PENALTY
            self.penalty_structure[0] = UNPENALIZED
            if penalty_structure is not None:
                self.penalty_structure[1:] = penalty_structure

            if scipy.sparse.issparse(X):
                self._X1 = scipy.sparse.hstack([np.ones((X.shape[0], 1)),
                                                X]).tocsc()
            else:
                self._X1 = np.hstack([np.ones((X.shape[0], 1)), X])
            if self.scale or self.center:
                self._Xn = normalize(self._X1,
                                     center=self.center,
                                     scale=self.scale,
                                     intercept_column=0)
                which_0 = self._Xn.col_stds == 0
            else:
                self._Xn = self._X1
                which_0 = np.zeros(self._Xn.shape)

        else:
            self.penalty_structure = np.ones(p) * L1_PENALTY
            if penalty_structure is not None:
                self.penalty_structure[:] = penalty_structure

            if self.scale or self.center:
                self._Xn = normalize(X, center=self.center, scale=self.scale)
                which_0 = self._Xn.col_stds == 0
            else:
                self._Xn = X
                which_0 = np.zeros(self._Xn.shape)

        if np.any(which_0):
            self._selector = selector(~which_0, self._Xn.input_shape)
            if self.scale or self.center:
                self._Xn = self._Xn.slice_columns(~which_0)
            else:
                self._Xn = self._Xn[:, ~which_0]
        else:
            if self.scale or self.center:
                self._selector = identity(self._Xn.input_shape)
            else:
                self._selector = identity(self._Xn.shape)

        # the penalty parameters
        self.alpha = alpha
        self.lagrange_proportion = lagrange_proportion
        self.nstep = nstep
        self._elastic_net = elastic_net.collapsed()

        self.initial_active = (np.equal(self.penalty_structure, UNPENALIZED) +
                               np.equal(self.penalty_structure, NONNEGATIVE))
        self.ever_active = self.initial_active.copy()
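
# In the last lines of the constructor above, adding two boolean arrays produced by
# np.equal behaves as an element-wise logical OR. A tiny illustration with made-up
# penalty codes (not this module's constants):
import numpy as np

UNPENALIZED_CODE, L1_CODE, NONNEGATIVE_CODE = 0, 1, 2
penalty_structure = np.array([UNPENALIZED_CODE, L1_CODE, NONNEGATIVE_CODE, L1_CODE])
initial_active = (np.equal(penalty_structure, UNPENALIZED_CODE) +
                  np.equal(penalty_structure, NONNEGATIVE_CODE))
print(initial_active)  # [ True False  True False]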
Exemple #49
0
def advance_local_run(H, user_specs, c_flag, run, persis_info):
    """
    Moves a local optimization method one iteration forward. We currently do
    this by feeding all past evaluations from a run to the method and then
    storing the first new point generated
    """

    while 1:
        sorted_run_inds = persis_info['run_order'][run]
        advance_local_run.x_new = np.ones((1, len(user_specs['ub']))) * np.inf
        advance_local_run.pt_in_run = 0

        if user_specs['localopt_method'] in [
                'LN_SBPLX', 'LN_BOBYQA', 'LN_COBYLA', 'LN_NELDERMEAD', 'LD_MMA'
        ]:

            if user_specs['localopt_method'] in ['LD_MMA']:
                fields_to_pass = ['x_on_cube', 'f', 'grad']
            else:
                fields_to_pass = ['x_on_cube', 'f']

            try:
                x_opt, exit_code = set_up_and_run_nlopt(
                    H[fields_to_pass][sorted_run_inds], user_specs)
            except Exception as e:
                x_opt = 0
                exit_code = 0
                display_exception(e)

        elif user_specs['localopt_method'] in ['pounders', 'blmvm']:

            if c_flag:
                Run_H_F = np.zeros(len(sorted_run_inds),
                                   dtype=[('fvec', float,
                                           user_specs['components'])])
                for i, ind in enumerate(sorted_run_inds):
                    a1 = H['pt_id'] == H['pt_id'][ind]
                    Run_H_F['fvec'][i, :] = H['f_i'][a1]
                Run_H = merge_arrays(
                    [H[['x_on_cube']][sorted_run_inds], Run_H_F], flatten=True)
            else:
                if user_specs['localopt_method'] == 'pounders':
                    Run_H = H[['x_on_cube', 'fvec']][sorted_run_inds]
                else:
                    Run_H = H[['x_on_cube', 'f', 'grad']][sorted_run_inds]

            try:
                x_opt, exit_code = set_up_and_run_tao(Run_H, user_specs)
            except Exception as e:
                x_opt = 0
                exit_code = 0
                display_exception(e)

        elif user_specs['localopt_method'] == 'scipy_COBYLA':

            fields_to_pass = ['x_on_cube', 'f']

            try:
                x_opt, exit_code = set_up_and_run_scipy_minimize(
                    H[fields_to_pass][sorted_run_inds], user_specs)
            except Exception as e:
                x_opt = 0
                exit_code = 0
                display_exception(e)

        else:
            raise APOSMMException("Unknown localopt method. Exiting")

        match_ind = np.equal(advance_local_run.x_new, H['x_on_cube']).all(1)
        if ~match_ind.any():
            # Generated a new point
            break
        else:
            # We need to add a previously evaluated point into this run
            persis_info['run_order'][run].append(np.nonzero(match_ind)[0][0])

    return x_opt, exit_code, persis_info, sorted_run_inds, advance_local_run.x_new
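
# The match_ind check above asks whether the newly proposed point already exists
# as a row of H['x_on_cube']. A minimal sketch of that row-wise comparison:
import numpy as np

history = np.array([[0.1, 0.2],
                    [0.5, 0.5],
                    [0.9, 0.4]])
x_new = np.array([[0.5, 0.5]])

match_ind = np.equal(x_new, history).all(1)
print(match_ind)                 # [False  True False]
print(np.nonzero(match_ind)[0])  # [1] -> index of the previously evaluated point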
    def get_plot_formatted_arrays(self,
                                  coordinates=CARTESIAN,
                                  dimensionless_rabi_rate=True):
        """ Gets arrays for plotting a driven control.

        Parameters
        ----------
        dimensionless_rabi_rate: boolean
            If True, normalizes the segment amplitudes by the maximum Rabi rate.
        coordinates : string
            Indicates the coordinate type of the returned segments; can be
            'cartesian' or 'cylindrical'.

        Returns
        -------
        dict
            A dict with keywords depending on the chosen coordinates. For 'cylindrical', we have
            'rabi_rate', 'azimuthal_angle', 'detuning' and 'times', and for 'cartesian' we have
            'amplitude_x', 'amplitude_y', 'detuning' and 'times'.

        Notes
        -----
        The plot data can contain repeated times and amplitudes, because these
        coordinates are intended for plotting software that 'joins the dots' with
        straight lines between consecutive points. The time array gives the x values
        for all the amplitude arrays, which give the y values.

        Raises
        ------
        ArgumentsValueError
            Raised when an argument is invalid.
        """
        if coordinates not in [CARTESIAN, CYLINDRICAL]:
            raise ArgumentsValueError('Unsupported coordinates provided: ',
                                      arguments={'coordinates': coordinates})

        if dimensionless_rabi_rate:
            normalizer = self.maximum_rabi_rate
        else:
            normalizer = 1

        if coordinates == CARTESIAN:
            control_segments = np.vstack(
                (self.amplitude_x / normalizer, self.amplitude_y / normalizer,
                 self.detunings, self.durations)).T
        elif coordinates == CYLINDRICAL:
            control_segments = np.vstack(
                (self.rabi_rates / normalizer, self.azimuthal_angles,
                 self.detunings, self.durations)).T

        segment_times = np.insert(np.cumsum(control_segments[:, 3]), 0, 0.)
        plot_time = (segment_times[:, np.newaxis] * np.ones((1, 2))).flatten()
        plot_amplitude_x = control_segments[:, 0]
        plot_amplitude_y = control_segments[:, 1]
        plot_amplitude_z = control_segments[:, 2]

        plot_amplitude_x = np.concatenate(
            ([0.], (plot_amplitude_x[:, np.newaxis] * np.ones(
                (1, 2))).flatten(), [0.]))
        plot_amplitude_y = np.concatenate(
            ([0.], (plot_amplitude_y[:, np.newaxis] * np.ones(
                (1, 2))).flatten(), [0.]))
        plot_amplitude_z = np.concatenate(
            ([0.], (plot_amplitude_z[:, np.newaxis] * np.ones(
                (1, 2))).flatten(), [0.]))

        plot_dictionary = {}
        if coordinates == CARTESIAN:
            plot_dictionary = {
                'amplitudes_x': plot_amplitude_x,
                'amplitudes_y': plot_amplitude_y,
                'detunings': plot_amplitude_z,
                'times': plot_time
            }

        if coordinates == CYLINDRICAL:

            x_plot = plot_amplitude_x
            y_plot = plot_amplitude_y
            x_plot[np.equal(x_plot, -0.0)] = 0.
            y_plot[np.equal(y_plot, -0.0)] = 0.
            azimuthal_angles_plot = np.arctan2(y_plot, x_plot)
            amplitudes_plot = np.sqrt(np.abs(x_plot**2 + y_plot**2))

            plot_dictionary = {
                'rabi_rates': amplitudes_plot,
                'azimuthal_angles': azimuthal_angles_plot,
                'detunings': plot_amplitude_z,
                'times': plot_time
            }
        return plot_dictionary
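
# The repeated times and zero-padded amplitudes built above turn piecewise-constant
# segments into coordinates that plot as a step profile. A small sketch of that
# duplication trick on made-up segment data:
import numpy as np

durations = np.array([1.0, 2.0, 1.0])
amplitudes = np.array([0.5, 1.0, 0.25])

segment_times = np.insert(np.cumsum(durations), 0, 0.)   # [0. 1. 3. 4.]
plot_time = (segment_times[:, np.newaxis] * np.ones((1, 2))).flatten()
plot_amplitude = np.concatenate(
    ([0.], (amplitudes[:, np.newaxis] * np.ones((1, 2))).flatten(), [0.]))

print(plot_time)       # [0. 0. 1. 1. 3. 3. 4. 4.]
print(plot_amplitude)  # [0.  0.5 0.5 1.  1.  0.25 0.25 0. ]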
Exemple #51
0
 def is_proper(self):
     """bool : True if this group contains only proper rotations."""
     return np.all(np.equal(self.improper, 0))
Exemple #52
0
 def compute_accuracy(self, x, seq_len, chars, char_seq_len, y):
     return np.mean(
         np.equal(self.predict(x, seq_len, chars, char_seq_len),
                  y).astype(np.float32))
Exemple #53
0
    def identify_tracks(self):

        # Loop to create centroid objects
        for i in self.list_of_frame_matrix_objects:
            i.find_centroids()

        # Begin loop to create track slices through all frames
        for j in self.list_of_frame_matrix_objects:

            for i in j.cluster_centroids:
                # Short hand version of the cluster number
                cluster_number_short = i.cluster_number
                # Long hand (three digits, leading zeroes) for id
                cluster_number = i.cluster_number

                # File Code + Cluster Number is the Cell ID
                cell_id = str(j.file_code) + str(cluster_number)
                j.list_of_track_slices.insert(
                    0, TrackSlice(cell_id, cluster_number))

                # Create a matrix for the individual cell
                matrix_blob = np.copy(j.matrix_radar_blobs)
                matrix_blob[j.matrix_radar_blobs != cluster_number_short] = 0
                j.list_of_track_slices[0].polygon_points = matrix_blob

                # Calculate the volume according to intensity
                volume_matrix = np.multiply(matrix_blob,
                                            np.flipud(j.matrix_radar_finished))
                j.list_of_track_slices[0].volume_matrix = volume_matrix
                volume = np.sum(volume_matrix)
                j.list_of_track_slices[0].volume_matrix = volume

        # Create dictionary for the track objects in form:

        # self.list_of_frame_matrix_objects[j].list_of_track_slices[i]
        # j = YYYYMMDDHHmm:list index
        # i = cluster_number:list index

        track_slice_dictionary = {}
        for j in range(0, len(self.list_of_frame_matrix_objects)):
            track_slice_dictionary[str(
                self.list_of_frame_matrix_objects[j].file_code)] = {}
            for i in range(
                    0,
                    len(self.list_of_frame_matrix_objects[j].
                        list_of_track_slices)):
                track_slice_dictionary[str(
                    self.list_of_frame_matrix_objects[j].file_code)][
                        self.list_of_frame_matrix_objects[j].
                        list_of_track_slices[i].cluster_number] = i

        self.track_slice_dictionary = track_slice_dictionary
        # For printing out the dictionary
        # for j in track_slice_dictionary:
        #     for i in track_slice_dictionary[j]:
        #         print j, i, track_slice_dictionary[j][i]

        # Begin Loop to analyse
        # Need to calculate the average distance
        # Calculate the distance between each centroid from one frame to the next, keep the shortest one
        # TODO Need to cycle through all the images on the list, instead of just these two

        m = 2

        for m in range(0, len(self.list_of_frame_matrix_objects) - 1):

            n = m + 1
            centroid_list_n = self.list_of_frame_matrix_objects[
                m].cluster_centroids
            centroid_list_n_1 = self.list_of_frame_matrix_objects[
                n].cluster_centroids

            centroid_couples = []

            for i in centroid_list_n:
                distance_array = []
                for j in centroid_list_n_1:
                    distance_array.append(
                        [calc_dist(i.x, i.y, j.x, j.y), 0, [i, j]])

                centroid_couples.append(min(distance_array))

            centroid_couples = np.asarray(centroid_couples)

            std = centroid_couples[:, 0].std()

            # TODO Code in how to handle standard deviation for different radar ranges, 64km, 128km, 256km
            while std > 10:  # This will change depending on the range of the radar
                # Remove the centroid couple that has the largest distance.
                centroid_couples = centroid_couples[~(
                    centroid_couples == max(centroid_couples[:, 0])).any(1)]
                std = centroid_couples[:, 0].std()

            # This is the average distance.
            average_distance = np.average(centroid_couples[:, 0].tolist())

            # Now need to find the average direction
            # Direction will be a number between 0 and 360

            # For each centroid pair, calculate the direction
            for i in range(0, len(centroid_couples)):
                x1 = centroid_couples[i][2][0].x
                y1 = centroid_couples[i][2][0].y
                x2 = centroid_couples[i][2][1].x
                y2 = centroid_couples[i][2][1].y
                centroid_couples[i][1] = calc_dir(x1, y1, x2, y2)

            dir_list = []
            # Put all directions in a list, for some reason .average() isn't working for the array
            for i in centroid_couples[:, 1]:
                dir_list.append(i)

            # Find the std dev and the average
            std = np.asarray(dir_list).std()
            ave = np.average(dir_list)

            while std > 30:  # This will change depending on the range of the radar

                ave = np.average(dir_list)
                # Biggest outlier is the one that is furthest from the average
                biggest_outlier = centroid_couples[(
                    np.fabs(np.asarray(dir_list) - ave) == max(
                        np.fabs(np.asarray(dir_list) - ave)))][0][1]
                # Remove the centroid couple that contains the biggest outlier
                centroid_couples = centroid_couples[~(
                    centroid_couples == biggest_outlier).any(1)]
                # Remove it from the directions list
                dir_list.remove(biggest_outlier)
                # Calculate the new std dev.
                std = np.asarray(dir_list).std()

            # This is the average direction
            average_direction = ave

            print()
            print("Centroid Couples Distance and Direction")
            for i in centroid_couples:
                print(i[0], i[1], i[2])
            print()
            print("Average Distance:", average_distance)
            print("Average Direction:", average_direction)

            x1 = []
            y1 = []
            # x2 = []
            # y2 = []
            labels = []

            for i in self.list_of_frame_matrix_objects[m].cluster_centroids:
                x1.append(i.x)
                y1.append(512 - i.y)
                # x2.append(i[2][1].x)
                # y2.append(i[2][1].y)
                # labels.append(i[1])

            xshift = int(
                round(
                    average_distance * sin(
                        (90 + 2 * (90 - average_direction)) * pi / 180), 0))
            yshift = int(
                round(
                    average_distance * sin(
                        (90 + 2 * (90 - average_direction)) * pi / 180), 0))

            # Begin Loop through each cluster
            for j in np.unique(
                    self.list_of_frame_matrix_objects[m].matrix_radar_blobs):
                if j == 0:
                    continue
                shifted = ndimage.shift(
                    return_single_cluster(
                        self.list_of_frame_matrix_objects[m].
                        matrix_radar_blobs, j), (yshift, xshift))
                n_1 = self.list_of_frame_matrix_objects[n].matrix_radar_blobs

                shifted = return_binary(shifted)
                n_1_binary = return_binary(n_1)

                overlap = np.copy(n_1)

                overlap[np.equal(n_1_binary, shifted) == False] = 0

                # Now to test the overlap percentage
                # A is the overlap area
                list_of_overlap_blobs = np.unique(overlap)
                for i in list_of_overlap_blobs:
                    if i == 0:
                        continue

                    a = float((overlap == i).sum())
                    at = float((n_1 == i).sum())
                    bt = shifted.sum()

                    n_file_code = self.list_of_frame_matrix_objects[
                        m].file_code
                    n_1_file_code = self.list_of_frame_matrix_objects[
                        n].file_code

                    # Check if a match exists
                    if 100 * a / at + 100 * a / bt > 60:
                        d = track_slice_dictionary
                        self.list_of_frame_matrix_objects[
                            m].list_of_track_slices[
                                d[n_file_code][j]].end_ids.append(
                                    str(n_1_file_code) + str(i))
                        self.list_of_frame_matrix_objects[
                            n].list_of_track_slices[
                                d[n_1_file_code][i]].start_ids.append(
                                    str(n_file_code) + str(j))

                # Finally, it is useful to have the locations of the centroids in the track slices (not objects though)
                for j in self.list_of_frame_matrix_objects:
                    for i in j.list_of_track_slices:
                        i.centroid = ndimage.measurements.center_of_mass(
                            i.polygon_points)
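
# The matching test near the end of identify_tracks
# (100 * a / at + 100 * a / bt > 60) compares the overlap of a shifted blob against
# both the candidate blob and the shifted blob itself. A toy, self-contained version
# of that criterion (threshold and names here are illustrative):
import numpy as np

def blobs_match(blob_next, blob_shifted, threshold=60.0):
    """True if the mutual overlap percentages of two binary blobs exceed the threshold."""
    overlap = np.logical_and(blob_next, blob_shifted)
    a = float(overlap.sum())        # overlap area
    at = float(blob_next.sum())     # area of the blob in the next frame
    bt = float(blob_shifted.sum())  # area of the shifted blob
    if at == 0 or bt == 0:
        return False
    return 100 * a / at + 100 * a / bt > threshold

blob_next = np.array([[1, 1, 0],
                      [1, 1, 0],
                      [0, 0, 0]])
blob_shifted = np.array([[0, 1, 1],
                         [0, 1, 1],
                         [0, 0, 0]])
print(blobs_match(blob_next, blob_shifted))  # overlap 2 of areas 4 and 4 -> 100 > 60 -> True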
Exemple #54
0
def aposmm_logic(H, persis_info, gen_specs, _):
    """
    APOSMM coordinates multiple local optimization runs, starting from points
    which do not have a better point nearby (within a distance ``r_k``). This
    generation function produces/requires the following fields in ``H``:

    - ``'x' [n floats]``: Parameters being optimized over
    - ``'x_on_cube' [n floats]``: Parameters scaled to the unit cube
    - ``'f' [float]``: Objective function being minimized
    - ``'local_pt' [bool]``: True if point from a local optimization run
    - ``'dist_to_unit_bounds' [float]``: Distance to domain boundary
    - ``'dist_to_better_l' [float]``: Dist to closest better local opt point
    - ``'dist_to_better_s' [float]``: Dist to closest better sample point
    - ``'ind_of_better_l' [int]``: Index of point ``'dist_to_better_l``' away
    - ``'ind_of_better_s' [int]``: Index of point ``'dist_to_better_s``' away
    - ``'started_run' [bool]``: True if point has started a local opt run
    - ``'num_active_runs' [int]``: Number of active local runs point is in
    - ``'local_min' [float]``: True if point has been ruled a local minima
    - ``'sim_id' [int]``: Row number of entry in history

    and optionally

    - ``'priority' [float]``: Value quantifying a point's desirability
    - ``'f_i' [float]``: Value of ith objective component (if single_component)
    - ``'fvec' [m floats]``: All objective components (if calculated together)
    - ``'obj_component' [int]``: Index corresponding to value in ``'f_i``'
    - ``'pt_id' [int]``: Identify the point (useful when evaluating different
      objective components for a given ``'x'``)

    When using libEnsemble to do individual objective component evaluations,
    APOSMM will return ``gen_specs['user']['components']`` copies of each point, but
    the component=0 entry of each point will be considered only when

    - deciding where to start a run,
    - determining the best nearby point,
    - storing the order of the points in the run, or
    - storing the combined objective function value

    Necessary quantities in ``gen_specs['user']`` are

    - ``'lb' [n floats]``: Lower bound on search domain
    - ``'ub' [n floats]``: Upper bound on search domain
    - ``'initial_sample_size' [int]``: Number of uniformly sampled points
      must be returned (non-nan value) before a local opt run is started

    - ``'localopt_method' [str]``: Name of an NLopt, PETSc/TAO, or SciPy method
      (see 'advance_local_run' below for supported methods)

    Optional ``gen_specs['user']`` entries are as follows

    - ``'sample_points' [numpy array]``: Points to be sampled (original domain)
    - ``'combine_component_func' [func]``: Function to combine obj components
    - ``'components' [int]``: Number of objective components
    - ``'dist_to_bound_multiple' [float in (0,1]]``: Fraction of the
      distance to the nearest boundary to be used as the initial step size
      in localopt runs
    - ``'high_priority_to_best_localopt_runs': [bool]``: True if localopt runs
      with smallest observed function value are given priority
    - ``'lhs_divisions' [int]``: Number of Latin hypercube sampling partitions
      (0 or 1 results in uniform sampling)
    - ``'min_batch_size' [int]``: Lower bound on the number of points given
      every time APOSMM is called
    - ``'mu' [float]``: Distance from the boundary that all localopt starting
      points must satisfy
    - ``'nu' [float]``: Distance from identified minima that all starting
      points must satisfy
    - ``'single_component_at_a_time' [bool]``: True if single objective
      components will be evaluated at a time
    - ``'rk_const' [float]``: Multiplier in front of the r_k value
    - ``'max_active_runs' [int]``: Bound on number of runs APOSMM is advancing

    The following are ``gen_specs['user']`` convergence tolerances for NLopt, PETSc/TAO, SciPy

    - ``'fatol' [float]``:
    - ``'ftol_abs' [float]``:
    - ``'ftol_rel' [float]``:
    - ``'gatol' [float]``:
    - ``'grtol' [float]``:
    - ``'xtol_abs' [float]``:
    - ``'xtol_rel' [float]``:
    - ``'tol' [float]``:


    As a default, APOSMM starts a local optimization run from a point that

    - is not in an active local optimization run,
    - is more than ``mu`` from the boundary (in the unit-cube domain),
    - is more than ``nu`` from identified minima (in the unit-cube domain), and
    - does not have a better point within a distance ``r_k`` of it.

    If the above results in more than ``'max_active_runs'`` runs being
    advanced, the best point in each run is determined and its dist_to_better
    is computed (with inf being the value for the best run). Then the
    ``'max_active_runs'`` runs with the largest dist_to_better are advanced
    (breaking ties arbitrarily).
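
    For instance (an illustrative sketch, not part of the interface): with
    four runs whose dist_to_better values are [inf, 0.2, 0.9, 0.4] and
    ``'max_active_runs'`` equal to 2, the runs with values inf and 0.9 are
    advanced::

        import numpy as np
        dist_to_better = np.array([np.inf, 0.2, 0.9, 0.4])
        k = 2
        keep = np.argpartition(-dist_to_better, kth=k - 1)[:k]  # indices 0 and 2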

    :Note:
        ``gen_specs['user']['combine_component_func']`` must be defined when there are
        multiple objective components.

    :Note:
        APOSMM critically uses ``persis_info`` to store information about
        active runs, order of points in each run, etc. The allocation function
        must ensure that it is always given.

    .. seealso::
        `test_sim_dirs.py <https://github.com/Libensemble/libensemble/blob/develop/libensemble/tests/regression_tests/test_sim_dirs.py>`_
        for basic APOSMM usage.

    .. seealso::
        `test_old_aposmm_one_residual_at_a_time.py <https://github.com/Libensemble/libensemble/blob/develop/libensemble/tests/regression_tests/test_old_aposmm_one_residual_at_a_time.py>`_
        for an example of APOSMM coordinating multiple local optimization runs
        for an objective with more than one component.
    """
    """
    Description of intermediate variables in aposmm_logic:

    n:                domain dimension
    c_flag:           True if giving libEnsemble individual components of fvec
                      to evaluate. (Note if c_flag is True, APOSMM will use
                      only the component to store the function value f)
    n_s:              the number of complete evaluations of sampled points
    updated_inds:     indices of H that have been updated (and so all their
                      information must be sent back to libE manager to update)
    H_o:              new points to be sent back to the history


    When re-running a local opt method to get the next point:
    advance_local_run.x_new:      stores the first new point requested by
                                  a local optimization method
    advance_local_run.pt_in_run:  counts function evaluations to know
                                  when a new point is given

    starting_inds:    indices where runs should be started
    active_runs:      indices of active local optimization runs
    sorted_run_inds:  indices of the points in the run being advanced (in the
                      order they were requested by the localopt method)
    x_opt:            the reported minimum from a localopt run (used only
                      when exit_code is nonzero)
    exit_code:        0 if a new localopt point has been found, otherwise it's
                      the NLopt/TAO/SciPy code
    samples_needed:   Number of additional uniformly drawn samples needed


    Description of persistent variables used to maintain the state of APOSMM

    persis_info['total_runs']: Running count of started/completed localopt runs
    persis_info['run_order']: Sequence of indices of points in unfinished runs
    persis_info['old_runs']: Sequence of indices of points in finished runs
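
    An illustrative sketch of this state (the values are assumptions, not
    from a real run) after two runs have started and one has finished:

        persis_info = {'total_runs': 2,
                       'run_order': {1: [7, 12, 15]},     # run 1 still active
                       'old_runs': {0: [3, 5, 9, 11]},    # run 0 finished
                       'rand_stream': np.random.RandomState(0)}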

    """

    n, n_s, c_flag, H_o, r_k, mu, nu = initialize_APOSMM(H, gen_specs)

    # np.savez('H'+str(len(H)),H=H,gen_specs=gen_specs,persis_info=persis_info)
    if n_s < gen_specs['user']['initial_sample_size']:
        updated_inds = set()

    else:
        updated_inds = update_history_dist(H, n, gen_specs, c_flag)

        starting_inds = decide_where_to_start_localopt(H, r_k, mu, nu)
        updated_inds.update(starting_inds)

        for ind in starting_inds:
            # Find the run number
            new_run_num = persis_info['total_runs']

            H['started_run'][ind] = 1
            H['num_active_runs'][ind] += 1

            persis_info['run_order'][new_run_num] = [ind]
            persis_info['total_runs'] += 1

        num_runs = len(persis_info['run_order'])
        if ('max_active_runs' in gen_specs['user']
                and gen_specs['user']['max_active_runs'] < num_runs):
            # Store run number and sim_id of the best point in each run
            run_vals = np.zeros((num_runs, 2), dtype=int)
            for i, run in enumerate(persis_info['run_order'].keys()):
                run_vals[i, 0] = run
                run_vals[i, 1] = persis_info['run_order'][run][np.nanargmin(
                    H['f'][persis_info['run_order'][run]])]

            # Compute pairwise distance between the best points in each run
            P = squareform(pdist(H['x_on_cube'][run_vals[:, 1]], 'euclidean'))
            dist_to_better = np.inf * np.ones(num_runs)

            for i in range(num_runs):
                better = H['f'][run_vals[:, 1]] < H['f'][run_vals[i, 1]]
                if any(better):
                    dist_to_better[i] = np.min(P[i, better])

            # Take max_active_runs largest
            k_sorted = np.argpartition(
                -dist_to_better, kth=gen_specs['user']['max_active_runs'] - 1)
            active_runs = set(
                run_vals[k_sorted[:gen_specs['user']['max_active_runs']],
                         0].astype(int))
        else:
            active_runs = set(persis_info['run_order'].keys())

        inactive_runs = set()

        # Find next point in any uncompleted run using persis_info['run_order']
        for run in active_runs:
            if not np.all(H['returned'][persis_info['run_order'][run]]):
                continue  # Can't advance a run if all points aren't returned.

            x_opt, exit_code, persis_info, sorted_run_inds, x_new = advance_local_run(
                H, gen_specs['user'], c_flag, run, persis_info)

            if np.isinf(x_new).all():
                if exit_code == 0:
                    run_out_file = "run_" + str(run) + "_abort.pickle"

                    with open(run_out_file, "wb") as f:
                        pickle.dump((H, gen_specs, c_flag, run, persis_info),
                                    f)

                    raise APOSMMException(
                        "Exit code is 0, but x_new was not updated in " +
                        "local opt run " + str(run) + " after " +
                        str(len(sorted_run_inds)) + " evaluations.\n" +
                        "Saving run information to: " + run_out_file +
                        "\nWorker crashing!")

                # No new point was added. Hopefully at a minimum
                update_history_optimal(x_opt, H, sorted_run_inds)
                inactive_runs.add(run)
                updated_inds.update(sorted_run_inds)

            else:
                # Check if x_new is already being requested (a check if it's in
                # H is performed inside advance_local_run)
                match_ind = np.where(np.equal(x_new,
                                              H_o['x_on_cube']).all(1))[0]
                if len(match_ind) == 0:
                    persis_info = add_to_Out(H_o,
                                             x_new,
                                             H,
                                             gen_specs,
                                             c_flag,
                                             persis_info,
                                             local_flag=1,
                                             sorted_run_inds=sorted_run_inds,
                                             run=run)
                else:
                    assert len(match_ind) == 1, "The same point is in H_o twice"
                    persis_info['run_order'][run].append(
                        H_o['sim_id'][match_ind[0]])

        for i in inactive_runs:
            old_run = persis_info['run_order'].pop(i)  # Deletes all run info
            persis_info['old_runs'][i] = old_run

    if len(H) == 0:
        samples_needed = gen_specs['user']['initial_sample_size']
    elif 'min_batch_size' in gen_specs['user']:
        samples_needed = gen_specs['user']['min_batch_size'] - len(H_o)
    else:
        samples_needed = int(not bool(len(H_o)))  # 1 if len(H_o)==0, 0 otherwise

    if samples_needed > 0 and 'sample_points' in gen_specs['user']:
        v = np.sum(~H['local_pt'])  # Number of sample points so far
        sampled_points = gen_specs['user']['sample_points'][v:v + samples_needed]
        on_cube = False  # Assume points are on original domain, not unit cube
        if len(sampled_points):
            persis_info = add_to_Out(H_o,
                                     sampled_points,
                                     H,
                                     gen_specs,
                                     c_flag,
                                     persis_info,
                                     on_cube=on_cube)
        samples_needed = samples_needed - len(sampled_points)

    if samples_needed > 0:
        sampled_points = persis_info['rand_stream'].uniform(
            0, 1, (samples_needed, n))
        on_cube = True
        persis_info = add_to_Out(H_o,
                                 sampled_points,
                                 H,
                                 gen_specs,
                                 c_flag,
                                 persis_info,
                                 on_cube=on_cube)

    H_o = np.append(
        H[np.array(list(updated_inds),
                   dtype=int)][[o[0] for o in gen_specs['out']]], H_o)

    return H_o, persis_info
    # before line 15 algorithm 2
    # TODO only works when K = 4, has to be generalized
    f_t_gray_reshape = f_t_gray.reshape((rows * cols))
    f_t_gray_reshape_4cols = np.array([
        f_t_gray_reshape, f_t_gray_reshape, f_t_gray_reshape, f_t_gray_reshape
    ],
                                      dtype=int).T
    # 16: m = K_temp
    ## m = np.where(match != 0, (K - 1), m)
    # 19 march

    match_exp_dim = np.expand_dims(match, axis=2)

    f_t_4cols_exp_dim = np.expand_dims(f_t_gray, axis=2)

    if np.any(np.equal(match, 0)):
        # m is where the k-th Gaussian has not been created yet, i.e. where the
        # first mu == 300 appears (equivalently, where the first variance == 0
        # or the first weight == 0 appears)
        m_temp = np.argmax(mu == 300, axis=2)  # shape; (rows, cols)
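        # (Illustrative note: np.argmax over a boolean array returns the index
        #  of the first True along the axis, and 0 when no element is True,
        #  which is why the K - 1 fallback below is needed.)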
        m = np.where(np.equal(match, 0), m_temp, m)
        # if there is no such mu == 300, assign m = K - 1 (which is 3)
        m = np.where(np.logical_and(np.equal(match, 0), np.equal(m, 0)),
                     (K - 1), m)
        m_expended_dim = np.expand_dims(m, axis=2)

        # 17 algorithm 2
        w_temp_unmatched = np.copy(w)
        np.put_along_axis(w_temp_unmatched, m_expended_dim, w_init, axis=2)
        w = np.where(np.equal(match_exp_dim, 0), w_temp_unmatched, w)

        # 18 algorithm 2
Exemple #56
0
    def _compare(self, a, b):
        if self._is_numpy(a) or self._is_numpy(b):
            import numpy as np
            return np.equal(a, b)
        else:
            return a == b
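
A quick aside (not part of the example above): np.equal compares elementwise with broadcasting, so the NumPy branch returns a boolean array rather than a single bool. A minimal sketch:

import numpy as np
print(np.equal(np.array([1, 2, 3]), [1, 0, 3]))  # [ True False  True]
print(np.equal(2, np.array([1, 2, 3])))          # [False  True False]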
Exemple #57
0
    def fit(self, X, y=None):
        from scipy.stats import norm, multivariate_normal
        from sklearn.metrics import f1_score, precision_score, recall_score

        mask = np.equal(y, 1)
        m0, m1 = np.bincount(mask)

        Xtr = X[~mask][:int(m0 * .6)]

        Xcv = np.concatenate(
            [X[~mask][int(m0 * .6):int(m0 * .8)], X[mask][:int(m1 * .5)]],
            axis=0)
        ycv = np.concatenate(
            [y[~mask][int(m0 * .6):int(m0 * .8)], y[mask][:int(m1 * .5)]])

        self.Xts = np.concatenate(
            [X[~mask][int(m0 * .8):], X[mask][int(m1 * .5):]], axis=0)
        self.yts = np.concatenate(
            [y[~mask][int(m0 * .8):], y[mask][int(m1 * .5):]])

        #fitting process
        mu = Xtr.mean(0)

        if self.multivariate:
            Σ = np.cov(Xtr.T)
            distribution = multivariate_normal(mean=mu, cov=Σ)
            self.predict_densities = lambda X: distribution.pdf(X)

        else:
            std = np.std(Xtr, ddof=0)
            distribution = norm(loc=mu, scale=std)
            self.predict_densities = lambda X: np.add.reduce(
                np.log(distribution.pdf(X)), axis=1)

        densities = self.predict_densities(Xcv)
        epsilons_sorted = sorted(densities)
        ix = sum(ycv == 1)

        for ix in range(ix, len(epsilons_sorted)):
            ypred = densities < epsilons_sorted[ix]
            recall = recall_score(ycv, ypred)
            if recall >= 1.0:
                epsilon_lower = epsilons_sorted[1]
                epsilon_upper = epsilons_sorted[ix + 25]  # 25 is an arbitrary offset
                break

        epsilons = np.linspace(epsilon_lower, epsilon_upper, 100)
        recalls, precisions, f1_scores = (list() for _ in range(3))

        for epsilon in epsilons:
            ypred = densities < epsilon
            recall = recall_score(ycv, ypred)
            precision = precision_score(ycv, ypred)
            f1 = f1_score(ycv, ypred)
            for l, v in zip([recalls, precisions, f1_scores],
                            [recall, precision, f1]):
                l.append(v)

        # make a pandas DataFrame
        from pandas import DataFrame, set_option
        set_option('precision', 2, 'display.width', 250)

        d = {
            "recall": recalls,
            "precision": precisions,
            "f1_score": f1_scores,
            "epsilon": epsilons
        }
        df = DataFrame(d)
        self.df = df.groupby(by=["recall", "precision", "f1_score"]).mean(
        ).reset_index().sort_values("epsilon").reset_index(drop=True)
        print(self.df)

        ix = df.idxmax()[self.criterion]
        self.optimal_epsilon = df.iloc[ix]["epsilon"]
        return self
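
A minimal, self-contained sketch of the epsilon-selection idea used in the fit method above (the synthetic data and the threshold sweep are illustrative assumptions, not taken from the example):

import numpy as np
from scipy.stats import norm
from sklearn.metrics import f1_score

rng = np.random.default_rng(0)
inliers = rng.normal(0.0, 1.0, size=200)
outliers = rng.normal(6.0, 1.0, size=10)
X = np.concatenate([inliers, outliers])
y = np.concatenate([np.zeros(200, dtype=int), np.ones(10, dtype=int)])  # 1 marks an anomaly

# Fit a Gaussian to the inliers and flag low-density points as anomalies,
# choosing the threshold (epsilon) that maximizes F1 on the labeled data.
densities = norm(loc=inliers.mean(), scale=inliers.std()).pdf(X)
epsilons = np.linspace(densities.min(), densities.max(), 100)
best_eps = max(epsilons, key=lambda eps: f1_score(y, (densities < eps).astype(int)))
print("chosen epsilon:", best_eps)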
def main():
    """
    Classification of MNIST image data using a CNN
    """
    print("Enter main()")

    #======================================================================
    # Import or generate data.
    #======================================================================
    #======================================================================
    # Split the dataset into training, test, and validation datasets
    #======================================================================
    # Path to the folder where the MNIST data is stored
    mnist_path = r"C:\Data\MachineLearning_DataSet\MNIST"

    X_train, y_train = MLPreProcess.load_mnist( mnist_path, "train" )
    X_test, y_test = MLPreProcess.load_mnist( mnist_path, "t10k" )

    X_train = numpy.array( [numpy.reshape(x, (28,28)) for x in X_train] )
    X_test = numpy.array( [numpy.reshape(x, (28,28)) for x in X_test] )

    """
    # Load the MNIST data using TensorFlow's helper functions
    mnist = read_data_sets( mnist_path )
    print( "mnist :\n", mnist )
    X_train = numpy.array( [numpy.reshape(x, (28,28)) for x in mnist.train.images] )
    X_test = numpy.array( [numpy.reshape(x, (28,28)) for x in mnist.test.images] )
    y_train = mnist.train.labels
    y_test = mnist.test.labels
    """

    print( "X_train.shape : ", X_train.shape )
    print( "y_train.shape : ", y_train.shape )
    print( "X_test.shape : ", X_test.shape )
    print( "y_test.shape : ", y_test.shape )

    print( "X_train : \n", X_train )
    print( "y_train : \n", y_train )
    
    #======================================================================
    # Transform and normalize data.
    # ex) data = tf.nn.batch_norm_with_global_normalization(...)
    #======================================================================
    # One-hot encoding
    #y_train_encoded = numpy.eye(10)[ y_train.astype(int) ]
    #y_test_encoded = numpy.eye(10)[ y_test.astype(int) ]

    session = tf.Session()
    encode_holder = tf.placeholder(tf.int64, [None])
    y_oneHot_enoded_op = tf.one_hot( encode_holder, depth=10, dtype=tf.float32 ) # depth corresponds to the number of output-layer nodes
    session.run( tf.global_variables_initializer() )
    y_train_encoded = session.run( y_oneHot_enoded_op, feed_dict = { encode_holder: y_train } )
    y_test_encoded = session.run( y_oneHot_enoded_op, feed_dict = { encode_holder: y_test } )
    print( "y_train_encoded.shape : ", y_train_encoded.shape )
    print( "y_train_encoded.dtype : ", y_train_encoded.dtype )
    print( "y_test_encoded.shape : ", y_test_encoded.shape )

    #======================================================================
    # Set algorithm (model) parameters.
    # ex) learning_rate = 0.01  iterations = 1000
    #======================================================================
    # Create ConvolutionalNN class objects
    cnn1 = ConvolutionalNN(
               session = tf.Session( config = tf.ConfigProto(log_device_placement=True) ),
               epochs = 500,
               batch_size = 100,
               eval_step = 1,
               image_height = 28,                   # 28 pixels
               image_width = 28,                    # 28 pixels
               n_channels = 1,                      # grayscale
               n_ConvLayer_featuresMap = [25, 50],  # conv1 : 25 maps, conv2 : 50 maps
               n_ConvLayer_kernels = [4, 4],        # conv1 : 4*4, conv2 : 4*4
               n_strides = 1,
               n_pool_wndsize = 2,
               n_pool_strides = 2,
               n_fullyLayers = [100,100],
               n_labels = 10
           )

    #cnn1._t_holder = tf.placeholder( tf.int32, [None] )

    cnn2 = ConvolutionalNN(
               session = tf.Session( config = tf.ConfigProto(log_device_placement=True) ),
               epochs = 500,
               batch_size = 100,
               eval_step = 1,
               image_height = 28,                   # 28 pixels
               image_width = 28,                    # 28 pixels
               n_channels = 1,                      # grayscale
               n_ConvLayer_featuresMap = [25, 50],  # conv1 : 25 maps, conv2 : 50 maps
               n_ConvLayer_kernels = [4, 4],        # conv1 : 4*4, conv2 : 4*4
               n_strides = 1,
               n_pool_wndsize = 2,
               n_pool_strides = 2,
               n_fullyLayers = [100,100],
               n_labels = 10
           )

    #cnn2._t_holder = tf.placeholder( tf.int32, [None] )

    #cnn1.print( "after __init__()" )

    #======================================================================
    # Initialize variables and placeholders.
    # During optimization to minimize the loss function, TensorFlow changes
    # or adjusts the variables and weight vectors. To realize these changes
    # and adjustments, data must be supplied (fed) through "placeholders",
    # and the variables, placeholders, and their types must be initialized.
    # ex) a_var = tf.constant(42)
    #     x_input_holder = tf.placeholder(tf.float32, [None, input_size])
    #     y_input_holder = tf.placeholder(tf.float32, [None, num_classes])
    #======================================================================


    #======================================================================
    # Define the model structure.
    # ex) add_op = tf.add(tf.mul(x_input_holder, weight_matrix), b_matrix)
    #======================================================================
    cnn1.model()
    cnn2.model()
    cnn1.print( "after model()" )

    #======================================================================
    # Declare the loss functions.
    #======================================================================
    cnn1.loss( SoftmaxCrossEntropy() )
    cnn2.loss( SoftmaxCrossEntropy() )

    #======================================================================
    # Initialize and train the model.
    # With the preparation so far, actually create the computational graph
    # (directed graph) object and feed data into it through the placeholders.
    #
    # ex) One way to initialize the computational graph:
    #     with tf.Session( graph = graph ) as session:
    #         ...
    #         session.run(...)
    #         ...
    #     session = tf.Session( graph = graph )
    #     session.run(...)
    #======================================================================
    # Set the model's optimization algorithm
    cnn1.optimizer( Momentum( learning_rate = 0.0001, momentum = 0.9 ) )
    cnn2.optimizer( Momentum( learning_rate = 0.0005, momentum = 0.9 ) )

    # Create the file (folder) for TensorBoard
    cnn1.write_tensorboard_graph()

    # Fitting on the training data
    cnn1.fit( X_train, y_train_encoded )
    cnn2.fit( X_train, y_train_encoded )

    cnn1.print( "after fit()" )
    #print( mlp1._session.run( mlp1._weights[0] ) )

    #======================================================================
    # (Optional) Evaluate the model.
    #======================================================================
    #-------------------------------------------------------------------
    # Plot the loss values against the number of training epochs
    #-------------------------------------------------------------------
    plt.clf()
    plt.plot(
        range( 0, 500 ), cnn1._losses_train,
        label = 'train data : CNN1 = [25,50,100], learning_rate = 0.0001',
        linestyle = '-',
        #linewidth = 2,
        color = 'red'
    )
    plt.plot(
        range( 0, 500 ), cnn2._losses_train,
        label = 'train data : CNN2 = [25,50,100], learning_rate = 0.0005',
        linestyle = '--',
        #linewidth = 2,
        color = 'blue'
    )
    plt.title( "loss" )
    plt.legend( loc = 'best' )
    #plt.ylim( [0, 1.05] )
    plt.xlabel( "Epochs" )
    plt.tight_layout()
   
    MLPlot.saveFigure( fileName = "CNN_1-1.png" )
    plt.show()

    #--------------------------------------------------------------------
    # Accuracy on the test data
    #--------------------------------------------------------------------
    accuracy1 = cnn1.accuracy( X_test, y_test )
    accuracy2 = cnn2.accuracy( X_test, y_test )
    print( "accuracy1 [test data] : %0.3f" % accuracy1 )
    print( "accuracy2 [test data] : %0.3f" % accuracy2 )

    print( "accuracy1 labels [test data]" )
    accuracys1 = cnn1.accuracy_labels( X_test, y_test )
    for i in range( len(accuracys1) ):
        print( "label %d : %.3f" % ( i, accuracys1[i] ) )

    print( "accuracy2 labels [test data]" )
    accuracys2 = cnn2.accuracy_labels( X_test, y_test )
    for i in range( len(accuracys2) ):
        print( "label %d : %.3f" % ( i, accuracys2[i] ) )

    #-------------------------------------------------------------------
    # Plot correctly classified & misclassified images
    #-------------------------------------------------------------------
    predict1 = cnn1.predict( X_test )
    predict2 = cnn2.predict( X_test )
    print( "predict1 : ", predict1 )
    print( "predict2 : ", predict2 )

    # List of correct/incorrect predictions [True or False]
    corrects1 = numpy.equal( predict1, y_test )
    corrects2 = numpy.equal( predict2, y_test )
    print( "corrects1 :", corrects1 )
    print( "corrects2 :", corrects2 )

    figure1, axis1 = plt.subplots( 
                        nrows = 5, ncols = 8,
                        sharex = True, sharey = True     # share the x and y axes
                     )
    
    # Convert the 2D array of axes to 1D
    axis1 = axis1.flatten()

    # Loop for plotting correctly classified images
    #plt.clf()
    for (idx, image) in enumerate( X_test[ corrects1 ][0:40] ):
        #print( "idx", idx )
        image = image.reshape(28,28)        # reshape to shape = [28, 28]
        axis1[idx].imshow(
            image,
            cmap = "Greys",
            interpolation = "nearest"   # interpolation method
        )
        axis1[idx].set_title( "Actual: " + str( y_test[corrects1][idx] ) + " Pred: " + str( predict1[corrects1][idx] ), fontsize = 8 )

    axis1[0].set_xticks( [] )
    axis1[0].set_yticks( [] )
    #plt.tight_layout()
    MLPlot.saveFigure( fileName = "CNN_1-2.png" )
    plt.show()
    

    # Loop for plotting misclassified images
    figure2, axis2 = plt.subplots( 
                        nrows = 5, ncols = 8,
                        sharex = True, sharey = True     # share the x and y axes
                     )

    # Convert the 2D array of axes to 1D
    axis2 = axis2.flatten()

    for (idx, image) in enumerate( X_test[ ~corrects1 ][0:40] ):
        image = image.reshape(28,28)        # reshape to shape = [28, 28]
        axis2[idx].imshow(
            image,
            cmap = "Greys",
            interpolation = "nearest"   # interpolation method
        )
        axis2[idx].set_title( "Actual: " + str( y_test[~corrects1][idx] ) + " Pred: " + str( predict1[~corrects1][idx] ), fontsize = 8 )

    axis2[0].set_xticks( [] )
    axis2[0].set_yticks( [] )
    #plt.tight_layout()
    MLPlot.saveFigure( fileName = "CNN_1-3.png" )
    plt.show()

    #---------------------------------------------------------------------
    # Plot MNIST images
    #---------------------------------------------------------------------
    # Plot the first image for each of the labels 0-9
    """
    # From plt.subplots(...), create a Figure object and the Axis objects
    figure, axis = plt.subplots( 
                       nrows = 2, ncols = 5,
                       sharex = True, sharey = True     # share the x and y axes
                   )
    # Convert the 2 × 5 array to 1D
    axis = axis.flatten()
    # for loop for plotting the digits 0-9
    for i in range(10):
        image = X_train[y_train == i][0]    #
        image = image.reshape(28,28)        # reshape the 1D array to shape = [28, 28]
        axis[i].imshow(
            image,
            cmap = "Greys",
            interpolation = "nearest"   # interpolation method
        )
    axis[0].set_xticks( [] )
    axis[0].set_yticks( [] )
    plt.tight_layout()
    MLPlot.saveFigure( fileName = "MultilayerPerceptron_3-1.png" )
    plt.show()
    """

    """
    # Plot 25 images of a specific label
    figure, axis = plt.subplots( nrows = 5, ncols = 5, sharex = True, sharey = True )
    axis = axis.flatten()
    for i in range(25):
        image = X_train[y_train == 7][i].reshape(28,28)    
        axis[i].imshow( image, cmap = "Greys", interpolation = "nearest" )
    
    axis[0].set_xticks( [] )
    axis[0].set_yticks( [] )
    plt.tight_layout()
    MLPlot.saveFigure( fileName = "MultilayerPerceptron_3-2.png" )
    plt.show()
    """

    #======================================================================
    # Hyperparameter tuning (Optional)
    #======================================================================


    #======================================================================
    # Deploy and predict new outcomes (Optional)
    #======================================================================


    print("Finish main()")
    return
def _ConstantValue(tensor, partial):
  # TODO(touts): Support Variables?
  if not isinstance(tensor, ops.Tensor):
    raise TypeError("tensor is not a Tensor")
  if tensor.op.type == "Const":
    return MakeNdarray(tensor.op.get_attr("value"))
  elif tensor.op.type == "Shape":
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.is_fully_defined():
      return np.array([dim.value for dim in input_shape.dims],
                      dtype=tensor.dtype.as_numpy_dtype)
    else:
      return None
  elif tensor.op.type == "Size":
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.is_fully_defined():
      return np.prod([dim.value for dim in input_shape.dims], dtype=np.int32)
    else:
      return None
  elif tensor.op.type == "Rank":
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.ndims is not None:
      return np.ndarray(shape=(), buffer=np.array([input_shape.ndims], dtype=np.int32),
                        dtype=np.int32)
    else:
      return None
  elif tensor.op.type == "Range":
    start = constant_value(tensor.op.inputs[0])
    if start is None:
      return None
    limit = constant_value(tensor.op.inputs[1])
    if limit is None:
      return None
    delta = constant_value(tensor.op.inputs[2])
    if delta is None:
      return None
    return np.arange(start, limit, delta, dtype=tensor.dtype.as_numpy_dtype)
  elif tensor.op.type == "Cast":
    pre_cast = constant_value(tensor.op.inputs[0])
    if pre_cast is None:
      return None
    cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
    return pre_cast.astype(cast_dtype.as_numpy_dtype)
  elif tensor.op.type == "Concat":
    dim = constant_value(tensor.op.inputs[0])
    if dim is None:
      return None
    values = []
    for x in tensor.op.inputs[1:]:
      value = constant_value(x)
      if value is None:
        return None
      values.append(value)
    return np.concatenate(values, axis=dim)
  elif tensor.op.type == "ConcatV2":
    dim = constant_value(tensor.op.inputs[-1])
    if dim is None:
      return None
    values = []
    for x in tensor.op.inputs[:-1]:
      value = constant_value(x)
      if value is None:
        return None
      values.append(value)
    return np.concatenate(values, axis=dim)
  elif tensor.op.type == "Pack":
    values = []
    # Some imported GraphDefs have Pack ops with zero inputs. Those are invalid
    # and shouldn't be produced, but to deal sensibly with them here we check
    # and return None.
    if not tensor.op.inputs:
      return None
    # We can't handle axis != 0 Packs at the moment.
    if tensor.op.get_attr("axis") != 0:
      return None
    for x in tensor.op.inputs:
      value = constant_value(x, partial)
      if value is None and not partial:
        return None
      values.append(value)
    return np.array(values)
  elif tensor.op.type == "Fill":
    fill_shape = tensor.shape
    fill_value = constant_value(tensor.op.inputs[1])
    if fill_shape.is_fully_defined() and fill_value is not None:
      return np.full(fill_shape.as_list(), fill_value, dtype=fill_value.dtype)
    else:
      return None
  elif tensor.op.type == "Equal":
    value1 = constant_value(tensor.op.inputs[0])
    if value1 is None:
      return None
    value2 = constant_value(tensor.op.inputs[1])
    if value2 is None:
      return None
    return np.equal(value1, value2)
  elif tensor.op.type == "NotEqual":
    value1 = constant_value(tensor.op.inputs[0])
    if value1 is None:
      return None
    value2 = constant_value(tensor.op.inputs[1])
    if value2 is None:
      return None
    return np.not_equal(value1, value2)
  else:
    return None
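
A hedged usage sketch of the public wrapper built around this helper (assuming TensorFlow 1.x graph mode and the tensor_util module this function lives in; the constant inputs are illustrative):

import tensorflow as tf
from tensorflow.python.framework import tensor_util

a = tf.constant([1, 2, 3])
b = tf.constant([1, 0, 3])
eq = tf.equal(a, b)  # builds an "Equal" op in the graph

# constant_value folds the op to a NumPy array without running a session,
# via the "Equal" branch above (np.equal on the two folded inputs).
print(tensor_util.constant_value(eq))  # [ True False  True]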
    def test_mnist_float32(self):
        seed = 90
        epoch_num = 1

        state = np.random.normal(size=4).astype("float32")
        state_list = state.tolist()
        reward = np.random.random(size=[1, 1]).astype("float32")
        reward_list = reward.tolist()
        action_list = [1]
        action = np.array(action_list).astype("float32")
        mask_list = [[0, 1]]
        mask = np.array(mask_list).astype("float32")

        with fluid.dygraph.guard():
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed

            policy = Policy("PolicyModel")

            dy_state = fluid.dygraph.base.to_variable(state)
            dy_state.stop_gradient = True
            loss_probs = policy(dy_state)

            dy_mask = fluid.dygraph.base.to_variable(mask)
            dy_mask.stop_gradient = True

            loss_probs = fluid.layers.log(loss_probs)
            loss_probs = fluid.layers.elementwise_mul(loss_probs, dy_mask)
            loss_probs = fluid.layers.reduce_sum(loss_probs, dim=-1)

            dy_reward = fluid.dygraph.base.to_variable(reward)
            dy_reward.stop_gradient = True

            loss_probs = fluid.layers.elementwise_mul(dy_reward, loss_probs)
            loss = fluid.layers.reduce_sum(loss_probs)

            sgd = SGDOptimizer(learning_rate=1e-3)

            dy_param_init_value = {}

            dy_out = loss.numpy()

            for param in policy.parameters():
                dy_param_init_value[param.name] = param.numpy()

            loss.backward()
            sgd.minimize(loss)
            policy.clear_gradients()

            dy_param_value = {}
            for param in policy.parameters():
                dy_param_value[param.name] = param.numpy()

        with new_program_scope():
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed

            exe = fluid.Executor(fluid.CPUPlace() if not core.is_compiled_with_cuda()
                                 else fluid.CUDAPlace(0))

            policy = Policy("PolicyModel")

            st_sgd = SGDOptimizer(learning_rate=1e-3)

            st_state = fluid.layers.data(name='st_state',
                                         shape=[4],
                                         dtype='float32')
            st_reward = fluid.layers.data(name='st_reward',
                                          shape=[1],
                                          dtype='float32')
            st_mask = fluid.layers.data(name='st_mask',
                                        shape=[2],
                                        dtype='float32')

            st_loss_probs = policy(st_state)

            st_loss_probs = fluid.layers.log(st_loss_probs)
            st_loss_probs = fluid.layers.elementwise_mul(
                st_loss_probs, st_mask)
            st_loss_probs = fluid.layers.reduce_sum(st_loss_probs, dim=-1)

            st_loss_probs = fluid.layers.elementwise_mul(
                st_reward, st_loss_probs)
            st_loss = fluid.layers.reduce_sum(st_loss_probs)

            st_sgd.minimize(st_loss)

            # initialize params and fetch them
            static_param_init_value = {}
            static_param_name_list = []
            for param in policy.parameters():
                static_param_name_list.append(param.name)

            out = exe.run(fluid.default_startup_program(),
                          fetch_list=static_param_name_list)

            for i in range(len(static_param_name_list)):
                static_param_init_value[static_param_name_list[i]] = out[i]

            fetch_list = [st_loss.name]
            fetch_list.extend(static_param_name_list)

            out = exe.run(fluid.default_main_program(),
                          feed={
                              "st_state": state,
                              "st_reward": reward,
                              "st_mask": mask
                          },
                          fetch_list=fetch_list)

            static_param_value = {}
            static_out = out[0]
            for i in range(1, len(out)):
                static_param_value[static_param_name_list[i - 1]] = out[i]

        #self.assertTrue(np.allclose(dy_x_data.all(), static_x_data.all()))

        for key, value in six.iteritems(static_param_init_value):
            self.assertTrue(np.equal(value, dy_param_init_value[key]).all())

        self.assertTrue(np.equal(static_out, dy_out).all())

        for key, value in six.iteritems(static_param_value):
            self.assertTrue(np.equal(value, dy_param_value[key]).all())
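        # (np.equal(a, b).all() above is equivalent to np.array_equal(a, b) for
        #  same-shaped arrays; exact equality is expected here because the
        #  dygraph and static-graph programs were given the same random seed.)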