Example #1
 def position(self, l=None, t=None, squeeze=True):
     """
     return the position of the molecules at a given beamline
     position l, or at a given time t
     """
     # convert the l or t input to a t input:
     if t is None and l is None:
         raise IOError('you must input l or t.')
     if t is None:
         l = np.array(l)
         if len(l.shape) == 0:
             l = np.array([l])
         t = self.time(l=l, squeeze=squeeze)
     else:
         t = np.array(t)
         if len(t.shape) == 0:
             t = np.array([t])
     
     #formatting to make sure I can perform the array operations
     g = np.expand_dims(
         np.expand_dims(self.params.GRAVITY,0),0)
     t = np.expand_dims(t - self.t,2)
     x = np.expand_dims(self.x,0)
     v = np.expand_dims(self.v,0)
     
     pos = x + v * t + (1/2.0) * g * t**2
     
     if squeeze:
         pos = np.squeeze(pos, axis=0)
     return pos
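The stacked expand_dims calls are just setting up broadcasting, which is easy to get wrong; below is a minimal sketch of the shape logic with made-up sizes (T query times, M molecules, 3 spatial axes) standing in for the class attributes.

import numpy as np

# Hypothetical stand-ins for self.t, self.x, self.v, and self.params.GRAVITY.
T, M = 4, 5
t0 = np.zeros(M)                 # per-molecule start times
x0 = np.random.rand(M, 3)        # initial positions
v0 = np.random.rand(M, 3)        # initial velocities
g = np.array([0.0, 0.0, -9.81])  # gravity vector

t = np.random.rand(T, M)                        # one time per (query, molecule)
g_b = np.expand_dims(np.expand_dims(g, 0), 0)   # (1, 1, 3)
t_b = np.expand_dims(t - t0, 2)                 # (T, M, 1)
x_b = np.expand_dims(x0, 0)                     # (1, M, 3)
v_b = np.expand_dims(v0, 0)                     # (1, M, 3)
pos = x_b + v_b * t_b + (1/2.0) * g_b * t_b**2  # broadcasts to (T, M, 3)
print(pos.shape)                                # (4, 5, 3)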
Example #2
    def grad_EVzxVzxT_by_c(self, EVzxVzxT_list_this, Z, A, B, C, Kpred, p, r):

        P = Z.shape[0]
        R = Z.shape[1]
        N = A.shape[0]

        ainv = 1 / (self.length_scale * self.length_scale)
        siginv = 1 / (B[0, 0] * B[0, 0])

        dA = np.zeros([N, R])
        dA[:, r] = Kpred[r][:, p]

        AAt = 2 * A[:, r] * dA[:, r]

        res1 = -0.5 * np.tile(AAt, [P, 1]).T * (siginv - siginv * (1 / (siginv + 2 * ainv)) * siginv)
        res1 = np.expand_dims(res1, axis=2)
        res1 = np.repeat(res1, P, axis=2)

        res2 = dA.dot(Z.T) * (ainv * (1 / (siginv + 2 * ainv)) * siginv)
        res2 = np.expand_dims(res2, axis=2)
        res2 = np.repeat(res2, P, axis=2)

        res3 = np.swapaxes(res2, 1, 2)

        res = EVzxVzxT_list_this * (res1 + res2 + res3)

        res = np.sum(res, axis=0)

        return res
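The expand_dims / repeat / swapaxes sequence above builds matching (N, P, P) blocks so that res2 varies along one P axis and res3 (its transpose) along the other. A small standalone sketch of that pattern, with arbitrary sizes:

import numpy as np

N, P = 3, 4
m = np.arange(N * P, dtype=float).reshape(N, P)  # stands in for the (N, P) res2

block = np.expand_dims(m, axis=2)    # (N, P, 1)
block = np.repeat(block, P, axis=2)  # (N, P, P): m[n, p] copied along the last axis
block_T = np.swapaxes(block, 1, 2)   # (N, P, P): m[n, p] copied along the middle axis

# block[n, i, j] == m[n, i] while block_T[n, i, j] == m[n, j]
assert block[1, 2, 0] == m[1, 2] and block_T[1, 0, 2] == m[1, 2]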
Example #3
    def extractFeatureVector(self, featureDict, singleObject=False):
        """
        Extract the vector(s) of required features from the given feature dictionary,
        by concatenating the columns of the selected features into a matrix of new features, one row per object
        """
        featureVectors = None
        for f in self.selectedFeatures:
            if f not in featureDict:
                raise AssertionError("Feature '{}' not present in object features!".format(f))
            vec = featureDict[f]
            if len(vec.shape) == 1:
                if singleObject:
                    vec = np.expand_dims(vec, axis=0)
                else:
                    vec = np.expand_dims(vec, axis=1)
            if featureVectors is None:
                featureVectors = vec
            else:
                if len(vec.shape) == 3:
                    for row in range(vec.shape[2]):
                        featureVectors = np.hstack([featureVectors, vec[..., row]])
                elif len(vec.shape) > 3:
                    raise ValueError("Cannot deal with features of more than two dimensions yet")
                else:
                    featureVectors = np.hstack([featureVectors, vec])

        return featureVectors
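To see what the concatenation produces, here is a minimal sketch of the same column-stacking logic on a hypothetical two-object feature dictionary (the feature names are invented):

import numpy as np

featureDict = {
    'size': np.array([3.0, 7.0]),        # one scalar per object -> shape (2,)
    'center': np.array([[0.1, 0.2],
                        [0.3, 0.4]]),    # one 2-vector per object -> shape (2, 2)
}
featureVectors = None
for f in ['size', 'center']:
    vec = featureDict[f]
    if len(vec.shape) == 1:
        vec = np.expand_dims(vec, axis=1)  # column vector, one row per object
    featureVectors = vec if featureVectors is None else np.hstack([featureVectors, vec])
print(featureVectors.shape)  # (2, 3): objects x concatenated feature columns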
Example #4
 def velocity(self,l=None, t=None, squeeze=True):
     """
     return velocities at a given position l, or a given time t
     (since this is just a ballistic model, the output does
      not depend on l or t, but it could in a non-ballistic model)
     """
     # convert the l or t input to a t input:
     if t is None and l is None:
         raise IOError('you must input l or t.')
     if t is None:
         l = np.array(l)
         if len(l.shape) == 0:
             l = np.array([l])
         t = self.time(l=l, squeeze=squeeze)
     else:
         t = np.array(t)
         if len(t.shape) == 0:
             t = np.array([t])
     
     #formatting to make sure I can perform the array operations
     g = np.expand_dims(
         np.expand_dims(self.params.GRAVITY,0),0)
     t = np.expand_dims(t - self.t,2)
     x = np.expand_dims(self.x,0)
     v = np.expand_dims(self.v,0)
     
     vel = v + g * t
     
     if squeeze:
         vel = np.squeeze(vel, axis=0)
     return vel
Example #5
    def grad_EVzxVzxT_by_hyper_exact(self, EVzxVzxT_list_this, Z, A, B, hyperno):

        P = Z.shape[0]
        R = Z.shape[1]
        N = A.shape[0]

        if hyperno != 0:
            return EVzxVzxT_list_this * 0

        alpha = self.length_scale * self.length_scale

        I = np.identity(R)
        S = np.diag(B[0, :] * B[0, :])
        Sinv = np.diag(1 / (B[0, :] * B[0, :]))
        C = I * alpha
        Cinv = I * (1 / alpha)
        CinvSinv = 2 * Cinv + Sinv
        CinvSinv_inv = np.diag(1 / CinvSinv.diagonal())

        dC = self.length_scale * I
        dCinv = -Cinv.dot(dC).dot(Cinv)
        dCinvSinv = 2 * dCinv
        dCinvSinv_inv = -CinvSinv_inv.dot(dCinvSinv).dot(CinvSinv_inv)

        S1 = (
            dCinv
            - dCinv.dot(CinvSinv_inv).dot(Cinv)
            - Cinv.dot(dCinvSinv_inv).dot(Cinv)
            - Cinv.dot(CinvSinv_inv).dot(dCinv)
        )
        S2 = -Sinv.dot(dCinvSinv_inv).dot(Sinv)
        S3 = Sinv.dot(dCinvSinv_inv).dot(Cinv) + Sinv.dot(CinvSinv_inv).dot(dCinv)
        S4 = dCinv.dot(CinvSinv_inv).dot(Cinv) + Cinv.dot(dCinvSinv_inv).dot(Cinv) + Cinv.dot(CinvSinv_inv).dot(dCinv)

        T1s = np.tile(Z.dot(S1).dot(Z.T).diagonal(), [P, 1])
        T1 = np.tile(T1s, [N, 1, 1])
        T2s = T1s.T
        T2 = np.tile(T2s, [N, 1, 1])
        T3 = np.tile(Z.dot(S4).dot(Z.T), [N, 1, 1])
        T4 = np.tile(A.dot(S2).dot(A.T).diagonal(), [P, 1]).T
        T4 = np.expand_dims(T4, axis=2)
        T4 = np.repeat(T4, P, axis=2)
        T5 = A.dot(S3).dot(Z.T)
        T5 = np.expand_dims(T5, axis=2)
        T5 = np.repeat(T5, P, axis=2)
        T6 = np.swapaxes(T5, 1, 2)

        SCinvI = 2 * Cinv.dot(S) + I
        SCinvI_inv = np.diag(1 / SCinvI.diagonal())
        (temp, logDetSCinvI) = np.linalg.slogdet(SCinvI)
        detSCinvI = np.exp(logDetSCinvI)
        dDetSCinvI = -0.5 * np.power(detSCinvI, -0.5) * SCinvI_inv.dot(2 * dCinv).dot(S).trace()

        expTerm = EVzxVzxT_list_this / np.power(detSCinvI, -0.5)

        res = EVzxVzxT_list_this * (-0.5 * T1 - 0.5 * T2 + T3 - 0.5 * T4 + T5 + T6) + dDetSCinvI * expTerm

        res = np.sum(res, axis=0)

        return res
Example #6
def evaluation_10_fold(root='./result/pytorch_result.mat'):
    ACCs = np.zeros(10)
    result = scipy.io.loadmat(root)
    for i in range(10):
        fold = result['fold']
        flags = result['flag']
        featureLs = result['fl']
        featureRs = result['fr']

        valFold = fold != i
        testFold = fold == i
        flags = np.squeeze(flags)

        mu = np.mean(np.concatenate((featureLs[valFold[0], :], featureRs[valFold[0], :]), 0), 0)
        mu = np.expand_dims(mu, 0)
        featureLs = featureLs - mu
        featureRs = featureRs - mu
        featureLs = featureLs / np.expand_dims(np.sqrt(np.sum(np.power(featureLs, 2), 1)), 1)
        featureRs = featureRs / np.expand_dims(np.sqrt(np.sum(np.power(featureRs, 2), 1)), 1)

        scores = np.sum(np.multiply(featureLs, featureRs), 1)
        threshold = getThreshold(scores[valFold[0]], flags[valFold[0]], 10000)
        ACCs[i] = getAccuracy(scores[testFold[0]], flags[testFold[0]], threshold)
    #     print('{}    {:.2f}'.format(i+1, ACCs[i] * 100))
    # print('--------')
    # print('AVE    {:.2f}'.format(np.mean(ACCs) * 100))
    return ACCs
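The two normalization lines are the central expand_dims use here: an (n, d) feature matrix divided by an (n, 1) column of row norms. The same step in isolation, on random data:

import numpy as np

features = np.random.randn(6, 128)                       # n x d features
norms = np.sqrt(np.sum(np.power(features, 2), 1))        # shape (6,)
features = features / np.expand_dims(norms, 1)           # (6, 128) / (6, 1)
print(np.allclose(np.linalg.norm(features, axis=1), 1))  # True: unit-norm rows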
Example #7
def compute_overlap(a, b):
    """
    Parameters
    ----------
    a: (N, 4) ndarray of float
    b: (K, 4) ndarray of float
    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    area = (b[:, 2] - b[:, 0] + 1) * (b[:, 3] - b[:, 1] + 1)

    iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0]) + 1
    ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1]) + 1

    iw = np.maximum(iw, 0)
    ih = np.maximum(ih, 0)

    ua = np.expand_dims((a[:, 2] - a[:, 0] + 1) * (a[:, 3] - a[:, 1] + 1), axis=1) + area - iw * ih

    ua = np.maximum(ua, np.finfo(float).eps)

    intersection = iw * ih

    return intersection / ua
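A quick check with hand-made boxes; note this version uses the +1 pixel-inclusive convention, so a box from 0 to 9 has width 10:

import numpy as np

a = np.array([[0, 0, 9, 9]], dtype=float)     # one 10x10 query box
b = np.array([[0, 0, 9, 9],                   # identical box
              [5, 5, 14, 14]], dtype=float)   # half-offset box
print(compute_overlap(a, b))  # [[1.0  0.1428...]]: IoU of 1 and 25/175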
Example #8
def fix_dimensions_domain(domain):
    """
    Fix the dimensions of an input so that it is a :class:`numpy.ndarray` of
    shape (dim, 2).

    :param domain: numerical object of at least length 2
    :type domain: :class:`collections.Iterable`
    :rtype: :class:`numpy.ndarray`
    :returns: array of shape (dim, 2)

    """
    if not isinstance(domain, np.ndarray):
        if len(domain) == 2:
            domain = np.expand_dims(domain, axis=0)
        else:
            raise TypeError("The length must be at least 2.")
    elif len(domain.shape) == 1 and domain.shape[0] == 2:
        domain = np.expand_dims(domain, axis=0)
    elif len(domain.shape) == 2 and domain.shape[1] == 2:
        pass # The shape is already correct!
    elif len(domain.shape) == 2 and domain.shape[0] == 2:
        domain = domain.transpose()
    else:
        raise TypeError("At least one dimension must have a length of 2.")
    return domain
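Behavior on the three accepted input shapes, as a quick sanity check:

import numpy as np

print(fix_dimensions_domain([0.0, 1.0]).shape)        # (1, 2): a plain pair
print(fix_dimensions_domain(np.zeros((5, 2))).shape)  # (5, 2): already correct
print(fix_dimensions_domain(np.zeros((2, 5))).shape)  # (5, 2): transposed input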
Example #9
    def cloud_callback(self, cloud):
        points = point_cloud2.read_points(cloud)
        points_arr = np.asarray(list(points))

        # Unpack RGB color info
        _float2rgb_vectorized = np.vectorize(_float2rgb)
        r, g, b = _float2rgb_vectorized(points_arr[:, 3])

        # Concatenate and Reshape
        r = np.expand_dims(r, 1)  # add a trailing axis (for concatenation)
        g = np.expand_dims(g, 1)
        b = np.expand_dims(b, 1)
        points_rgb = np.concatenate((points_arr[:, 0:3], r, g, b), axis=1)
        image_rgb = points_rgb.reshape(cloud.height, cloud.width, 6)
        z = copy.deepcopy(image_rgb[:, :, 2])  # get depth values (I think)
        image_np = copy.deepcopy(image_rgb[:, :, 3:].astype('uint8'))
        #code.interact(local=locals())
        
        # TWO-METER DISTANCE FILTER
        z[np.isnan(z)] = 0.0
        mask = np.logical_or(z > 2, z == 0)
        for i in range(image_np.shape[2]): 
            image_np[:, :, i][mask] = 0
        
        # Convert to Image msg
        image_cv = cv.fromarray(image_np)
        image_msg = self.bridge.cv_to_imgmsg(image_cv, encoding='bgr8')
        self.pub.publish(image_msg)
Example #10
def load_mask_labels():
    '''Load both target and style masks.
    A mask image (nr x nc) with m labels/colors will be loaded
    as a 4D boolean tensor: (1, m, nr, nc) for 'th' or (1, nr, nc, m) for 'tf'
    '''
    target_mask_img = load_img(target_mask_path,
                               target_size=(img_nrows, img_ncols))
    target_mask_img = img_to_array(target_mask_img)
    style_mask_img = load_img(style_mask_path,
                              target_size=(img_nrows, img_ncols))
    style_mask_img = img_to_array(style_mask_img)
    if K.image_dim_ordering() == 'th':
        mask_vecs = np.vstack([style_mask_img.reshape((3, -1)).T,
                               target_mask_img.reshape((3, -1)).T])
    else:
        mask_vecs = np.vstack([style_mask_img.reshape((-1, 3)),
                               target_mask_img.reshape((-1, 3))])

    labels = kmeans(mask_vecs, nb_labels)
    style_mask_label = labels[:img_nrows *
                              img_ncols].reshape((img_nrows, img_ncols))
    target_mask_label = labels[img_nrows *
                               img_ncols:].reshape((img_nrows, img_ncols))

    stack_axis = 0 if K.image_dim_ordering() == 'th' else -1
    style_mask = np.stack([style_mask_label == r for r in xrange(nb_labels)],
                          axis=stack_axis)
    target_mask = np.stack([target_mask_label == r for r in xrange(nb_labels)],
                           axis=stack_axis)

    return (np.expand_dims(style_mask, axis=0),
            np.expand_dims(target_mask, axis=0))
Example #11
    def _extrapolate_cols(self, data, first=True, last=True):
        """Extrapolate the column of data, to get the first and last together
        with the data.

        """

        if first:
            pos = self.col_indices[:2]
            first_column = _linear_extrapolate(pos,
                                               (data[:, 0], data[:, 1]),
                                               self.hcol_indices[0])
        if last:
            pos = self.col_indices[-2:]
            last_column = _linear_extrapolate(pos,
                                              (data[:, -2], data[:, -1]),
                                              self.hcol_indices[-1])

        if first and last:
            return np.hstack((np.expand_dims(first_column, 1),
                              data,
                              np.expand_dims(last_column, 1)))
        elif first:
            return np.hstack((np.expand_dims(first_column, 1),
                              data))
        elif last:
            return np.hstack((data,
                              np.expand_dims(last_column, 1)))
        else:
            return data
Example #12
def CreateCrossCorrelationTable(maindir, file_names, outpath):
    """
    Takes a directory and a list of numpy files, horizontally concatenates
    them all, and saves the output to outpath. Labels are also added.
    """
    for number, file_name in enumerate(file_names):
        # Load the correlation column.
        database_brain = np.load(maindir + os.sep + file_name)
        if number == 0:
            concatenate_data = database_brain
        else:
            concatenate_data = np.concatenate((concatenate_data,
                database_brain), axis=1)

    # Add concept indices:
    processed_fn = [string.replace('.nii.gz.npy', '') for string in file_names]
    processed_fn = [string.replace('_main', '') for string in processed_fn]
    horz_labels = np.array(processed_fn)
    # Expand to 2D so the labels can be swapped and concatenated.
    horz_labels = np.expand_dims(horz_labels, axis=0)
    vert_labels = np.swapaxes(horz_labels, 0, 1)
    horz_labels = np.insert(horz_labels, 0, 0)
    # np.insert flattens its input, so expand to 2D again.
    horz_labels = np.expand_dims(horz_labels, axis=0)


    concatenate_data = np.char.mod('%10.3f', concatenate_data)
    concatenate_data = np.concatenate((vert_labels, concatenate_data), axis=1)
    concatenate_data = np.concatenate((horz_labels, concatenate_data), axis=0)

    np.save(outpath, concatenate_data)
    np.savetxt(outpath, concatenate_data, fmt='%s', delimiter=',')
Example #13
def twopoint_spidx_bootstrap(freq, flux, flux_err, niter=10000):
    """
    Quick bootstrap for spectral index calculation
    freq: 2 array
    flux: 2 or 2xN array
    flux_err: 2 or 2xN array
    N is the number of sources
    """
    # calculate spidx assuming [iter,source,freq_point] shapes
    def spidx(freq, flux):
        return np.log10(flux[:,:,0]/flux[:,:,1])/np.log10(freq[:,:,0]/freq[:,:,1])

    freq = np.array(freq).astype(float)
    flux = np.array(flux).astype(float)
    flux_err = np.array(flux_err).astype(float)
    # if only 1 source, add degenerate axis
    if flux.shape == (2,): flux = np.expand_dims(flux, axis=1)
    if flux_err.shape == (2,): flux_err = np.expand_dims(flux_err, axis=1)
    flux = flux.T
    flux_err = flux_err.T
    nsource = flux.shape[0]

    results = np.zeros(shape=(niter,nsource))
    random_flux = np.resize(flux, (niter, nsource, 2)) + np.resize(flux_err, (niter, nsource, 2)) * np.random.randn(niter, nsource, 2)
    random_flux[random_flux <= 0] = np.nan # remove negatives; this creates a bias
    freq = np.resize(freq, (niter, nsource, 2))
    results = spidx(freq, random_flux)

    mean = np.nanmean(results,axis=0)
    err = np.nanstd(results,axis=0)
    return mean, err
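A usage sketch for a single source whose two fluxes follow a spectral index of -0.7 exactly (all values invented for illustration); the bootstrap mean should land near -0.7:

import numpy as np

freq = [150e6, 1400e6]                # two frequencies in Hz
flux = [1.0, (1400. / 150.) ** -0.7]  # fluxes following S ~ nu^-0.7
flux_err = [0.05, 0.01]
mean, err = twopoint_spidx_bootstrap(freq, flux, flux_err, niter=1000)
print(mean, err)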
Example #14
  def testVarianceAndCovarianceMatrix(self):
    amp = np.float64(.5)
    len_scale = np.float64(.2)
    jitter = np.float64(1e-4)
    observation_noise_variance = np.float64(3e-3)

    kernel = psd_kernels.ExponentiatedQuadratic(amp, len_scale)

    index_points = np.expand_dims(np.random.uniform(-1., 1., 10), -1)

    gp = tfd.GaussianProcess(
        kernel,
        index_points,
        observation_noise_variance=observation_noise_variance,
        jitter=jitter)

    def _kernel_fn(x, y):
      return amp * np.exp(-.5 * (np.squeeze((x - y)**2)) / (len_scale**2))

    expected_covariance = (
        _kernel_fn(np.expand_dims(index_points, 0),
                   np.expand_dims(index_points, 1)) +
        (observation_noise_variance + jitter) * np.eye(10))

    self.assertAllClose(expected_covariance,
                        self.evaluate(gp.covariance()))
    self.assertAllClose(np.diag(expected_covariance),
                        self.evaluate(gp.variance()))
Example #15
def pad(total_boxes, w, h):
    # compute the padding coordinates (pad the bounding boxes to square)
    tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32)
    tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32)
    numbox = total_boxes.shape[0]

    dx = np.ones((numbox), dtype=np.int32)
    dy = np.ones((numbox), dtype=np.int32)
    edx = tmpw.copy().astype(np.int32)
    edy = tmph.copy().astype(np.int32)

    x = total_boxes[:, 0].copy().astype(np.int32)
    y = total_boxes[:, 1].copy().astype(np.int32)
    ex = total_boxes[:, 2].copy().astype(np.int32)
    ey = total_boxes[:, 3].copy().astype(np.int32)

    tmp = np.where(ex > w)
    edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1)
    ex[tmp] = w

    tmp = np.where(ey > h)
    edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1)
    ey[tmp] = h

    tmp = np.where(x < 1)
    dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1)
    x[tmp] = 1

    tmp = np.where(y < 1)
    dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1)
    y[tmp] = 1

    return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph
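A small sketch of what pad returns for one in-bounds box and one box that spills past the right edge of a hypothetical 100x100 image:

import numpy as np

total_boxes = np.array([[10., 10., 20., 20.],    # fully inside
                        [90., 10., 110., 20.]])  # sticks out past x = w
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes, w=100, h=100)
print(ex)   # [ 20 100]: the second box is clipped at the image edge
print(edx)  # [11 11]: only 11 of the second box's 21 columns come from the image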
Example #16
def gaussian_smooth(xdata, ydata, sigma=1., x=None):
    """Returns Gaussian smoothed signal.

    Arguments
    ---------
    xdata -- ndarray, co-ordinate of data points
    ydata -- ndarray, ordinates of data points
    sigma -- float (optional, default 1.), x-scale for smoothing
    x (optional) -- float or ndarray,
       values at which the Gaussian smoothed signal is computed
       default xdata

    Returns
    -------
    sequence of (x, gy) co-ordinates of the Gaussian smoothed signal.
    """
    # convert axis for data
    xx = np.expand_dims(xdata, axis=1)
    yy = np.expand_dims(ydata, axis=1)

    def g(t, t0, s):
        return np.exp(-(t-t0)**2/(2.*s**2))/(np.sqrt(2.*np.pi)*s)

    def num(t):
        return np.sum(g(t, xx, sigma)*yy, axis=0)

    def den(t):
        return np.sum(g(t, xx, sigma), axis=0)

    if x is not None:
        u = np.array(x)
    else:
        u = xdata
    return zip(u, num(u)/den(u))
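A usage sketch on a noisy sine; on Python 3 the returned zip is an iterator, so materialize it with list():

import numpy as np

xdata = np.linspace(0, 2 * np.pi, 50)
ydata = np.sin(xdata) + 0.1 * np.random.randn(50)
smoothed = list(gaussian_smooth(xdata, ydata, sigma=0.3))
x0, gy0 = smoothed[25]
print(x0, gy0)  # gy0 should sit close to sin(x0)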
Example #17
def prepare_submission():
    mypath = '/Users/philipppushnyakov/data/test/'
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    s = open('submission.csv', 'wt')
    s.write('Id,label' + '\n')
    for f in onlyfiles:
        im_name = mypath + f
        try:
            im = cv2.resize(cv2.imread(im_name), (64,64))
            #im = cv2.cvtColor(cv2.resize(cv2.imread(im_name), (64, 64)).astype(np.float32), cv2.COLOR_BGR2GRAY)
        except KeyboardInterrupt: raise
        except:
            print(im_name)
            continue
        #print im.shape
        #im = im.transpose((2,0,1))
        im = np.expand_dims(im, axis=0)
        im = np.expand_dims(im, axis=0)
        #for j in range(1):
        #im[:,j,:,:] -= np.mean(im[:,j,:,:])
        #im[:,j,:,:] /= np.std(im[:,j,:,:])
        #im -= np.mean(im)
        #im /= np.std(im)
        out = model.predict(im)
        #print out
        s.write(f.split('.')[0] + ',' + str(np.argmax(out) + 1) + '\n')
Example #18
def otsu(image, nclasses=2, nbins=256):
    """
    Calculates the threshold levels for an image using the multilevel
    otsu implementation from Deng-Yuan Huang and Chia-Hung Wang,
    "Optimal multi-level thresholding using a two-stage Otsu optimization
    approach", Pattern Recognition Letters 30 (2009) 275-284,
    doi:10.1016/j.patrec.2008.10.003.

    This is an implementation of the recursive algorithm (lookup table)
    from Liao (2001), and referenced by Huang and Wang.

    Input
    -----
    :image, array-like: Image intensity data
    :nclasses, int: (optional) Number of classes into which the data should
        be subdivided. Default: 2 (two classes --> one threshold)
    :nbins, int: (optional) A histogram of *nbins* will be made from the
        intensity values in *image*. This provides the number of bins in
        that histogram. Default: 256


    Output
    ------
    Threshold levels (tuple)
    """
    # ensure the image is a numpy array-like
    image = np.asarray(image)
    # calculate the histogram of intensities
    prob, edges = np.histogram(image.flatten(), bins=nbins, density=True)
    nbins = len(prob)
    # H-table (and similarly P-table and S-table from which H-table
    # is derived) is a lookup table where the start point is the first
    # index and the end point is the second index, i.e.
    #   `htable[12, 27]`
    # holds the modified variance of the class in the range [12, 27)
    triu = np.triu(np.ones((nbins, nbins)))
    # calculate the P-table
    ptable = np.concatenate([
        np.expand_dims(np.dot(triu[:,:threshold], prob[:threshold]), axis=1)
        for threshold in range(1, nbins+1)], axis=1)
    # calculate the S-table
    stable = np.concatenate([
        np.expand_dims(np.dot(triu[:,:threshold],
                       np.arange(threshold)*prob[:threshold]), axis=1)
        for threshold in range(1, nbins+1)], axis=1)
    # calculate the H-table
    olderr = np.seterr(divide='ignore')
    htable = stable**2/ptable
    np.seterr(**olderr)
    # find the thresholds that maximize the interclass variance
    nthresh = nclasses-1
    max_variance = 0
    for ijk in combinations(range(1, nbins-1), nthresh):
        ijk = (0,) + ijk + (-1,) # append the start and end points
        variance = sum([htable[i,j] for i,j in zip(ijk[:-1], ijk[1:])])
        if variance > max_variance:
            max_variance = variance
            thresholds = tuple((edges[i] + edges[i+1])/2.
                               for i in ijk[1:-1])
    return thresholds
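A quick check on a synthetic two-mode image (values invented; assumes numpy and itertools.combinations are imported as the snippet expects). With nclasses=2 the single returned threshold should fall between the two modes:

import numpy as np

rng = np.random.RandomState(0)
image = np.concatenate([rng.normal(50, 5, 5000),
                        rng.normal(200, 5, 5000)])
print(otsu(image, nclasses=2, nbins=64))  # one threshold between the modes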
Example #19
  def testReturnPaddedImageWithNonZeroPadValue(self):
    for dtype in [np.int32, np.int64, np.float32, np.float64]:
      image = np.dstack([[[5, 6],
                          [9, 0]],
                         [[4, 3],
                          [3, 5]]]).astype(dtype)
      expected_image = np.dstack([[[255, 255, 255, 255, 255],
                                   [255, 255, 255, 255, 255],
                                   [255, 5, 6, 255, 255],
                                   [255, 9, 0, 255, 255],
                                   [255, 255, 255, 255, 255]],
                                  [[255, 255, 255, 255, 255],
                                   [255, 255, 255, 255, 255],
                                   [255, 4, 3, 255, 255],
                                   [255, 3, 5, 255, 255],
                                   [255, 255, 255, 255, 255]]]).astype(dtype)

      with self.session() as sess:
        padded_image = preprocess_utils.pad_to_bounding_box(
            image, 2, 1, 5, 5, 255)
        padded_image = sess.run(padded_image)
        self.assertAllClose(padded_image, expected_image)
        # Add batch size = 1 to image.
        padded_image = preprocess_utils.pad_to_bounding_box(
            np.expand_dims(image, 0), 2, 1, 5, 5, 255)
        padded_image = sess.run(padded_image)
        self.assertAllClose(padded_image, np.expand_dims(expected_image, 0))
Example #20
def update_statistics(Y, P, beta=0.9):
  """
  Args
  ----
  Y:
      2d array whose columns encode the
      activity of the output units
  P:
      2d array encoding the pairwise average
      activity of the output units
  beta:
      weight of the exponential moving average (default 0.9)

  Returns
  -------
      The updated average activities
  """

  (n, d) = Y.shape
  

  A = np.expand_dims(Y, axis=1) * np.expand_dims(Y, axis=0)
  assert(A.shape == (n, n, d))
  
  Q = np.mean(A, axis=2)
  Q[np.where(Q == 0.)] = 0.000001
  assert(P.shape == Q.shape)

  return beta*P + (1-beta)*Q
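A usage sketch with random activity; P must already have the pairwise (n, n) shape:

import numpy as np

n, d = 4, 10
Y = np.random.rand(n, d)   # n output units, d samples in the columns
P = np.full((n, n), 0.25)  # running pairwise averages
P = update_statistics(Y, P, beta=0.9)
print(P.shape)  # (4, 4)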
Example #21
 def _read_img(self, img_name, label_name):
     img = Image.open(os.path.join(self.root_dir, img_name))
     label = Image.open(os.path.join(self.root_dir, label_name))
     assert img.size == label.size
     img = np.array(img, dtype=np.float32)  # (h, w, c)
     label = np.array(label)  # (h, w)
     if self.cut_off_size is not None:
         max_hw = max(img.shape[0], img.shape[1])
         min_hw = min(img.shape[0], img.shape[1])
         if min_hw > self.cut_off_size:
             rand_start_max = round(np.random.uniform(0, max_hw - self.cut_off_size - 1))
             rand_start_min = round(np.random.uniform(0, min_hw - self.cut_off_size - 1))
             if img.shape[0] == max_hw :
                 img = img[rand_start_max : rand_start_max + self.cut_off_size, rand_start_min : rand_start_min + self.cut_off_size]
                 label = label[rand_start_max : rand_start_max + self.cut_off_size, rand_start_min : rand_start_min + self.cut_off_size]
             else :
                 img = img[rand_start_min : rand_start_min + self.cut_off_size, rand_start_max : rand_start_max + self.cut_off_size]
                 label = label[rand_start_min : rand_start_min + self.cut_off_size, rand_start_max : rand_start_max + self.cut_off_size]
         elif max_hw > self.cut_off_size:
             rand_start = round(np.random.uniform(0, max_hw - min_hw - 1))
             if img.shape[0] == max_hw :
                 img = img[rand_start : rand_start + min_hw, :]
                 label = label[rand_start : rand_start + min_hw, :]
             else :
                 img = img[:, rand_start : rand_start + min_hw]
                 label = label[:, rand_start : rand_start + min_hw]
     reshaped_mean = self.mean.reshape(1, 1, 3)
     img = img - reshaped_mean
     img = np.swapaxes(img, 0, 2)
     img = np.swapaxes(img, 1, 2)  # (c, h, w)
     img = np.expand_dims(img, axis=0)  # (1, c, h, w)
     label = np.array(label)  # (h, w)
     label = np.expand_dims(label, axis=0)  # (1, h, w)
     return (img, label)
Example #22
def prob(samples, data, rho_D_M, d_distr_samples, d_Tree=None): 
    r"""
    
    Calculates :math:`P_{\Lambda}(\mathcal{V}_{\lambda_{samples}})`, the
    probability associated with a set of voronoi cells defined by the model
    solves at :math:`(\lambda_{samples})` where the volumes of these voronoi
    cells are assumed to be equal under the MC assumption.

    :param samples: The samples in parameter space for which the model was run.
    :type samples: :class:`~numpy.ndarray` of shape (num_samples, ndim)
    :param data: The data from running the model given the samples.
    :type data: :class:`~numpy.ndarray` of size (num_samples, mdim)
    :param rho_D_M: The simple function approximation of rho_D
    :type rho_D_M: :class:`~numpy.ndarray` of shape  (M,) 
    :param d_distr_samples: The samples in the data space that define a
        partition of D for the simple function approximation
    :type d_distr_samples: :class:`~numpy.ndarray` of shape  (M, mdim) 
    :param d_Tree: :class:`~scipy.spatial.KDTree` for d_distr_samples
    :rtype: tuple of :class:`~numpy.ndarray` of sizes (num_samples,),
        (num_samples,), (ndim, num_l_emulate), (num_samples,), (num_l_emulate,)
    :returns: (P, lam_vol, io_ptr) where P is the
        probability associated with samples, and lam_vol the volumes associated
        with the samples, io_ptr a pointer from data to M bins.

    """
    if len(samples.shape) == 1:
        samples = np.expand_dims(samples, axis=1) 
    if len(data.shape) == 1:
        data = np.expand_dims(data, axis=1) 
    if len(d_distr_samples.shape) == 1:
        d_distr_samples = np.expand_dims(d_distr_samples, axis=1)
    if d_Tree is None:
        d_Tree = spatial.KDTree(d_distr_samples)

    # Set up local arrays for parallelism
    local_index = range(0+comm.rank, samples.shape[0], comm.size)
    samples_local = samples[local_index, :]
    data_local = data[local_index, :]
    local_array = np.array(local_index, dtype='int64')
        
    # Determine which inputs go to which M bins using the QoI
    (_, io_ptr) = d_Tree.query(data_local)

    # Apply the standard MC approximation and
    # calculate probabilities
    P_local = np.zeros((samples_local.shape[0],))
    for i in range(rho_D_M.shape[0]):
        Itemp = np.equal(io_ptr, i)
        Itemp_sum = np.sum(Itemp)
        Itemp_sum = comm.allreduce(Itemp_sum, op=MPI.SUM)
        if Itemp_sum > 0:
            P_local[Itemp] = rho_D_M[i]/Itemp_sum 
    P_global = util.get_global_values(P_local)
    global_index = util.get_global_values(local_array)
    P = np.zeros(P_global.shape)
    P[global_index] = P_global[:]

    lam_vol = (1.0/float(samples.shape[0]))*np.ones((samples.shape[0],))

    return (P, lam_vol, io_ptr)
Example #23
    def _perform_clip(self, _filtered_data, axis=None):
        """
        Perform sigma clip by comparing the data to the minimum and
        maximum values (median + sig * standard deviation). Use
        sigma_lower and sigma_upper to get the correct limits. Data
        values less or greater than the minimum / maximum values
        will have True set in the mask array.
        """

        if _filtered_data.size == 0:
            return _filtered_data

        max_value = self.cenfunc(_filtered_data, axis=axis)
        std = self.stdfunc(_filtered_data, axis=axis)
        min_value = max_value - std * self.sigma_lower
        max_value += std * self.sigma_upper

        if axis is not None:
            if axis != 0:
                min_value = np.expand_dims(min_value, axis=axis)
                max_value = np.expand_dims(max_value, axis=axis)
        if max_value is np.ma.masked:
            max_value = np.ma.MaskedArray(np.nan, mask=True)
            min_value = np.ma.MaskedArray(np.nan, mask=True)

        _filtered_data.mask |= _filtered_data > max_value
        _filtered_data.mask |= _filtered_data < min_value

        return _filtered_data
Example #24
def compute_overlap(a, b):
    """
    Code originally from https://github.com/rbgirshick/py-faster-rcnn.
    Parameters
    ----------
    a: (N, 4) ndarray of float
    b: (K, 4) ndarray of float
    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])

    iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0])
    ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1])

    iw = np.maximum(iw, 0)
    ih = np.maximum(ih, 0)

    ua = np.expand_dims((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), axis=1) + area - iw * ih

    ua = np.maximum(ua, np.finfo(float).eps)

    intersection = iw * ih

    return intersection / ua  
Example #25
def get_inception_score(images, splits=10):
  assert(type(images) == list)
  assert(type(images[0]) == np.ndarray)
  assert(len(images[0].shape) == 3)
  assert(np.max(images[0]) > 10)
  assert(np.min(images[0]) >= 0.0)
  inps = []
  for img in images:
    img = img.astype(np.float32)
    inps.append(np.expand_dims(img, 0))
  bs = 100
  with tf.Session() as sess:
    preds = []
    n_batches = int(math.ceil(float(len(inps)) / float(bs)))
    for i in range(n_batches):
        # sys.stdout.write(".")
        # sys.stdout.flush()
        inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
        inp = np.concatenate(inp, 0)
        pred = sess.run(softmax, {'ExpandDims:0': inp})
        preds.append(pred)
    preds = np.concatenate(preds, 0)
    scores = []
    for i in range(splits):
      part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
      kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
      kl = np.mean(np.sum(kl, 1))
      scores.append(np.exp(kl))
    return np.mean(scores), np.std(scores)
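The scoring loop at the end is independent of TensorFlow, so the KL-based split computation can be sketched on a stand-in (n, 1000) softmax matrix:

import numpy as np

preds = np.random.dirichlet(np.ones(1000), size=500)  # stand-in softmax outputs
splits, scores = 10, []
for i in range(splits):
    part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
    # mean KL(p(y|x) || p(y)) over the split, then exponentiated
    kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
    scores.append(np.exp(np.mean(np.sum(kl, 1))))
print(np.mean(scores), np.std(scores))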
Example #26
def np_matrix_to_tf_sparse(np_matrix, row_slices=None,
                           col_slices=None, transpose=False,
                           shuffle=False):
  """Simple util to slice non-zero np matrix elements as tf.SparseTensor."""
  indices = np.nonzero(np_matrix)

  # Only allow slices of whole rows or whole columns.
  assert not (row_slices is not None and col_slices is not None)

  if row_slices is not None:
    selected_ind = np.concatenate(
        [np.where(indices[0] == r)[0] for r in row_slices], 0)
    indices = (indices[0][selected_ind], indices[1][selected_ind])

  if col_slices is not None:
    selected_ind = np.concatenate(
        [np.where(indices[1] == c)[0] for c in col_slices], 0)
    indices = (indices[0][selected_ind], indices[1][selected_ind])

  if shuffle:
    shuffled_ind = [x for x in range(len(indices[0]))]
    random.shuffle(shuffled_ind)
    indices = (indices[0][shuffled_ind], indices[1][shuffled_ind])

  ind = (np.concatenate(
      (np.expand_dims(indices[1], 1),
       np.expand_dims(indices[0], 1)), 1).astype(np.int64) if transpose else
         np.concatenate((np.expand_dims(indices[0], 1),
                         np.expand_dims(indices[1], 1)), 1).astype(np.int64))
  val = np_matrix[indices].astype(np.float32)
  shape = (np.array(
      [max(indices[1]) + 1, max(indices[0]) + 1]).astype(np.int64) if transpose
           else np.array(
               [max(indices[0]) + 1, max(indices[1]) + 1]).astype(np.int64))
  return tf.SparseTensor(ind, val, shape)
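A usage sketch (TF 1.x, matching the snippet's API):

import numpy as np
import tensorflow as tf

m = np.array([[1.0, 0.0],
              [0.0, 2.0]])
sp = np_matrix_to_tf_sparse(m)
with tf.Session() as sess:
    print(sess.run(sp))  # indices [[0 0], [1 1]], values [1. 2.], shape [2 2]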
Example #27
def tiff_sino_to_image_slice(tiffdir, slice_ind):
    """
    Convert TIFF of sinograms and process to horizontal slices of sinograms.
    Assume structure of files from Octopus, and that files are numbered in
    uniform order
    Need tiffdir to be the path to the directory of files, ending with "/"
    slice_ind is a 2-array for the first and last slice (as a fraction of the
    whole list) to be reconstructed. slice_ind[0]=slice_ind[1] is permitted.
    """
    import glob
    from PIL import Image

    files = glob.glob(tiffdir+'*.tif')
    #Read in data
    index = (np.round(slice_ind*len(files))).astype('int')
    if slice_ind[0] == slice_ind[1]:
        files = [files[index[0]]]
    else:
        files = files[index[0]:index[1]]
    sinos = np.expand_dims(np.array(Image.open(files[0])), 2)
    if len(files) > 1:
        for i in range(len(files)-1):
            sinos = np.concatenate((sinos, np.expand_dims(
                np.array(Image.open(files[i+1])), 2)), 2)
    sinos = np.transpose(sinos,(1,0,2))
    return sinos
Example #28
def extract_imfeats( hdf5name, network ):

    # Image files
    hdf5file=h5py.File(hdf5name)

    # Final output of neural network
    imfeatures = np.zeros( (0,4096) )

    # Loop through all the images in the HDF5 file
    for imname in hdf5file.keys():
        img = 1.0 - hdf5file[imname].value /255.0 
        shards = np.zeros( (0, 1, 56, 56) )

        # Collect the inputs for the image
        for shard in StepShingler(img, hstep=30, vstep=30, shingle_size=(56,56)):    
            shard = np.expand_dims(np.expand_dims(shard, 0),0)
            shards = np.concatenate( (shards, shard) )
        print("Loaded %d shards in and predicting on image %s" % (len(shards), imname))
        sys.stdout.flush()

        # Predict the neural network and append the mean of features to overall imfeatures
        features = network.predict( shards, verbose=1 )
        imfeatures = np.concatenate( (imfeatures, np.expand_dims(features.mean(axis=0),0)) )
        
    return imfeatures
Example #29
def ReadSegmentFlow(path, offsets, new_height, new_width, new_length, is_color, name_pattern):
    if is_color:
        cv_read_flag = cv2.IMREAD_COLOR         # > 0
    else:
        cv_read_flag = cv2.IMREAD_GRAYSCALE     # = 0
    interpolation = cv2.INTER_LINEAR

    sampled_list = []
    for offset_id in range(len(offsets)):
        offset = offsets[offset_id]
        for length_id in range(1, new_length+1):
            frame_name_x = name_pattern % ("x", length_id + offset)
            frame_path_x = path + "/" + frame_name_x
            cv_img_origin_x = cv2.imread(frame_path_x, cv_read_flag)
            frame_name_y = name_pattern % ("y", length_id + offset)
            frame_path_y = path + "/" + frame_name_y
            cv_img_origin_y = cv2.imread(frame_path_y, cv_read_flag)
            if cv_img_origin_x is None or cv_img_origin_y is None:
               print("Could not load file %s or %s" % (frame_path_x, frame_path_y))
               sys.exit()
               # TODO: error handling here
            if new_width > 0 and new_height > 0:
                cv_img_x = cv2.resize(cv_img_origin_x, (new_width, new_height), interpolation=interpolation)
                cv_img_y = cv2.resize(cv_img_origin_y, (new_width, new_height), interpolation=interpolation)
            else:
                cv_img_x = cv_img_origin_x
                cv_img_y = cv_img_origin_y
            sampled_list.append(np.expand_dims(cv_img_x, 2))
            sampled_list.append(np.expand_dims(cv_img_y, 2))

    clip_input = np.concatenate(sampled_list, axis=2)
    return clip_input
Example #30
  def testParams(self):
    """Tests that the params work as intended."""
    num_classes = 2
    with self.test_session() as sess:
      # Experiment 1. Update weights only.
      data = constant_op.constant(self.data, dtype=dtypes.float32)
      gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
                                      [[3.0, 3.0], [0.0, 0.0]], 'w')
      training_ops = gmm_tool.training_ops()
      variables.global_variables_initializer().run()
      sess.run(gmm_tool.init_ops())
      for _ in xrange(self.iterations):
        sess.run(training_ops)

      # Only the probability to each class is updated.
      alphas = sess.run(gmm_tool.alphas())
      self.assertGreater(alphas[1], 0.6)
      means = sess.run(gmm_tool.clusters())
      np.testing.assert_almost_equal(
          np.expand_dims([[3.0, 3.0], [0.0, 0.0]], 1), means)
      covs = sess.run(gmm_tool.covariances())
      np.testing.assert_almost_equal(covs[0], covs[1])

      # Experiment 2. Update means and covariances.
      gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
                                      [[3.0, 3.0], [0.0, 0.0]], 'mc')
      training_ops = gmm_tool.training_ops()
      variables.global_variables_initializer().run()
      sess.run(gmm_tool.init_ops())
      for _ in xrange(self.iterations):
        sess.run(training_ops)
      alphas = sess.run(gmm_tool.alphas())
      self.assertAlmostEqual(alphas[0], alphas[1])
      means = sess.run(gmm_tool.clusters())
      np.testing.assert_almost_equal(
          np.expand_dims([[2.0, 2.0], [-1.0, -1.0]], 1), means, decimal=1)
      covs = sess.run(gmm_tool.covariances())
      np.testing.assert_almost_equal(
          [[0.371111, -0.0050774], [-0.0050774, 0.8651744]], covs[0], decimal=4)
      np.testing.assert_almost_equal(
          [[0.146976, 0.0259463], [0.0259463, 0.2543971]], covs[1], decimal=4)

      # Experiment 3. Update covariances only.
      gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
                                      [[-1.0, -1.0], [1.0, 1.0]], 'c')
      training_ops = gmm_tool.training_ops()
      variables.global_variables_initializer().run()
      sess.run(gmm_tool.init_ops())
      for _ in xrange(self.iterations):
        sess.run(training_ops)
      alphas = sess.run(gmm_tool.alphas())
      self.assertAlmostEqual(alphas[0], alphas[1])
      means = sess.run(gmm_tool.clusters())
      np.testing.assert_almost_equal(
          np.expand_dims([[-1.0, -1.0], [1.0, 1.0]], 1), means)
      covs = sess.run(gmm_tool.covariances())
      np.testing.assert_almost_equal(
          [[0.1299582, 0.0435872], [0.0435872, 0.2558578]], covs[0], decimal=5)
      np.testing.assert_almost_equal(
          [[3.195385, 2.6989155], [2.6989155, 3.3881593]], covs[1], decimal=5)
Example #31
plt.plot(hist.history['val_loss'],label = 'val_loss')
plt.legend()

model.save('faces_model_1')

dir = '/content/drive/My Drive/tom-hardy-actor-attends-mad-max-fury-road-photocall-th-annual-cannes-film-festival-may-cannes-france-58038178.jpg'
from keras_preprocessing import image 
img = image.load_img(dir,
      target_size=(224,224))

img = np.asarray(img)
plt.imshow(img)

training_set.class_indices

img = np.expand_dims(img,axis=0)
from keras.models import load_model
saved_model = load_model('faces_model_1')
output = saved_model.predict(img)
print(output)
print(np.argmax(output))

import cv2

# loading cascades
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

def face_extractor(img):
  # function detects the faces and returns the cropped face
  # if no face is detected, it returns the input image
Example #32
def sparse_vox2ply(grid_indices, vox_size, color_theme=0, colors=None, face=True, name=''):
    """
    visualize a given vox with sparse coding
    :param grid_indices: the sparse grid indices for each non-empty voxel
    :param vox_size: the vox size
    :param color_theme: int. 0 for constant, 1 for heat map, 2 for individual color
    :param colors: list
    :param face: if True, emit cube faces for each voxel; otherwise write points only
    :param name: the saving path of visualized results
    :type grid_indices: np.Array
    :type vox_size: list(int)
    :type name: str
    :return: None
    """
    if colors is None:
        colors = [176, 176, 176]

    if color_theme == 0:
        colors = np.array(colors)
        colors = np.expand_dims(colors, axis=0)
        colors = np.repeat(colors, grid_indices.size, axis=0)
    elif color_theme == 1:
        colors = _heat_map_coding(np.array(colors))
    elif color_theme == 2:
        colors = _semantic_label_coding(np.array(colors))

    elif color_theme == 3:
        pass
    else:
        raise NotImplementedError()

    ply_content = open(name + '.ply', 'w')

    # compute center points info
    if face:
        x_pos = (grid_indices % vox_size[2]) + 0.5
        y_pos = (np.floor(grid_indices / vox_size[2]) % vox_size[1]) + 0.5
        z_pos = (np.floor(grid_indices / vox_size[2] / vox_size[1])) + 0.5
        pos = np.concatenate([x_pos, y_pos, z_pos], axis=-1)
        center_points = np.concatenate([pos, colors], axis=-1)
        _write_ply_header(ply_content, center_points.shape[0] * 8, center_points.shape[0] * 6)
        for point in center_points:
            r, g, b = point[3:]
            x, y, z = np.floor(point[:3])
            ply_content.write('%.2f %.2f %.2f %d %d %d\n' % (x, y, z, r, g, b))
            ply_content.write('%.2f %.2f %.2f %d %d %d\n' % (x + 1, y, z, r, g, b))
            ply_content.write('%.2f %.2f %.2f %d %d %d\n' % (x, y + 1, z, r, g, b))
            ply_content.write('%.2f %.2f %.2f %d %d %d\n' % (x + 1, y + 1, z, r, g, b))
            ply_content.write('%.2f %.2f %.2f %d %d %d\n' % (x, y, z + 1, r, g, b))
            ply_content.write('%.2f %.2f %.2f %d %d %d\n' % (x + 1, y, z + 1, r, g, b))
            ply_content.write('%.2f %.2f %.2f %d %d %d\n' % (x, y + 1, z + 1, r, g, b))
            ply_content.write('%.2f %.2f %.2f %d %d %d\n' % (x + 1, y + 1, z + 1, r, g, b))
        for idx in range(center_points.shape[0]):
            base_idx = idx * 8
            ply_content.write('4 %d %d %d %d\n' % (base_idx + 0, base_idx + 1, base_idx + 3, base_idx + 2))
            ply_content.write('4 %d %d %d %d\n' % (base_idx + 0, base_idx + 4, base_idx + 6, base_idx + 2))
            ply_content.write('4 %d %d %d %d\n' % (base_idx + 0, base_idx + 1, base_idx + 5, base_idx + 4))
            ply_content.write('4 %d %d %d %d\n' % (base_idx + 1, base_idx + 5, base_idx + 7, base_idx + 3))
            ply_content.write('4 %d %d %d %d\n' % (base_idx + 2, base_idx + 3, base_idx + 7, base_idx + 6))
            ply_content.write('4 %d %d %d %d\n' % (base_idx + 4, base_idx + 5, base_idx + 7, base_idx + 6))
    else:
        x_pos = (grid_indices % vox_size[2] * 60) / vox_size[2]
        y_pos = (np.floor(grid_indices / vox_size[2]) % vox_size[1] * 36) / vox_size[1]
        z_pos = (np.floor(grid_indices / vox_size[2] / vox_size[1] * 60)) / vox_size[0]
        pos = np.concatenate([x_pos, y_pos, z_pos], axis=-1)
        center_points = np.concatenate([pos, colors], axis=-1)
        _write_ply_header(ply_content, grid_indices.shape[0])
        for point in center_points:
            ply_content.write('%.2f %.2f %.2f %d %d %d\n' % tuple(point.tolist()))
    ply_content.close()
Example #33
def _numpy_exp_quad_matrix(amplitude, length_scale, x, feature_ndims):
    return _numpy_exp_quad(amplitude, length_scale,
                           np.expand_dims(x, -feature_ndims - 2),
                           np.expand_dims(x, -feature_ndims - 1),
                           feature_ndims)
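The two expand_dims calls set up the usual pairwise-broadcast trick. Since _numpy_exp_quad itself is not shown, here is a self-contained sketch of the same idea for feature_ndims = 1 (the squared-amplitude convention is an assumption):

import numpy as np

def exp_quad_matrix(amplitude, length_scale, x):
    # x has shape (n, 1); feature_ndims = 1
    x1 = np.expand_dims(x, -3)  # (1, n, 1): axis -feature_ndims - 2
    x2 = np.expand_dims(x, -2)  # (n, 1, 1): axis -feature_ndims - 1
    sq_dist = np.sum((x1 - x2) ** 2, axis=-1)  # (n, n) pairwise squared distances
    return amplitude ** 2 * np.exp(-0.5 * sq_dist / length_scale ** 2)

x = np.random.uniform(-1., 1., (5, 1))
print(exp_quad_matrix(0.5, 0.2, x).shape)  # (5, 5)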
Example #34
    def run(self):
        '''
        Runs the model according to the specified settings
        -   If mode = Train: Train a GRU model using the training data
        -   If mode = Val: Load the saved GRU model and evaluate it on the validation fold
        -   If mode = Test: Load the saved GRU model and evaluate it on the blind test set
        '''

        self.is_train = (self.mode == 'Train')

        if not os.path.exists(self.path):
            os.mkdir(self.path)

        # Load the training data
        with open('train_data.pkl', 'rb') as f:
            data_sequences = pkl.load(f)
        with open('train_labels.pkl', 'rb') as f:
            data_labels = pkl.load(f)

        dictionary, reverse_dictionary, data_lengths, self.max_seq_len, enc_sequences = build_dictionary(
            data_sequences)
        self.dictionary = sorted(dictionary.items(),
                                 key=operator.itemgetter(1))
        print(self.dictionary)
        self.vocabulary_size = len(dictionary)
        self.val_size = len(data_sequences) // self.folds
        fold = self.mask
        print('Training fold number %d. Each fold of size %d' %
              (fold, len(data_sequences) // self.folds))

        # Truncates sequences at length 2000 and returns descriptive statistics.
        # This is done by concatenating the first 1900 and the last 100 amino acids.

        if self.is_train:
            self.max_seq_len = 2000
            original_lengths = copy(data_lengths)

            data_sequences = enc_sequences[:, :self.max_seq_len]
            for i in range(len(data_lengths)):
                if data_lengths[i] > self.max_seq_len:
                    data_sequences[i] = np.concatenate(
                        (enc_sequences[i, :self.max_seq_len - 100],
                         enc_sequences[i, -100:]),
                        axis=0)
                    data_lengths[i] = self.max_seq_len

            if self.folds == 1:
                val_mask = np.array([False])
            else:
                val_mask = np.arange(self.val_size * (fold - 1),
                                     self.val_size * (fold))

            # Use seed to ensure same randomisation is applied for each fold
            np.random.seed(4)
            perm = np.random.permutation(len(data_sequences))
            data_labels = np.array(data_labels)

            data_sequences = data_sequences[perm]
            data_labels = data_labels[perm]
            data_lengths = data_lengths[perm]
            original_lengths = original_lengths[perm]

            self.val_data = data_sequences[val_mask]
            self.val_labels = data_labels[val_mask]
            self.val_lengths = data_lengths[val_mask]
            self.val_original_lengths = original_lengths[val_mask]

            self.train_data = np.delete(data_sequences, val_mask, axis=0)
            self.train_labels = np.delete(data_labels, val_mask, axis=0)
            self.train_lengths = np.delete(data_lengths, val_mask, axis=0)
            self.train_original_lengths = np.delete(original_lengths,
                                                    val_mask,
                                                    axis=0)

            self.train_statistics, self.train_frame = self.summary_stats(
                self.train_lengths, self.train_labels, 'train')
            if self.folds == 1:
                self.val_statistics = np.array([])
                self.val_frame = np.array([])
                self.val_original_lengths = np.array([])
            else:
                self.val_statistics, self.val_frame = self.summary_stats(
                    self.val_lengths, self.val_labels, 'validation')

            this_data = [
                self.train_data, self.train_labels, self.train_lengths,
                self.val_data, self.val_labels, self.val_lengths,
                self.train_statistics, self.train_frame, self.val_statistics,
                self.val_frame, self.train_original_lengths,
                self.val_original_lengths
            ]

            with open(self.path + 'this_data.pkl', 'wb') as f:
                pkl.dump(this_data, f)

        else:
            with open(self.path + 'this_data.pkl', 'rb') as f:
                self.train_data, self.train_labels, self.train_lengths, self.val_data, self.val_labels, self.val_lengths, self.train_statistics, self.train_frame, self.val_statistics, self.val_frame, self.train_original_lengths, self.val_original_lengths = pkl.load(
                    f)

        # Now construct the Tensorflow graph
        print('\r~~~~~~~ Building model ~~~~~~~\r')

        # Define placeholders and variables
        initializer = tf.random_normal_initializer()
        self.word_embeddings = tf.get_variable(
            'embeddings', [self.vocabulary_size, self.embedding_size],
            tf.float32,
            initializer=initializer)
        sequences = tf.placeholder(tf.int32, [None, None], "sequences")
        sequences_lengths = tf.placeholder(tf.int32, [None],
                                           "sequences_lengths")
        labels = tf.placeholder(tf.int64, [None], "labels")
        keep_prob_dropout = tf.placeholder(tf.float32, name='dropout')
        global_step = tf.Variable(0, name='global_step', trainable=False)

        # Embed and encode sequences
        sequences_embedded = self.embed_data(sequences)
        encoded_sequences = self.encoder(sequences_embedded,
                                         sequences_lengths,
                                         keep_prob_dropout,
                                         bidirectional=self.bidirectional)

        # Take last hidden state of GRU and put them through a nonlinear and a linear FC layer
        with tf.name_scope('non_linear_layer'):
            encoded_sentences_BN = self.batch_norm_wrapper(
                encoded_sequences, self.is_train)
            non_linear = tf.nn.dropout(tf.nn.relu(
                tf.contrib.layers.linear(encoded_sentences_BN, 64)),
                                       keep_prob=keep_prob_dropout)

        with tf.name_scope('final_layer'):
            non_linear_BN = self.batch_norm_wrapper(non_linear, self.is_train)
            logits = tf.contrib.layers.linear(non_linear_BN, 4)

        # Compute mean loss on this batch, consisting of cross entropy loss and L2 loss
        CE_loss = self.get_CE_loss(labels, logits)
        L2_loss = self.get_L2_loss()
        loss = CE_loss + L2_loss

        # Perform training operation
        learning_rate = tf.train.exponential_decay(self.learning_rate,
                                                   global_step,
                                                   100,
                                                   0.96,
                                                   staircase=True)
        opt_op = tf.contrib.layers.optimize_loss(loss=loss,
                                                 global_step=global_step,
                                                 learning_rate=learning_rate,
                                                 optimizer='Adam',
                                                 clip_gradients=2.0,
                                                 learning_rate_decay_fn=None,
                                                 summaries=None)

        # Define scalars for Tensorboard
        tf.summary.scalar('CE_loss', CE_loss)
        tf.summary.scalar('L2_loss', L2_loss)
        tf.summary.scalar('loss', loss)
        tf.summary.scalar('learning_rate', learning_rate)

        # Compute accuracy of prediction
        probs = tf.nn.softmax(logits)
        with tf.name_scope('accuracy'):
            pred = tf.argmax(logits, 1)
            correct_prediction = tf.equal(labels, pred)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            tf.summary.scalar('accuracy', accuracy)

        # If in training mode:
        # - shuffle data set before each epoch
        # - train model using mini batches
        # - track performance on train and validation set throughout training

        if self.is_train:
            with tf.Session() as session:
                train_loss_writer = tf.summary.FileWriter(
                    str(self.path + 'tensorboard/train_loss'), session.graph)
                train_summary_writer = tf.summary.FileWriter(
                    str(self.path + 'tensorboard/train_summary'),
                    session.graph)
                val_summary_writer = tf.summary.FileWriter(
                    str(self.path + 'tensorboard/val_summary'), session.graph)

                # Use the same LOG_DIR where you stored your checkpoint.
                embedding_writer = tf.summary.FileWriter(
                    str(self.path + 'tensorboard/'), session.graph)

                config = projector.ProjectorConfig()
                embedding = config.embeddings.add()
                embedding.tensor_name = self.word_embeddings.name
                # Link this tensor to its metadata file (e.g. labels).
                embedding.metadata_path = os.path.join('./metadata.tsv')

                # Saves a configuration file that TensorBoard will read during startup.
                projector.visualize_embeddings(embedding_writer, config)

                merged = tf.summary.merge_all()
                print('\r~~~~~~~ Initializing variables ~~~~~~~\r')
                tf.global_variables_initializer().run()

                start_time = time.time()
                min_train_loss = np.inf
                batch_times = []
                n = self.train_data.shape[0]
                print('\r~~~~~~~ Starting training ~~~~~~~\r')
                try:
                    train_summaryIndex = -1

                    for epoch in range(self.num_epochs):
                        self.is_train = True
                        epoch_time = time.time()
                        print('----- Epoch', epoch, '-----')
                        print('Shuffling dataset')

                        perm = np.random.permutation(len(self.train_data))
                        self.train_data_perm = self.train_data[perm]
                        self.train_labels_perm = self.train_labels[perm]
                        self.train_lengths_perm = self.train_lengths[perm]

                        total_loss = 0

                        for i in range(n // self.batch_size):
                            batch_start = time.time()
                            batch_data = self.train_data_perm[i *
                                                              self.batch_size:
                                                              (i + 1) *
                                                              self.batch_size]
                            batch_lengths = self.train_lengths_perm[
                                i * self.batch_size:(i + 1) * self.batch_size]
                            batch_labels = self.train_labels_perm[
                                i * self.batch_size:(i + 1) * self.batch_size]

                            train_dict = {
                                sequences: batch_data,
                                sequences_lengths: batch_lengths,
                                labels: batch_labels,
                                keep_prob_dropout: self.keep_prob_dropout
                            }

                            _, batch_loss, batch_accuracy, batch_summary = session.run(
                                [opt_op, loss, accuracy, merged],
                                feed_dict=train_dict)
                            total_loss += batch_loss
                            batch_times.append(time.time() - batch_start)

                            train_loss_writer.add_summary(
                                batch_summary,
                                i + (n // self.batch_size) * epoch)

                            if i % 10 == 0 and i > 0:
                                # Print loss every 10 batches
                                time_per_epoch = np.mean(batch_times) * (
                                    n // self.batch_size)
                                remaining_time = int(time_per_epoch -
                                                     time.time() + epoch_time)
                                string_out = '\rEnd of batch ' + str(
                                    i) + '    Train loss:   ' + str(
                                        total_loss / (i * self.batch_size)
                                    ) + '    Accuracy:   ' + str(
                                        batch_accuracy)
                                string_out += '  Elapsed training time : ' + str(
                                    int(time.time() - start_time)) + "s, "
                                string_out += str(
                                    remaining_time
                                ) + "s remaining for this epoch"
                                string_out += '  (' + str(
                                    time_per_epoch * 100 / 60 // 1 /
                                    100) + ' min/epoch)'
                                stdout.write(string_out)

                        # Train accuracy
                        train_dict = {
                            sequences: self.train_data_perm[:1000],
                            sequences_lengths: self.train_lengths_perm[:1000],
                            labels: self.train_labels_perm[:1000],
                            keep_prob_dropout: 1.0
                        }

                        train_summary, train_loss, train_accuracy = session.run(
                            [merged, loss, accuracy], feed_dict=train_dict)
                        train_summary_writer.add_summary(train_summary, epoch)
                        print('\nEpoch train loss: ', train_loss,
                              'Epoch train accuracy: ', train_accuracy)

                        # Val accuracy
                        val_dict = {
                            sequences: self.val_data,
                            sequences_lengths: self.val_lengths,
                            labels: self.val_labels,
                            keep_prob_dropout: 1.0
                        }
                        val_summary, val_loss, val_accuracy = session.run(
                            [merged, loss, accuracy], feed_dict=val_dict)
                        val_summary_writer.add_summary(val_summary, epoch)
                        print('\nEpoch val loss: ', val_loss,
                              'Epoch val accuracy: ', val_accuracy)

                        self.save_model(session, epoch)

                        saver = tf.train.Saver(
                            write_version=tf.train.SaverDef.V2)
                        saver.save(
                            session,
                            os.path.join(self.path + '/tensorboard/',
                                         'model.ckpt'))

                except KeyboardInterrupt:
                    save = input('save?')
                    if 'y' in save:
                        self.save_model(session, epoch)

        # If in validation mode:
        # - Load saved model and evaluate on validation fold
        # - Return list containing confusion matrices, and accuracy measures such as FPR and TPR

        elif self.mode == 'Val':
            with tf.Session() as session:
                print('Restoring model...')
                saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
                saver.restore(session, self.path + 'tensorboard/model.ckpt')
                print('Model restored!')

                val_dict = {
                    sequences: self.val_data,
                    sequences_lengths: self.val_lengths,
                    labels: self.val_labels,
                    keep_prob_dropout: 1.0
                }

                self.val_pred, self.val_accuracy, self.val_probs = session.run(
                    [pred, accuracy, probs], feed_dict=val_dict)

                _ = self.summary_stats(self.val_lengths, self.val_labels,
                                       'val')

                print('\nConfusion matrix (all sequence lengths):')
                val_confusion_1 = self.confusion(
                    gold=self.val_labels,
                    prediction=self.val_pred,
                    lengths=self.val_original_lengths,
                    min_length=0,
                    max_length=np.inf)
                print(val_confusion_1)

                print('\nConfusion matrix (sequence length < 2000):')
                val_confusion_2 = self.confusion(
                    gold=self.val_labels,
                    prediction=self.val_pred,
                    lengths=self.val_original_lengths,
                    min_length=0,
                    max_length=2000)
                print(val_confusion_2)

                print('\nConfusion matrix (sequence length > 2000):')
                val_confusion_3 = self.confusion(
                    gold=self.val_labels,
                    prediction=self.val_pred,
                    lengths=self.val_original_lengths,
                    min_length=2000,
                    max_length=np.inf)
                print(val_confusion_3)

                print('\n Val accuracy:', self.val_accuracy)
                print(
                    '\n Val accuracy when length <=2000:',
                    np.sum((self.val_pred == self.val_labels) *
                           (self.val_original_lengths <= 2000)) /
                    np.sum(self.val_original_lengths <= 2000))
                print(
                    '\n Val accuracy when length >2000:',
                    np.sum((self.val_pred == self.val_labels) *
                           (self.val_original_lengths > 2000)) /
                    np.sum(self.val_original_lengths > 2000))

                this_sum = np.zeros([3, 5])
                this_auc = np.zeros([1, 5])
                this_TPR = []
                this_FPR = []

                total_tp = 0
                total_fp = 0
                total_fn = 0
                total_tn = 0

                for i in range(4):
                    tp = np.sum((self.val_labels == i) * (self.val_pred == i))
                    fp = np.sum((self.val_labels != i) * (self.val_pred == i))
                    fn = np.sum((self.val_labels == i) * (self.val_pred != i))
                    tn = np.sum((self.val_labels != i) * (self.val_pred != i))

                    total_tp += tp
                    total_fp += fp
                    total_fn += fn
                    total_tn += tn
                    prec = tp / (tp + fp) if (tp + fp) > 0 else 0.0
                    recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
                    f1 = 2 * prec * recall / (
                        prec + recall) if prec * recall > 0 else 0.0
                    this_sum[:, i] = np.array([prec, recall, f1])
                    this_auc[:, i] = roc_auc_score(self.val_labels == i,
                                                   self.val_pred == i)
                    # i is always < 4 here, so compute the ROC curve
                    # unconditionally, and only once per class
                    fpr, tpr, _ = roc_curve(self.val_labels == i,
                                            self.val_probs[:, i])
                    this_FPR.append(fpr)
                    this_TPR.append(tpr)

                prec = total_tp / (total_tp + total_fp) if (
                    total_tp + total_fp) > 0 else 0.0
                recall = total_tp / (total_tp + total_fn) if (
                    total_tp + total_fn) > 0 else 0.0
                f1 = 2 * prec * recall / (prec +
                                          recall) if prec * recall > 0 else 0.0
                this_sum[:, 4] = np.array([prec, recall, f1])
                this_sum = np.concatenate((this_sum, this_auc), 0)

                self.this_sum = pd.DataFrame(this_sum)
                self.this_sum.index = pd.Index(
                    ['Precision', 'Recall', 'F1', 'AUC'])
                self.this_sum.columns = pd.Index(
                    ['cyto', 'secreted', 'mito', 'nucleus', 'Total'])

                print(self.this_sum)

                if not self.is_train:
                    return [
                        val_confusion_1, val_confusion_2, val_confusion_3,
                        self.this_sum, this_FPR, this_TPR
                    ]

        # If in test mode:
        # - Load saved model and evaluate on test set
        # - Print predicted probabilities for each protein in the test set

        elif self.mode == 'Test':
            with tf.Session() as session:
                print('Restoring model...')
                saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
                saver.restore(session, self.path + 'model.checkpoint')
                print('Model restored!')

                with open('test_data.pkl', 'rb') as f:
                    test_sequences = pkl.load(f)
                with open('test_labels.pkl', 'rb') as f:
                    test_labels = pkl.load(f)

                _, _, data_lengths, _, enc_sequences = build_dictionary(
                    test_sequences, vocab=dictionary)

                test_dict = {
                    sequences: enc_sequences,
                    sequences_lengths: data_lengths,
                    keep_prob_dropout: 1.0
                }

                self.probs, self.pred = session.run([probs, pred],
                                                    feed_dict=test_dict)
                result = pd.DataFrame(
                    np.concatenate((self.probs, np.expand_dims(self.pred, 1)),
                                   1))
                result.columns = pd.Index(
                    ['cyto', 'secreted', 'mito', 'nucleus', 'prediction'])
                print(result)
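
A minimal, self-contained sketch of the TensorBoard projector wiring used above, assuming TensorFlow 1.x as in the example; the log directory, variable name, and sizes here are hypothetical.

import os
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector

log_dir = '/tmp/tensorboard'  # hypothetical
word_embeddings = tf.Variable(tf.random_normal([1000, 64]), name='word_embeddings')

writer = tf.summary.FileWriter(log_dir)
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = word_embeddings.name
embedding.metadata_path = 'metadata.tsv'  # one label per embedding row
projector.visualize_embeddings(writer, config)  # writes projector_config.pbtxt

with tf.Session() as session:
    tf.global_variables_initializer().run()
    # The projector reads the tensor from the checkpoint saved in log_dir.
    tf.train.Saver([word_embeddings]).save(session, os.path.join(log_dir, 'model.ckpt'))
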
Exemplo n.º 35
0
def plot_filters_gnomonic(filters,
                          order=10,
                          ind=0,
                          title='Filter {}->{}',
                          graticule=False):
    """Plot all filters in a filterbank in Gnomonic projection."""
    nside = hp.npix2nside(filters.G.N)
    reso = hp.pixelfunc.nside2resol(nside=nside, arcmin=True) * order / 100
    rot = hp.pix2ang(nside=nside, ipix=ind, nest=True, lonlat=True)

    maps = filters.localize(ind, order=order)

    nrows, ncols = filters.n_features_in, filters.n_features_out

    if maps.shape[0] == filters.G.N:
        # FIXME: old signal shape when not using Chebyshev filters.
        shape = (nrows, ncols, filters.G.N)
        maps = maps.T.reshape(shape)
    else:
        if nrows == 1:
            maps = np.expand_dims(maps, 0)
        if ncols == 1:
            maps = np.expand_dims(maps, 1)

    # Plot everything.
    fig, axes = plt.subplots(nrows,
                             ncols,
                             figsize=(8, 8 / ncols * nrows),
                             squeeze=False,
                             sharex='col',
                             sharey='row')
    # turn off axes
    for axi in axes.ravel():
        axi.set_axis_off()

    # handle margins
    if title is None:
        margins = [0.003, 0.003, 0.003, 0.003]
        title = ""
    else:
        margins = [0.015, 0.015, 0.015, 0.015]

    cm = plt.cm.seismic
    cm.set_under('w')
    a = max(abs(maps.min()), maps.max())
    ymin, ymax = -a, a
    for row in range(nrows):
        for col in range(ncols):
            map_ = maps[row, col, :]  # avoid shadowing the builtin `map`
            hp.gnomview(map_.flatten(),
                        fig=fig,
                        nest=True,
                        rot=rot,
                        reso=reso,
                        sub=(nrows, ncols, col + row * ncols + 1),
                        title=title.format(row, col),
                        notext=True,
                        min=ymin,
                        max=ymax,
                        cbar=False,
                        cmap=cm,
                        margins=margins)

    fig.suptitle('Gnomonic view of the {} filters in the filterbank'.format(
        filters.n_filters),
                 fontsize=25,
                 y=1.05)

    if graticule:
        with utils.HiddenPrints():
            hp.graticule(verbose=False)

    return fig
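
The pair of expand_dims calls above exists so that maps can always be indexed as (row, col, pixel), whichever feature axis collapsed to length 1. A small standalone sketch of that normalization, with hypothetical sizes:

import numpy as np

npix = 3072                            # hypothetical pixel count
maps = np.random.rand(4, npix)         # (n_features_out, npix): input axis collapsed
nrows, ncols = 1, 4

if nrows == 1:
    maps = np.expand_dims(maps, 0)     # -> (1, 4, npix)
if ncols == 1:
    maps = np.expand_dims(maps, 1)
print(maps.shape)                      # (1, 4, 3072), so maps[row, col, :] always works
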
Exemplo n.º 36
0
                        '/kaggle/input/spinedataset/Training/Normal/ID ({})/AP/ap.jpg'
                        .format(i), 0), (224, 224)))
        else:
            trainNormalAP.append(
                cv2.resize(
                    cv2.imread(
                        '/kaggle/input/spinedataset/Training/Normal/ID ({})/AP.jpg'
                        .format(i), 0), (224, 224)))
trainNormalAP = np.array(trainNormalAP)

# In[7]:

trainAP = np.concatenate((trainNormalAP, trainDamagedAP))
del trainNormalAP
del trainDamagedAP
trainAP = np.expand_dims(trainAP, axis=3)

# In[8]:

testAP_path = '/kaggle/input/testdataxrays/TestData(XraysOnly)/Test ({})/AP/AP.jpg'
testAP = []
for i in range(1, 302):
    if i in skip_ids:
        continue
    imagePath = testAP_path.format(i)
    if path.exists(imagePath):
        testAP.append(cv2.resize(cv2.imread(imagePath, 0), (224, 224)))
    else:
        if path.exists(
                '/kaggle/input/testdataxrays/TestData(XraysOnly)/Test ({})/AP/ap.jpg'
                .format(i)):
Exemplo n.º 37
0
#print "layer weights"
##print model.get_layer("conv1").get_weights()
#print "test_generator"
#print test_generator[0]
#print model.predict(test_generator)#, batch_size=len(test_generator[0]))
print(model.evaluate_generator(test_generator))
#print model.evaluate(test_generator[0][0], test_generator[0][1], verbose=0)#, batch_size=len(test_generator[0]))
#print outputs
#functor = K.function([inp+[K.learning_phase()]], outputs)
#layer_outs = functor([test_generator[0], 0])
#print layer_outs

#print model.predict(test_generator[0])
#print model.evaluate(test_generator[0][0], test_generator[0][1])

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path of the image")
args = vars(ap.parse_args())

image_path = args['image']

# Preprocessing our input image
img = image.img_to_array(image.load_img(image_path,
                                        target_size=(128, 128))) / 255.
img = np.expand_dims(img, axis=0)
print(img.tolist())
print(model.predict(img))

#tensorflow_model_server --model_base_path=/home/ubuntu/export/ --rest_api_port=9000 --model_name=test
#export FLASK_ENV=development && export FLASK_APP=app.py && flask run --host=0.0.0.0
Exemplo n.º 38
0
def normalized(a, axis=-1, order=2):
    l2 = np.atleast_1d(np.linalg.norm(a, order, axis))
    l2[l2==0] = 1
    return np.squeeze(a / np.expand_dims(l2, axis))
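
A quick check of the helper above, restated so the snippet runs on its own: rows are scaled to unit L2 norm, and all-zero rows are left untouched.

import numpy as np

def normalized(a, axis=-1, order=2):
    l2 = np.atleast_1d(np.linalg.norm(a, order, axis))
    l2[l2 == 0] = 1
    return np.squeeze(a / np.expand_dims(l2, axis))

v = np.array([[3.0, 4.0], [0.0, 0.0]])
print(normalized(v))                           # [[0.6 0.8] [0.  0. ]]
print(np.linalg.norm(normalized(v), axis=-1))  # [1. 0.]
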
Exemplo n.º 39
0
    print("min_depth: " + str(min_depth) + "  max_depth: " + str(max_depth))

    while True:
        
        # get rgb-depth from camera
        rgb_msg = rospy.wait_for_message("/rgb/image_raw", Image)
        depth_msg = rospy.wait_for_message("/depth_to_rgb/image_raw", Image)
        rgb = bridge.imgmsg_to_cv2(rgb_msg,desired_encoding='bgr8')
        depth = bridge.imgmsg_to_cv2(depth_msg, desired_encoding='32FC1')

        # depth normalization (0.25 ~ 1.25 -> 0 ~ 255)
        depth_im = depth
        depth_im[depth_im > max_depth] = max_depth
        depth_im[depth_im < min_depth] = min_depth
        depth_im = np.uint8(255 * (depth_im - min_depth) / (max_depth - min_depth))
        depth_im = np.repeat(np.expand_dims(depth_im, -1), 3, -1)
        rgbd_im = np.hstack([rgb, depth_im])
        rgbd_im = cv2.resize(rgbd_im, (1280, 360))
        cv2.imshow("rgbd - img_id: " + str(img_id) + "   S: Save / Q: Quit", rgbd_im )

        key = cv2.waitKey(1)
        if key == ord('q'):
            break

        elif key == ord('s'):
            print("Saving RGB-Depth, Image ID=" + str(img_id) + "  SAVE_ROOT=" + save_root)
            cv2.imwrite(os.path.join(save_root, "rgb_{}.png".format(img_id)), rgb)
            cv2.imwrite(os.path.join(save_root, "depth_im_{}.png".format(img_id)), depth_im)
            np.save(os.path.join(save_root, "depth_{}.npy".format(img_id)), depth)
            img_id += 1
            cv2.destroyAllWindows()
b = np.array([1, 2, 3, 4], dtype=int)

for x, y in np.nditer([a, b]):
    print(x, y)

# I. Functions that modify dimensions
# broadcast_to() broadcasts an array to a new shape
a = np.arange(4).reshape(1, 4)
a  # same number of dimensions, with one axis of length 1
np.broadcast_to(a, (4, 4))

# expand_dims() inserts a new axis at the given position to expand the array shape

x = np.array([[1, 2], [3, 4]])
x.shape
np.expand_dims(x, axis=0).shape
y = np.expand_dims(x, axis=1)
y.shape
# squeeze() removes the axes of length 1 from the given array shape.
np.squeeze(y).shape
a = np.arange(3).reshape(1, 3, 1)
np.squeeze(a)
a.size
# II. Functions that modify array shape
# 1. reshape(): the arguments must factor the total number of elements
# 2. the flat attribute
a = np.arange(8).reshape(2, 4)
a
list(a.flat)  # an iterator over the array
# [0, 1, 2, 3, 4, 5, 6, 7]
# 3. ndarray.flatten()
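
The snippet stops at ndarray.flatten(); a short sketch of how it differs from the flat iterator above (flatten returns an independent copy and accepts an order argument):

import numpy as np

a = np.arange(8).reshape(2, 4)
b = a.flatten()        # new 1-D copy
b[0] = 99
print(a[0, 0])         # 0 -- the original is untouched
print(a.flatten('F'))  # [0 4 1 5 2 6 3 7], column-major order
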
Exemplo n.º 41
0
    def __getitem__(self, idx):
        # Load the depth image
        depth_img = self.get_depth(idx)

        # Load the rgb image
        rgb_img = self.get_rgb(idx)

        # Load the grasps
        grs = self.get_gtbb(idx)
        pos_out, width_out, cos_out, sin_out = grs.draw(
            (self.output_size, self.output_size))

        # Expand dim
        depth_img = np.expand_dims(depth_img, axis=0)
        depth_img = np.expand_dims(depth_img, axis=3)
        rgb_img = np.expand_dims(rgb_img, axis=0)
        pos_out = np.expand_dims(pos_out, axis=0)
        pos_out = np.expand_dims(pos_out, axis=3)
        width_out = np.expand_dims(width_out, axis=0)
        width_out = np.expand_dims(width_out, axis=3)
        cos_out = np.expand_dims(cos_out, axis=0)
        cos_out = np.expand_dims(cos_out, axis=3)
        sin_out = np.expand_dims(sin_out, axis=0)
        sin_out = np.expand_dims(sin_out, axis=3)
        #print(sin_out.shape)
        #print(cos_out.shape)

        output = np.concatenate((pos_out, width_out, cos_out, sin_out), axis=3)
        #return depth_img, rgb_img, grs, pos_out
        #return rgb_img, pos_out
        #return rgb_img, [pos_out, width_out]
        return rgb_img, output
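
The chain of expand_dims calls above turns each (H, W) map into a rank-4 tensor so the four outputs can be concatenated on the channel axis. A shape-only sketch, with a hypothetical output_size:

import numpy as np

output_size = 224                          # hypothetical
pos = np.random.rand(output_size, output_size)
pos = np.expand_dims(pos, axis=0)          # (1, 224, 224)
pos = np.expand_dims(pos, axis=3)          # (1, 224, 224, 1)
output = np.concatenate([pos] * 4, axis=3)
print(output.shape)                        # (1, 224, 224, 4)
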
Exemplo n.º 42
0
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')

# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')

# Initialize webcam feed
video = cv2.VideoCapture(0)
ret = video.set(3,1280)
ret = video.set(4,720)

while(True):

    # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3],
    # i.e. add a leading batch axis, since the model expects a batch of images
    ret, frame = video.read()
    frame_expanded = np.expand_dims(frame, axis=0)

    # Perform the actual detection by running the model with the image as input
    (boxes, scores, classes, num) = sess.run(
        [detection_boxes, detection_scores, detection_classes, num_detections],
        feed_dict={image_tensor: frame_expanded})

    # Draw the results of the detection (aka 'visualize the results')
    vis_util.visualize_boxes_and_labels_on_image_array(
        frame,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8,
Exemplo n.º 43
0
                                    unpack=None,
                                    usemask=False,
                                    loose=True,
                                    invalid_raise=True,
                                    max_rows=None,
                                    encoding='bytes')
 image = misc.imread(csv_path.replace(".csv", ".png")) / 255
 if double_channel:
     image2 = misc.imread(
         csv_path.replace(".csv", ".png").replace(dataset,
                                                  dataset2)) / 255
     image2 = np.mean(cv2.resize(image2,
                                 (image.shape[0], image.shape[1]),
                                 interpolation=cv2.INTER_LINEAR),
                      axis=-1)
     image2 = np.expand_dims(image2, axis=-1)
     image = np.concatenate((image, image2), axis=-1)
 cells_in_image = []
 for cell_string in np.nditer(all_annotation_txt):
     cell_array = np.fromstring(np.array2string(cell_string).strip("'"),
                                dtype=int,
                                sep=",")
     cell_array = np.flip(np.reshape(cell_array, (-1, 2)), 1)
     cell_center = np.round(np.mean(cell_array, axis=0)).astype(int)
     cells_in_image.append(cell_array)
     crop, mask = crop_image(image, cell_center, bounding_box_size,
                             cell_array)
     a_channel_cells = np.vstack(
         (a_channel_cells, np.expand_dims(crop, axis=0)))
     a_channel_masks = np.vstack(
         (a_channel_masks, np.expand_dims(mask, axis=0)))
Exemplo n.º 44
0
  def create_pc_pillars(self, img, img_info, pc_2d, pc_3d, inp_trans, out_trans):
    pillar_wh = np.zeros((2, pc_3d.shape[1]))
    boxes_2d = np.zeros((0,8,2))
    pillar_dim = self.opt.pillar_dims
    v = np.dot(np.eye(3), np.array([1,0,0]))
    ry = -np.arctan2(v[2], v[0])

    for i, center in enumerate(pc_3d[:3,:].T):
      # Create a 3D pillar at pc location for the full-size image
      box_3d = compute_box_3d(dim=pillar_dim, location=center, rotation_y=ry)
      box_2d = project_to_image(box_3d, img_info['calib']).T  # [2x8]        
      
      ## save the box for debug plots
      if self.opt.debug:
        box_2d_img, m = self._transform_pc(box_2d, inp_trans, self.opt.input_w, 
                                            self.opt.input_h, filter_out=False)
        boxes_2d = np.concatenate((boxes_2d, np.expand_dims(box_2d_img.T,0)),0)

      # transform points
      box_2d_t, m = self._transform_pc(box_2d, out_trans, self.opt.output_w, self.opt.output_h)
      
      if box_2d_t.shape[1] <= 1:
        continue

      # get the bounding box in [xyxy] format
      bbox = [np.min(box_2d_t[0,:]), 
              np.min(box_2d_t[1,:]), 
              np.max(box_2d_t[0,:]), 
              np.max(box_2d_t[1,:])] # format: xyxy

      # store height and width of the 2D box
      pillar_wh[0,i] = bbox[2] - bbox[0]
      pillar_wh[1,i] = bbox[3] - bbox[1]

    ## DEBUG #################################################################
    if self.opt.debug:
      img_2d = copy.deepcopy(img)
      # img_3d = copy.deepcopy(img)
      img_2d_inp = cv2.warpAffine(img, inp_trans, 
                        (self.opt.input_w, self.opt.input_h),
                        flags=cv2.INTER_LINEAR)
      img_2d_out = cv2.warpAffine(img, out_trans, 
                        (self.opt.output_w, self.opt.output_h),
                        flags=cv2.INTER_LINEAR)
      img_3d = cv2.warpAffine(img, inp_trans, 
                        (self.opt.input_w, self.opt.input_h),
                        flags=cv2.INTER_LINEAR)
      blank_image = 255*np.ones((self.opt.input_h,self.opt.input_w,3), np.uint8)
      overlay = img_2d_inp.copy()
      output = img_2d_inp.copy()

      pc_inp, _= self._transform_pc(pc_2d, inp_trans, self.opt.input_w, self.opt.input_h)
      pc_out, _= self._transform_pc(pc_2d, out_trans, self.opt.output_w, self.opt.output_h)

      pill_wh_inp = pillar_wh * (self.opt.input_w/self.opt.output_w)
      pill_wh_out = pillar_wh
      pill_wh_ori = pill_wh_inp * 2
      
      for i, p in enumerate(pc_inp[:3,:].T):
        color = int((p[2].tolist()/60.0)*255)
        color = (0,color,0)
        
        # Clamp corners to non-negative coordinates. The original wrapped these in
        # np.min(value, 0), which passes 0 as the axis argument and never clamps;
        # max(..., 0) is the evident intent.
        rect_tl = (max(int(p[0]-pill_wh_inp[0,i]/2), 0), max(int(p[1]-pill_wh_inp[1,i]), 0))
        rect_br = (max(int(p[0]+pill_wh_inp[0,i]/2), 0), int(p[1]))
        cv2.rectangle(img_2d_inp, rect_tl, rect_br, (0, 0, 255), 1, lineType=cv2.LINE_AA)
        img_2d_inp = cv2.circle(img_2d_inp, (int(p[0]), int(p[1])), 3, color, -1)

        ## On original-sized image
        rect_tl_ori = (max(int(pc_2d[0,i]-pill_wh_ori[0,i]/2), 0), max(int(pc_2d[1,i]-pill_wh_ori[1,i]), 0))
        rect_br_ori = (max(int(pc_2d[0,i]+pill_wh_ori[0,i]/2), 0), int(pc_2d[1,i]))
        cv2.rectangle(img_2d, rect_tl_ori, rect_br_ori, (0, 0, 255), 2, lineType=cv2.LINE_AA)
        img_2d = cv2.circle(img_2d, (int(pc_2d[0,i]), int(pc_2d[1,i])), 6, color, -1)
        
        p2 = pc_out[:3,i].T
        rect_tl2 = (max(int(p2[0]-pill_wh_out[0,i]/2), 0), max(int(p2[1]-pill_wh_out[1,i]), 0))
        rect_br2 = (max(int(p2[0]+pill_wh_out[0,i]/2), 0), int(p2[1]))
        cv2.rectangle(img_2d_out, rect_tl2, rect_br2, (0, 0, 255), 1, lineType=cv2.LINE_AA)
        # plot the output-scale point coordinates (p2), not the input-scale ones (p)
        img_2d_out = cv2.circle(img_2d_out, (int(p2[0]), int(p2[1])), 3, (255,0,0), -1)
        
        # on blank image
        cv2.rectangle(blank_image, rect_tl, rect_br, color, -1, lineType=cv2.LINE_AA)
        
        # overlay
        alpha = 0.1
        cv2.rectangle(overlay, rect_tl, rect_br, color, -1, lineType=cv2.LINE_AA)
        cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)

        # plot 3d pillars
        img_3d = draw_box_3d(img_3d, boxes_2d[i].astype(np.int32), [114, 159, 207], 
                    same_color=False)

      cv2.imwrite((self.opt.debug_dir+ '/{}pc_pillar_2d_inp.' + self.opt.img_format)\
        .format(self.img_ind), img_2d_inp)
      cv2.imwrite((self.opt.debug_dir+ '/{}pc_pillar_2d_ori.' + self.opt.img_format)\
        .format(self.img_ind), img_2d)
      cv2.imwrite((self.opt.debug_dir+ '/{}pc_pillar_2d_out.' + self.opt.img_format)\
        .format(self.img_ind), img_2d_out)
      cv2.imwrite((self.opt.debug_dir+'/{}pc_pillar_2d_blank.'+ self.opt.img_format)\
        .format(self.img_ind), blank_image)
      cv2.imwrite((self.opt.debug_dir+'/{}pc_pillar_2d_overlay.'+ self.opt.img_format)\
        .format(self.img_ind), output)
      cv2.imwrite((self.opt.debug_dir+'/{}pc_pillar_3d.'+ self.opt.img_format)\
        .format(self.img_ind), img_3d)
      self.img_ind += 1
    ## DEBUG #################################################################
    return pillar_wh
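
The [xyxy] bounding box above is just the axis-aligned extent of the eight projected pillar corners; a minimal standalone sketch of that reduction on synthetic corners:

import numpy as np

box_2d_t = np.random.rand(2, 8) * 100                      # synthetic corners, shape (2, 8)
bbox = [np.min(box_2d_t[0, :]), np.min(box_2d_t[1, :]),
        np.max(box_2d_t[0, :]), np.max(box_2d_t[1, :])]    # [x1, y1, x2, y2]
w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]                # pillar width / height
print(bbox, w, h)
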
def run(slid_dir):
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                        help='input batch size for training (default: 100)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                        help='how many batches to wait before '
                             'logging training status')
    parser.add_argument('--resume', default='./models/model_detector.pth',
                        help="path to model (to continue training)")
    parser.add_argument('--outname', default='./scores_detector_test',
                        help="path to scores' file")
    
    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
    
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    
    model = Net()
    if args.resume != '':
        model.load_state_dict(torch.load(args.resume))
    print(model)
    
    if args.cuda:
        model.cuda()
    
    def test_eval(epoch):
        model.eval()
        test_loss = 0
        correct = 0
        scores = []
        for batch_idx, (data, target) in enumerate(test_loader):
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            data, target = Variable(data, volatile=True), Variable(target)
            output = model(data)
            pred = output.data.max(1)[1]
            correct += pred.eq(target.data).cpu().sum()
            if batch_idx % args.log_interval == 0:
                print('Eval Patient: {} [{}/{} ({:.0f}%)]'.format(
                    epoch, batch_idx * len(data), len(test_loader.dataset),
                    100. * batch_idx / len(test_loader)))
            scores.extend((output.data).cpu().numpy())
        test_loss = test_loss
        test_loss /= len(test_loader)
        return scores

    files = os.listdir(slid_dir)
    files.sort()
    
    dist_thresh = 10
    all_scores = []
    all_labels = []
    all_images = []
    all_patients = []
    all_cands = []
    idPat = 0
    tot_time = time.time()
    for f in range(len(files)):
        tmp = np.load(str(slid_dir + files[f]))
        slices = tmp
        slices = np.swapaxes(slices, 2, 3)
        slices = np.expand_dims(slices, axis=1)
        slices = slices.astype(np.float32)
        print('\n Patient ' + str(f+1) + '/' + str(len(files)) + ' loaded.')
        labels = np.zeros(len(slices))
        labels = labels.astype(np.int64)
        vdata = torch.from_numpy(slices)
        vlabel = torch.from_numpy(labels)
        testv = data_utils.TensorDataset(vdata, vlabel)
        test_loader = data_utils.DataLoader(testv, batch_size=args.batch_size, shuffle=False)
    
        scores = test_eval(f+1)
    
        all_scores.extend(scores)
        pat_name = str(files[f][:-4])
        all_patients.extend([pat_name]) #patients names
        all_labels.extend(labels) #labels
        all_images.extend((idPat+1)*np.ones(len(scores))) #patient index
        idPat += 1
    
    np.savez(args.outname, all_patients, all_scores, all_labels, all_images)
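
Because np.savez above is called with positional arrays, they are stored under the default keys arr_0..arr_3; a sketch of reading them back (the path is the hypothetical default from the argparse setup above):

import numpy as np

data = np.load('./scores_detector_test.npz')
print(data.files)  # ['arr_0', 'arr_1', 'arr_2', 'arr_3']
patients, scores, labels, images = (data[k] for k in ['arr_0', 'arr_1', 'arr_2', 'arr_3'])
print(len(patients), len(scores))
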
Exemplo n.º 46
0
    def on_new_messages(self, messages):
        ''' [Optional] Called according to the execution mode of the block.

        Parameters
        ----------
        messages : dict
            A dictionary of the port keys and the values of the incoming messages.

        '''
        with self.graph.as_default():
            image = to_ndarray(messages['image'])
            image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            imH, imW, _ = image.shape
            image_resized = cv2.resize(image_rgb, (self.width, self.height))
            input_data = np.expand_dims(image_resized, axis=0)
            if self.floating_model:
                input_data = (np.float32(input_data) -
                              self.input_mean) / self.input_std

            self.interpreter.set_tensor(
                self.input_details[0]['index'], input_data)

            start_time = time.time()
            self.interpreter.invoke()
            stop_time = time.time()

            boxes = self.interpreter.get_tensor(
                self.output_details[0]['index'])[0]
            classes = self.interpreter.get_tensor(
                self.output_details[1]['index'])[0]
            scores = self.interpreter.get_tensor(
                self.output_details[2]['index'])[0]

            for i in range(len(scores)):
                if ((scores[i] > self.min_conf_threshold) and (scores[i] <= 1.0)):

                    # Get bounding box coordinates and draw box
                    # The interpreter can return coordinates outside the image
                    # dimensions; force them within the image using max() and min()
                    ymin = int(max(1, (boxes[i][0] * imH)))
                    xmin = int(max(1, (boxes[i][1] * imW)))
                    ymax = int(min(imH, (boxes[i][2] * imH)))
                    xmax = int(min(imW, (boxes[i][3] * imW)))

                    cv2.rectangle(image, (xmin, ymin),
                                  (xmax, ymax), (10, 255, 0), 2)

                    # Draw label
                    # Look up object name from "labels" array using class index
                    object_name = self.labels[int(classes[i])]
                    label = '%s: %d%%' % (object_name, int(
                        scores[i]*100))  # Example: 'person: 72%'
                    labelSize, baseLine = cv2.getTextSize(
                        label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)  # Get font size
                    # Make sure not to draw label too close to top of window
                    label_ymin = max(ymin, labelSize[1] + 10)
                    # Draw white box to put label text in
                    cv2.rectangle(image, (xmin, label_ymin-labelSize[1]-10), (
                        xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED)
                    cv2.putText(image, label, (xmin, label_ymin-7),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)  # Draw label text
            img_msg = from_ndarray(image, messages['image'].header)
            self.publish("out_img", img_msg)
            print("Detection Took {}".format(stop_time-start_time))
# Count the number of times the actual up days match the predicted up days
num_accurate_predictions = (list(yData == predictions)).count(True)

pctAccuracy = float(num_accurate_predictions) / float(len(predictions))

print("Accuracy: ", pctAccuracy)

# Logistic regression with estimators

import tensorflow as tf

features = [tf.contrib.layers.real_valued_column("x", dimension=1)]

estimator = tf.contrib.learn.LinearClassifier(feature_columns=features)

x = np.expand_dims(xData[:, 0], axis=1)

y = np.expand_dims(np.array(yData), axis=1)

# Batch size of 100 and 10000 epochs
input_fn = tf.contrib.learn.io.numpy_input_fn({"x": x}, y, batch_size=100, num_epochs=10000)

fit = estimator.fit(input_fn=input_fn, steps=10000)

# All data points in a single batch with just one epoch
input_fn_oneshot = tf.contrib.learn.io.numpy_input_fn({"x": x}, y, batch_size=len(x), num_epochs=1)

results = fit.evaluate(input_fn=input_fn_oneshot, steps=1)

print(results)
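
The two expand_dims calls above reshape 1-D feature and label vectors into the (N, 1) columns the estimator expects; a minimal sketch with synthetic data:

import numpy as np

xData = np.random.rand(100, 3)
yData = np.random.randint(0, 2, 100)
x = np.expand_dims(xData[:, 0], axis=1)      # (100,) -> (100, 1)
y = np.expand_dims(np.array(yData), axis=1)  # (100,) -> (100, 1)
print(x.shape, y.shape)
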
Exemplo n.º 48
0
    ret, frame = cap.read()
    roi = frame[200:420, 200:420]

    cv2.rectangle(frame, (200, 200), (420, 420), (0, 255, 0), 0)
    if count == 25:
        count = 0
        maxAlpha = max(alpha, key=alpha.get)
        #cv2.putText(frame,maxAlpha, (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
        alpha.clear()

    cv2.putText(frame, maxAlpha, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                (255, 0, 0), 2, cv2.LINE_AA)
    mask = preprocess(roi)
    maskR = Image.fromarray(mask, mode=None)
    mask1 = maskR.resize((110, 110), Image.ANTIALIAS)
    mask2 = np.expand_dims(mask1, axis=0)
    mask3 = np.expand_dims(mask2, axis=3)
    classes = model.predict_classes(mask3).item(0)  # extract the scalar class id
    if classes > 9:
        char = chr(classes - 10 + ord('A'))
        if char in alpha.keys():
            alpha[char] += 1
        else:
            alpha[char] = 1
        #cv2.putText(frame,chr(classes-10+ord('A')), (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
    else:
        char = str(classes)
        if char in alpha.keys():
            alpha[char] += 1
        else:
            alpha[char] = 1
Exemplo n.º 49
0
    def visualize_feature_map(self, model):
        """
        This method visualizes the feature map of the last convolutional layer of the network. 
        """
        # Define path
        img_path = os.path.join("..", "data", "unseen_images",
                                self.unseen_image)

        # Load image with dimensions corresponding to training images
        img = load_img(img_path, target_size=(224, 224))

        # Convert image to array
        x = img_to_array(img)

        # Convert to rank 4 tensor
        x = np.expand_dims(x, axis=0)

        # Preprocess to be in line with ResNet50 data
        x = preprocess_input(x)

        # Create an activation heatmap for the final layer. This takes advantage
        # of how the model learns through gradient descent: we use the gradients
        # learned during training and go the opposite way (maximizing rather than
        # minimizing). Essentially, the gradients in the final layer highlight
        # which regions are particularly informative when predicting a given class.
        with tf.GradientTape() as tape:

            # Take the last convolutional layer in the network
            last_conv_layer = model.get_layer('conv5_block3_out')

            # Create a model that maps the input image to the activations of the
            # last convolutional layer as well as to the output predictions
            iterate = tf.keras.models.Model(
                [model.inputs], [model.output, last_conv_layer.output])

            # Compute the gradient of the top predicted class for the input image with respect to the activations of the last conv layer
            # Take the gradients from the last layer
            model_out, last_conv_layer = iterate(x)

            # Find the class that has been predicted by the model
            class_out = model_out[:, np.argmax(model_out[0])]

            # Extract gradient of the output neuron of the last convolutional layer
            grads = tape.gradient(class_out, last_conv_layer)

            # Vector of mean intensity of the gradient over a specific feature map channel
            pooled_grads = K.mean(grads, axis=(0, 1, 2))

            # Multiply each channel in the feature map array by "how important this
            # channel is" with regard to the top predicted class, then sum over the
            # channels to obtain the heatmap class activation
            heatmap = tf.reduce_mean(tf.multiply(pooled_grads,
                                                 last_conv_layer),
                                     axis=-1)
            heatmap = np.maximum(heatmap, 0)
            heatmap /= np.max(heatmap)
            heatmap = heatmap.reshape((7, 7))
            plt.matshow(heatmap)

            # Load unseen image with OpenCV
            img = cv2.imread(img_path)

            # Make heatmap semi-transparent
            intensity = 0.5

            # Resize the heatmap to be the original dimensions of the input
            heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))

            # Apply colormap
            heatmap = cv2.applyColorMap(np.uint8(255 * heatmap),
                                        cv2.COLORMAP_JET)

            # Multiply heatmap by intensity and 'add' this on top of the original image
            superimposed = (heatmap * intensity) + img

            # Save the superimposed image to output directory
            cv2.imwrite(
                os.path.join("..", "output",
                             f"{self.unseen_image}_superimposed_heatmap.png"),
                superimposed)

        # User message
        print(
            f"\n[INFO] The feature map has now been visualized and superimposed on {self.unseen_image}. Find image as {self.unseen_image}_superimposed_heatmap.png in 'output' directory..."
        )
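
The overlay at the end of the method is plain array arithmetic: resize the class-activation map to the image, colour-map it, and blend. A sketch of just that step on synthetic data:

import cv2
import numpy as np

img = np.zeros((224, 224, 3), dtype=np.uint8)       # stand-in for the input image
heatmap = np.random.rand(7, 7).astype(np.float32)   # stand-in 7x7 activations in [0, 1]

heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
heatmap = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
superimposed = heatmap * 0.5 + img                  # intensity = 0.5
cv2.imwrite('superimposed.png', np.clip(superimposed, 0, 255).astype(np.uint8))
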
Exemplo n.º 50
0
def main():
    #
    print('\nRunning fold: ' + sys.argv[1])
    kfold_ = int(sys.argv[1]) # only train for one cross-validation fold at a time (this way we can train all folds in parallel)
    print(type(kfold_))

    # load data
    tr_fact = 1 # 1 is 100% data for training

    out_dir = './results/rcnn_merge_time_coch_cval10_brain_hfb/' + \
              'n_back_6_cnnT_300h_cnnF_100h_rnn_300h_alt_alt2_concattest_train' + str(int(tr_fact * 100)) + '/'
    x1_file = './data/M3_audio_mono_down.wav'
    x2_file = './data/minoes_wav_freq_125Hz_abs.npy'
    t_file  = './data/minoes_hfb_6subjs.npy'
    xtr1    = librosa.load(x1_file, sr=8000)[0]
    xtr2    = np.load(x2_file).astype(np.float32)
    ttr     = np.load(t_file).astype(np.float32)
    print('Train data: ' + str(int(tr_fact * 100)) + '%')

    # resample brain and spectrogram data to 50 Hz
    xtr2    = resample(xtr2, sr1=50, sr2=125)
    ttr     = resample(ttr, sr1=50, sr2=125)

    # take a sample in sec
    global sr1, sr2, sr3, n_back
    sr1     = 8000
    sr2     = 50
    sr3     = 50
    nsec    = ttr.shape[0] / float(sr2)
    nsamp   = nsec * 1
    n2      = int(nsamp * sr2)
    n3      = int(nsamp * sr3)
    xtr2    = xtr2[:n2]
    ttr     = ttr[:n3]

    # cut raw audio to match brain data (ttr) length in sec
    n1      = int(nsamp * sr1)
    xtr1    = xtr1[:n1]
    xtr1    = xtr1[:, None]

    # set up cross-validation for performance accuracy: set-up the same way for all folds when folds are trained separately
    kfolds = 10
    nparts = 7 # test set is not a continuous chunk but is a concatenation of nparts fragments for better performance
    ind1 = np.arange(xtr1.shape[0])
    ind2 = np.arange(ttr.shape[0])
    ind3 = np.arange(ttr.shape[0])
    TestI_, TestI = [], []
    kf = KFold(n_splits=kfolds * nparts)

    for (_, ix1_test), (_, ix2_test), (_, it_test) in zip(kf.split(xtr1), kf.split(xtr2), kf.split(ttr)):
        TestI_.append([ix1_test, ix2_test, it_test])

    for kfold in range(kfolds):
        TestI.append([np.array(
            [item for sublist in [TestI_[i][j] for i in range(0 + kfold, kfolds * nparts + kfold, kfolds)] for item in
             sublist])
                      for j in range(len(TestI_[0]))])


    if (out_dir is not None) and (not os.path.exists(out_dir)): os.makedirs(out_dir)
    process = psutil.Process(os.getpid())
    print(process.memory_info().rss / 1024 / 1024 / 1024)

    # standard sklearn preprocessing of data
    scaler = Scaler()
    kfold = kfold_
    ktrain, ktest, _ = scaler([xtr1[np.setdiff1d(ind1, TestI[kfold][0])], xtr2[np.setdiff1d(ind2, TestI[kfold][1])], ttr[np.setdiff1d(ind3, TestI[kfold][2])]],
                              [xtr1[TestI[kfold][0]], xtr2[TestI[kfold][1]], ttr[TestI[kfold][2]]], None)

    nsec_tr    = ktrain[-1].shape[0] / float(sr2)
    nsamp_tr   = nsec_tr * tr_fact
    # wrap in list() so this also works under Python 3, where map() is lazy
    ktrain = list(map(lambda x, n: x.copy()[:n], ktrain, [int(nsamp_tr * i) for i in [sr1, sr2, sr3]]))
    print(list(map(len, ktrain)))
    print(list(map(len, ktest)))

    # model parameters
    dur     = 1 # sec units
    batch_size = 16
    n_back  = 6 * dur # in dur units, temporal window of input data (how much data the model sees at once)
    nepochs = 30
    n_out   = ttr.shape[-1]
    alpha   = 5e-04
    h_cnn_t   = 300 # number of hidden units on top layer of CNN time
    h_cnn_f   = 100 # number of hidden units on top layer of CNN freq/spectra
    h_rnn   = 300 # number of hidden units of RNN

    print('batch size: ' + str(batch_size) + ', nepochs: ' + str(nepochs) + ', lr: ' + str(alpha) +
                            ', h_cnn_t: ' + str(h_cnn_t) + ', h_cnn_f: ' + str(h_cnn_f) + ', h_rnn: ' + str(h_rnn))
    print('outdir: ' + out_dir)

    # set up model
    rcnn = RCNN(h_cnn_t, h_cnn_f, h_rnn, n_out)
    opt = chainer.optimizers.Adam(alpha)
    opt.setup(rcnn)
	
    with open(out_dir + 'fold' + str(kfold) + '_run.log', 'wb'): pass # running epoch and best performance are saved to txt file for bookkeeping
    with open(out_dir + 'fold' + str(kfold) + '_epoch.txt', 'wb'): pass

    # train loop
    best_acc = -1
    for epoch in range(nepochs):
        print('Epoch ' + str(epoch))
        with open(out_dir + 'fold' + str(kfold) + '_run.log', 'a') as fid0:
            fid0.write('epoch' + str(epoch) + '\n')
        rcnn.reset_state()
        x1, x2, t = roll_data(ktrain, [.14 * epoch * sr for sr in [sr1, sr2, sr3]])
        x1, x2, t = prepare_input([x1, x2, t], [sr1, sr2, sr3], n_back)
        xbs1, xbs2, tbs = get_batches([x1, x2, t], batch_size)
        print(process.memory_info().rss / 1024 / 1024 / 1024)

        for ib, (xb1, xb2, tb) in enumerate(zip(xbs1, xbs2, tbs)):
            with chainer.using_config('train', True):
                y = rcnn([np.expand_dims(xb1, 1), np.expand_dims(xb2, 1)], n_back)
                loss = 0
                for ni in range(y.shape[1]):
                    loss += F.mean_squared_error(tb[:, ni, :], y[:, ni, :])
                r = acc_pass(tb.reshape((-1, n_out)), y.data.reshape((-1, n_out)))
                print('\t\tbatch ' + str(ib) + ', train loss: ' + str(loss.data / tb.shape[1]) + ', max acc: ' + str(np.max(r)))
                rcnn.cleargrads()
                loss.backward()
                loss.unchain_backward()
                opt.update()

        xb1_, xb2_, tb_ = prepare_input(ktest, [sr1, sr2, sr3], n_back)
        rcnn.reset_state()
        with chainer.using_config('train', False):
            y_ = rcnn([np.expand_dims(xb1_, 1), np.expand_dims(xb2_, 1)], n_back)
            loss_ = 0
            for ni in range(y_.shape[1]):
                loss_ += F.mean_squared_error(tb_[:, ni, :], y_[:, ni, :])

        r = acc_pass(tb_.reshape((-1, n_out)), y_.data.reshape((-1, n_out)))
        print('\t\ttest loss: ' + str(np.round(loss_.data / tb_.shape[1], 3)) + ', max acc: ' + str(
            np.round(np.sort(r)[::-1][:10], 4)))
        run_acc = np.mean(np.sort(r)[::-1][:10])
        if run_acc > best_acc: # only if performance of current model is superior, save it to file
            print('Current model is best: ' + str(np.round(run_acc, 4)) + ' > ' + str(
                np.round(best_acc, 4)) + ': saving update to disk')
            best_acc = run_acc.copy()
            serializers.save_npz(out_dir + '/model' + str(kfold) + '.npz', rcnn)
            with open(out_dir + 'fold' + str(kfold) + '_epoch.txt', 'a') as fid:
                fid.write(str(epoch) + '\n')
                fid.write(str(np.sort(r)[::-1][:10]) + '\n')
            np.save(out_dir + '/predictions_fold' + str(kfold), y_.data.reshape((-1, n_out)))
            np.save(out_dir + '/targets_fold' + str(kfold), tb_.reshape((-1, n_out)))
Exemplo n.º 51
0
def Softmax(m_input):
    scores_exp = np.exp(m_input)
    softmax = scores_exp / np.expand_dims(np.sum(scores_exp, axis=1), axis=1)
    return softmax
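
np.exp above can overflow for large scores; a numerically stable variant (the standard max-subtraction trick, not part of the original) keeps the same expand_dims broadcasting:

import numpy as np

def softmax_stable(m_input):
    # subtracting the row max leaves the result unchanged but bounds np.exp inputs
    shifted = m_input - np.expand_dims(np.max(m_input, axis=1), axis=1)
    scores_exp = np.exp(shifted)
    return scores_exp / np.expand_dims(np.sum(scores_exp, axis=1), axis=1)

print(softmax_stable(np.array([[1000.0, 1001.0]])))  # [[0.26894142 0.73105858]], no overflow
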
Exemplo n.º 52
0
    def collision_handling_subroutine(self, rewards, is_last):
        # Players stop on hitting a wall
        for npu, pursuer in enumerate(self._pursuers):
            clippedx_2 = np.clip(pursuer.position, 0, 1)
            vel_2 = pursuer.velocity
            vel_2[pursuer.position != clippedx_2] = 0
            pursuer.set_velocity(vel_2)
            pursuer.set_position(clippedx_2)

        obstacle_coll_Np = np.zeros(self.n_pursuers)
        # Particles rebound on hitting an obstacle
        for npu, pursuer in enumerate(self._pursuers):
            distfromobst_No = ssd.cdist(np.expand_dims(
                pursuer.position, 0), self.obstaclesx_No_2)
            is_colliding_No = distfromobst_No <= pursuer._radius + self.obstacle_radius
            obstacle_coll_Np[npu] = is_colliding_No.sum()
            if obstacle_coll_Np[npu] > 0:
                velocity_scale = pursuer._radius + self.obstacle_radius - \
                    ssd.euclidean(pursuer.position, self.obstaclesx_No_2)
                pos_diff = pursuer.position - self.obstaclesx_No_2[0]
                new_pos = pursuer.position + velocity_scale * pos_diff
                pursuer.set_position(new_pos)

                collision_normal = pursuer.position - self.obstaclesx_No_2[0]
                # project current velocity onto collision normal
                current_vel = pursuer.velocity
                proj_numer = np.dot(current_vel, collision_normal)
                cllsn_mag = np.dot(collision_normal, collision_normal)
                proj_vel = (proj_numer / cllsn_mag) * collision_normal
                perp_vel = current_vel - proj_vel
                total_vel = perp_vel - proj_vel
                pursuer.set_velocity(total_vel)

        if is_last:
            obstacle_coll_Ne = np.zeros(self.n_evaders)
            for nev, evader in enumerate(self._evaders):
                distfromobst_No = ssd.cdist(np.expand_dims(
                    evader.position, 0), self.obstaclesx_No_2)
                is_colliding_No = distfromobst_No <= evader._radius + self.obstacle_radius
                obstacle_coll_Ne[nev] = is_colliding_No.sum()
                if obstacle_coll_Ne[nev] > 0:
                    velocity_scale = evader._radius + self.obstacle_radius - \
                        ssd.euclidean(evader.position, self.obstaclesx_No_2)
                    pos_diff = evader.position - self.obstaclesx_No_2[0]
                    evader.set_position(
                        evader.position + velocity_scale * pos_diff)

                    collision_normal = evader.position - \
                        self.obstaclesx_No_2[0]
                    # project current velocity onto collision normal
                    current_vel = evader.velocity
                    proj_numer = np.dot(current_vel, collision_normal)
                    cllsn_mag = np.dot(collision_normal, collision_normal)
                    proj_vel = (proj_numer / cllsn_mag) * collision_normal
                    perp_vel = current_vel - proj_vel
                    total_vel = perp_vel - proj_vel
                    evader.set_velocity(total_vel)

            obstacle_coll_Npo = np.zeros(self.n_poison)
            for npo, poison in enumerate(self._poisons):
                distfromobst_No = ssd.cdist(np.expand_dims(
                    poison.position, 0), self.obstaclesx_No_2)
                is_colliding_No = distfromobst_No <= poison._radius + self.obstacle_radius
                obstacle_coll_Npo[npo] = is_colliding_No.sum()
                if obstacle_coll_Npo[npo] > 0:
                    velocity_scale = poison._radius + self.obstacle_radius - \
                        ssd.euclidean(poison.position, self.obstaclesx_No_2)
                    pos_diff = poison.position - self.obstaclesx_No_2[0]
                    poison.set_position(
                        poison.position + velocity_scale * pos_diff)

                    collision_normal = poison.position - \
                        self.obstaclesx_No_2[0]
                    # project current velocity onto collision normal
                    current_vel = poison.velocity
                    proj_numer = np.dot(current_vel, collision_normal)
                    cllsn_mag = np.dot(collision_normal, collision_normal)
                    proj_vel = (proj_numer / cllsn_mag) * collision_normal
                    perp_vel = current_vel - proj_vel
                    total_vel = perp_vel - proj_vel
                    poison.set_velocity(total_vel)

        # Find collisions
        pursuersx_Np_2 = np.array(
            [pursuer.position for pursuer in self._pursuers])
        evadersx_Ne_2 = np.array([evader.position for evader in self._evaders])
        poisonx_Npo_2 = np.array([poison.position for poison in self._poisons])

        # Evaders
        evdists_Np_Ne = ssd.cdist(pursuersx_Np_2, evadersx_Ne_2)
        is_colliding_ev_Np_Ne = evdists_Np_Ne <= np.asarray([
            pursuer._radius + evader._radius for pursuer in self._pursuers
            for evader in self._evaders
        ]).reshape(self.n_pursuers, self.n_evaders)

        # num_collisions depends on how many needed to catch an evader
        ev_caught, which_pursuer_caught_ev = self._caught(
            is_colliding_ev_Np_Ne, self.n_coop)

        # Poisons
        podists_Np_Npo = ssd.cdist(pursuersx_Np_2, poisonx_Npo_2)
        is_colliding_po_Np_Npo = podists_Np_Npo <= np.asarray([
            pursuer._radius + poison._radius for pursuer in self._pursuers
            for poison in self._poisons
        ]).reshape(self.n_pursuers, self.n_poison)
        po_caught, which_pursuer_caught_po = self._caught(
            is_colliding_po_Np_Npo, 1)

        # Find sensed objects
        # Obstacles
        sensorvals_Np_K_No = np.array(
            [pursuer.sensed(self.obstaclesx_No_2) for pursuer in self._pursuers])

        # Evaders
        sensorvals_Np_K_Ne = np.array(
            [pursuer.sensed(evadersx_Ne_2) for pursuer in self._pursuers])

        # Poison
        sensorvals_Np_K_Npo = np.array(
            [pursuer.sensed(poisonx_Npo_2) for pursuer in self._pursuers])

        # Allies
        sensorvals_Np_K_Np = np.array(
            [pursuer.sensed(pursuersx_Np_2, same=True) for pursuer in self._pursuers])

        # dist features
        closest_ob_idx_Np_K = np.argmin(sensorvals_Np_K_No, axis=2)
        closest_ob_dist_Np_K = self._closest_dist(
            closest_ob_idx_Np_K, sensorvals_Np_K_No)
        sensedmask_ob_Np_K = np.isfinite(closest_ob_dist_Np_K)
        sensed_obdistfeatures_Np_K = np.zeros(
            (self.n_pursuers, self.n_sensors))
        sensed_obdistfeatures_Np_K[sensedmask_ob_Np_K] = closest_ob_dist_Np_K[sensedmask_ob_Np_K]
        # Evaders
        closest_ev_idx_Np_K = np.argmin(sensorvals_Np_K_Ne, axis=2)
        closest_ev_dist_Np_K = self._closest_dist(
            closest_ev_idx_Np_K, sensorvals_Np_K_Ne)
        sensedmask_ev_Np_K = np.isfinite(closest_ev_dist_Np_K)
        sensed_evdistfeatures_Np_K = np.zeros(
            (self.n_pursuers, self.n_sensors))
        sensed_evdistfeatures_Np_K[sensedmask_ev_Np_K] = closest_ev_dist_Np_K[sensedmask_ev_Np_K]
        # Poison
        closest_po_idx_Np_K = np.argmin(sensorvals_Np_K_Npo, axis=2)
        closest_po_dist_Np_K = self._closest_dist(
            closest_po_idx_Np_K, sensorvals_Np_K_Npo)
        sensedmask_po_Np_K = np.isfinite(closest_po_dist_Np_K)
        sensed_podistfeatures_Np_K = np.zeros(
            (self.n_pursuers, self.n_sensors))
        sensed_podistfeatures_Np_K[sensedmask_po_Np_K] = closest_po_dist_Np_K[sensedmask_po_Np_K]
        # Allies
        closest_pu_idx_Np_K = np.argmin(sensorvals_Np_K_Np, axis=2)
        closest_pu_dist_Np_K = self._closest_dist(
            closest_pu_idx_Np_K, sensorvals_Np_K_Np)
        sensedmask_pu_Np_K = np.isfinite(closest_pu_dist_Np_K)
        sensed_pudistfeatures_Np_K = np.zeros(
            (self.n_pursuers, self.n_sensors))
        sensed_pudistfeatures_Np_K[sensedmask_pu_Np_K] = closest_pu_dist_Np_K[sensedmask_pu_Np_K]

        # speed features
        pursuersv_Np_2 = np.array(
            [pursuer.velocity for pursuer in self._pursuers])
        evadersv_Ne_2 = np.array([evader.velocity for evader in self._evaders])
        poisonv_Npo_2 = np.array([poison.velocity for poison in self._poisons])

        # Evaders

        sensed_evspeedfeatures_Np_K = self._extract_speed_features(evadersv_Ne_2,
                                                                   closest_ev_idx_Np_K,
                                                                   sensedmask_ev_Np_K)
        # Poison
        sensed_pospeedfeatures_Np_K = self._extract_speed_features(poisonv_Npo_2,
                                                                   closest_po_idx_Np_K,
                                                                   sensedmask_po_Np_K)
        # Allies
        sensed_puspeedfeatures_Np_K = self._extract_speed_features(pursuersv_Np_2,
                                                                   closest_pu_idx_Np_K,
                                                                   sensedmask_pu_Np_K)

        # Process collisions
        # If object collided with required number of players, reset its position and velocity
        # Effectively the same as removing it and adding it back
        if ev_caught.size:
            for evcaught in ev_caught:
                self._evaders[evcaught].set_position(self.np_random.rand(2))
                self._evaders[evcaught].set_position(
                    self._respawn(self._evaders[evcaught].position, self._evaders[evcaught]
                                  ._radius))
                self._evaders[evcaught].set_velocity(
                    (self.np_random.rand(2,) - 0.5) * self.ev_speed)

        if po_caught.size:
            for pocaught in po_caught:
                self._poisons[pocaught].set_position(self.np_random.rand(2))
                self._poisons[pocaught].set_position(
                    self._respawn(self._poisons[pocaught].position, self._poisons[pocaught]
                                  ._radius))
                self._poisons[pocaught].set_velocity(
                    (self.np_random.rand(2,) - 0.5) * self.poison_speed)

        ev_encounters, which_pursuer_encountered_ev = self._caught(
            is_colliding_ev_Np_Ne, 1)
        # Update reward based on these collisions
        rewards[which_pursuer_caught_ev] += self.food_reward
        rewards[which_pursuer_caught_po] += self.poison_reward
        rewards[which_pursuer_encountered_ev] += self.encounter_reward

        # Add features together
        if self._speed_features:
            sensorfeatures_Np_K_O = np.c_[sensed_obdistfeatures_Np_K, sensed_evdistfeatures_Np_K,
                                          sensed_evspeedfeatures_Np_K, sensed_podistfeatures_Np_K,
                                          sensed_pospeedfeatures_Np_K, sensed_pudistfeatures_Np_K,
                                          sensed_puspeedfeatures_Np_K]
        else:
            sensorfeatures_Np_K_O = np.c_[sensed_obdistfeatures_Np_K, sensed_evdistfeatures_Np_K,
                                          sensed_podistfeatures_Np_K, sensed_pudistfeatures_Np_K]

        return sensorfeatures_Np_K_O, is_colliding_ev_Np_Ne, is_colliding_po_Np_Npo, rewards
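Note: _extract_speed_features (used above) and _caught are defined elsewhere in this class and are not shown in this excerpt. As a hedged sketch only, not the class's actual implementation, the speed-feature helper could project each closest sensed object's relative velocity onto the corresponding sensor ray:

import numpy as np

def _extract_speed_features(self, objv_N_2, closest_obj_idx_Np_K, sensedmask_obj_Np_K):
    # Hypothetical sketch: for each pursuer, take the velocity of the object
    # closest to each sensor, make it relative to the pursuer, and project it
    # onto that sensor's direction. Sensors that saw nothing stay zero.
    sensed_speedfeatures_Np_K = np.zeros((self.n_pursuers, self.n_sensors))
    for ipur, pursuer in enumerate(self._pursuers):
        relvel_K_2 = objv_N_2[closest_obj_idx_Np_K[ipur]] - pursuer.velocity
        # pursuer.sensors is assumed to be a (K, 2) array of unit ray directions
        proj_K = np.einsum('kd,kd->k', pursuer.sensors, relvel_K_2)
        mask_K = sensedmask_obj_Np_K[ipur]
        sensed_speedfeatures_Np_K[ipur][mask_K] = proj_K[mask_K]
    return sensed_speedfeatures_Np_K

The np.c_ concatenation at the end then simply column-stacks these (n_pursuers, n_sensors) blocks, so each feature group widens every pursuer's observation by n_sensors columns.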
Exemplo n.º 53
print("[INFO] encoding labels...")
le = LabelEncoder()
le.fit(train_labels)

# variables to hold features and labels
features = []
labels   = []

# loop over all the labels in the folder
i = 0
for label in train_labels:
	cur_path = train_path + "/" + label
	for image_path in glob.glob(cur_path + "/*.jpg"):
		img = image.load_img(image_path, target_size=image_size)
		x = image.img_to_array(img)
		x = np.expand_dims(x, axis=0)
		x = preprocess_input(x)
		feature = model.predict(x)
		flat = feature.flatten()
		features.append(flat)
		labels.append(label)
		print ("[INFO] processed - {}".format(i))
		i += 1
	print ("[INFO] completed label - {}".format(label))

# encode the labels using LabelEncoder
targetNames = np.unique(labels)
le = LabelEncoder()
le_labels = le.fit_transform(labels)

# get the shape of training labels
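This snippet assumes a pretrained Keras model and several names defined earlier (model, image, preprocess_input, image_size, train_path, train_labels). A minimal setup under those assumptions, using VGG16 as the feature extractor (the concrete model, paths, and labels here are illustrative guesses):

import os
import glob
import numpy as np
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.preprocessing import image
from sklearn.preprocessing import LabelEncoder

image_size = (224, 224)                               # VGG16 input size
train_path = "dataset/train"                          # one subfolder per label
train_labels = sorted(os.listdir(train_path))         # label names from folders
model = VGG16(weights="imagenet", include_top=False)  # convolutional features only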
Exemplo n.º 54
    def compute_message_to_parent(self, parent, index, u, *u_parents):

        if index == 0:

            # Shape(phi)    = [Nn,..,K,..,N0,Dd,..,D0]
            # Shape(L)      = [Nn,..,K,..,N0]
            # Shape(u)      = [Nn,..,N0,Dd,..,D0]
            # Shape(result) = [Nn,..,N0,K]

            # Compute g:
            # Shape(g)      = [Nn,..,K,..,N0]
            g = self.distribution.compute_cgf_from_parents(*(u_parents[1:]))
            # Reshape(g):
            # Shape(g)      = [Nn,..,N0,K]
            g = utils.moveaxis(g, self.cluster_plate, -1)

            # Compute phi:
            # Shape(phi)    = [Nn,..,K,..,N0,Dd,..,D0]
            phi = self.distribution.compute_phi_from_parents(*(u_parents[1:]))
            # Move phi axis:
            # Shape(phi)    = [Nn,..,N0,K,Dd,..,D0]
            for ind in range(len(phi)):
                if self.cluster_plate < 0:
                    axis_from = self.cluster_plate-self.distribution.ndims[ind]
                else:
                    raise RuntimeError("Cluster plate axis must be negative")
                axis_to = -1-self.distribution.ndims[ind]
                if np.ndim(phi[ind]) >= abs(axis_from):
                    # Cluster plate axis exists, move it to the correct position
                    phi[ind] = utils.moveaxis(phi[ind], axis_from, axis_to)
                else:
                    # No cluster plate axis, just add a new axis to the correct
                    # position, if phi has something on that axis
                    if np.ndim(phi[ind]) >= abs(axis_to):
                        phi[ind] = np.expand_dims(phi[ind], axis=axis_to)

            # Reshape u:
            # Shape(u)      = [Nn,..,N0,1,Dd,..,D0]
            u_self = list()
            for ind in range(len(u)):
                u_self.append(np.expand_dims(u[ind],
                                             axis=(-1-self.distribution.ndims[ind])))

            # Compute logpdf:
            # Shape(L)      = [Nn,..,N0,K]
            L = self.distribution.compute_logpdf(u_self, phi, g, 0)

            # No need to sum over the non-cluster dimensions here; the
            # message passing framework handles that automatically.

            m = [L]

            return m

        elif index >= 1:

            # Parent index for the distribution used for the
            # mixture.
            index = index - 1

            # Reshape u:
            # Shape(u)      = [Nn,..1,..,N0,Dd,..,D0]
            u_self = list()
            for ind in range(len(u)):
                if self.cluster_plate < 0:
                    cluster_axis = self.cluster_plate - self.distribution.ndims[ind]
                else:
                    cluster_axis = self.cluster_plate
                u_self.append(np.expand_dims(u[ind], axis=cluster_axis))

            # Message from the mixed distribution
            m = self.distribution.compute_message_to_parent(parent,
                                                            index,
                                                            u_self,
                                                            *(u_parents[1:]))

            # Weigh the messages with the responsibilities
            for i in range(len(m)):

                # Shape(m)      = [Nn,..,K,..,N0,Dd,..,D0]
                # Shape(p)      = [Nn,..,N0,K]
                # Shape(result) = [Nn,..,K,..,N0,Dd,..,D0]

                # Number of axes for the variable dimensions for
                # the parent message.
                D = self.distribution.ndims_parents[index][i]

                # Responsibilities for clusters are the first
                # parent's first moment:
                # Shape(p)      = [Nn,..,N0,K]
                p = u_parents[0][0]
                # Move the cluster axis to the proper place:
                # Shape(p)      = [Nn,..,K,..,N0]
                p = utils.atleast_nd(p, abs(self.cluster_plate))
                p = utils.moveaxis(p, -1, self.cluster_plate)
                # Add axes for variable dimensions to the contributions
                # Shape(p)      = [Nn,..,K,..,N0,1,..,1]
                p = utils.add_trailing_axes(p, D)

                if self.cluster_plate < 0:
                    # Add the variable dimensions
                    cluster_axis = self.cluster_plate - D

                # Add axis for clusters:
                # Shape(m)      = [Nn,..,1,..,N0,Dd,..,D0]
                #m[i] = np.expand_dims(m[i], axis=cluster_axis)

                #
                # TODO: You could do summing here already so that
                # you wouldn't compute huge matrices as
                # intermediate result. Use einsum.

                # Compute the message contributions for each
                # cluster:
                # Shape(result) = [Nn,..,K,..,N0,Dd,..,D0]
                m[i] = m[i] * p

            return m
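The axis bookkeeping above is easier to see in isolation. A standalone sketch in plain NumPy (np.moveaxis standing in for utils.moveaxis), assuming cluster_plate = -2 and a variable with a single dimension (ndims = 1):

import numpy as np

K, N0, D = 3, 5, 2

# phi carries the cluster axis among the plates: shape [K, N0, D].
# axis_from = cluster_plate - ndims = -3 and axis_to = -1 - ndims = -2,
# so the cluster axis moves next to the variable dimensions:
phi = np.zeros((K, N0, D))
phi = np.moveaxis(phi, -3, -2)
print(phi.shape)  # (5, 3, 2), i.e. [N0, K, D]

# u has no cluster axis; it gains a singleton one at -1 - ndims = -2:
u = np.zeros((N0, D))
u = np.expand_dims(u, axis=-2)
print(u.shape)    # (5, 1, 2), i.e. [N0, 1, D]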
Exemplo n.º 55
def main(video_src):
    """MAIN
    
    :param video_src: Source of video to analyze
    :type video_src: str or int"""
    cam = cv2.VideoCapture(video_src)
    _, sample_frame = cam.read()

    # Introduce mark_detector to detect landmarks.
    mark_detector = MarkDetector()
    face_detector = FaceDetector()

    # Setup process and queues for multiprocessing.
    img_queue = Queue()
    box_queue = Queue()
    img_queue.put(sample_frame)
    box_process = Process(target=get_face, args=(mark_detector, img_queue, box_queue,))
    #box_process = Process(target=get_faces, args=(face_detector, img_queue, box_queue,))
    box_process.start()

    # Introduce pose estimator to solve pose. Get one frame to setup the
    # estimator according to the image size.
    height, width = sample_frame.shape[:2]
    pose_estimator = PoseEstimator(img_size=(height, width))

    # Introduce scalar stabilizers for pose.
    pose_stabilizers = [Stabilizer(
        state_num=2,
        measure_num=1,
        cov_process=0.1,
        cov_measure=0.1) for _ in range(6)]

    # Facial expression recognizer initialization
    from keras.models import model_from_json
    model = model_from_json(open("./model/facial_expression_model_structure.json", "r").read())
    model.load_weights('./model/facial_expression_model_weights.h5')  # load weights

    #-----------------------------

    emotions = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')

    while True:
        # Read a frame; crop or flip it below to suit your needs.
        frame_got, frame = cam.read()
        if frame_got is False:
            break

        # Crop it if frame is larger than expected.
        # frame = frame[0:480, 300:940]

        # If frame comes from webcam, flip it horizontally so it looks like a mirror.
        if video_src == 0:
            frame = cv2.flip(frame, 1)

        # Pose estimation by 3 steps:
        # 1. detect face;
        # 2. detect landmarks;
        # 3. estimate pose

        # Feed frame to image queue.
        img_queue.put(frame)

        # Get face from box queue.

        #facebox = box_queue.get()

        faceboxes = dump_queue(box_queue)
        print("{} FACEBOXES".format(len(faceboxes)))
        for facebox in faceboxes:
            if min(facebox) < 0:
                continue
            # Detect landmarks from image of 128x128.
            face_img = frame[facebox[1]: facebox[3],
                             facebox[0]: facebox[2]]

            if not face_img.shape[0] or not face_img.shape[1]:
                continue

            face_img = cv2.resize(face_img, (CNN_INPUT_SIZE, CNN_INPUT_SIZE))
            face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)
            marks = mark_detector.detect_marks(face_img)

            # Convert the marks locations from local CNN to global image.
            marks *= (facebox[2] - facebox[0])
            marks[:, 0] += facebox[0]
            marks[:, 1] += facebox[1]

            # Uncomment following line to show raw marks.
            # mark_detector.draw_marks(
            #     frame, marks, color=(0, 255, 0))

            detected_face = frame[facebox[1]: facebox[3], facebox[0]: facebox[2]]  # crop detected face
            detected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY)  # convert to grayscale
            detected_face = cv2.resize(detected_face, (48, 48))  # resize to 48x48



            # Emotion estimation
            img_pixels = image.img_to_array(detected_face)
            img_pixels = np.expand_dims(img_pixels, axis=0)

            img_pixels /= 255  # pixels are in [0, 255]; normalize to [0, 1]

            predictions = model.predict(img_pixels)  # probabilities of the 7 expressions

            # most probable expression: 0=angry, 1=disgust, 2=fear, 3=happy, 4=sad, 5=surprise, 6=neutral
            max_index = np.argmax(predictions[0])

            emotion = emotions[max_index]

            # Write the emotion text above the rectangle. Available Hershey fonts:
            # CV_FONT_HERSHEY_SIMPLEX        normal size sans-serif font
            # CV_FONT_HERSHEY_PLAIN          small size sans-serif font
            # CV_FONT_HERSHEY_DUPLEX         normal size sans-serif (more complex than SIMPLEX)
            # CV_FONT_HERSHEY_COMPLEX        normal size serif font
            # CV_FONT_HERSHEY_TRIPLEX        normal size serif (more complex than COMPLEX)
            # CV_FONT_HERSHEY_COMPLEX_SMALL  smaller version of COMPLEX
            # CV_FONT_HERSHEY_SCRIPT_SIMPLEX hand-writing style font
            # CV_FONT_HERSHEY_SCRIPT_COMPLEX more complex variant of SCRIPT_SIMPLEX

            image_text = ""
            for index in range(len(emotions)):
                if predictions[0][index]>0.3:
                    image_text += "{0} : {1} %\n".format(emotions[index], int(predictions[0][index]*100))

            space = 0
            for text in image_text.strip().split("\n"):
                cv2.putText(
                    img=frame,
                    text=text,
                    org=(int(facebox[0]), int(facebox[1]) - space),
                    fontFace=cv2.FONT_HERSHEY_PLAIN,
                    fontScale=0.8,
                    color=(255, 255, 255),
                    thickness=1
                )
                space += int(0.25 * 48)



            # Try pose estimation with 68 points.
            pose = pose_estimator.solve_pose_by_68_points(marks)
            #pose = pose_estimator.solve_pose(marks)


            # Stabilize the pose.
            #stabile_pose = []
            #pose_np = np.array(pose).flatten()
            #for value, ps_stb in zip(pose_np, pose_stabilizers):
            #    ps_stb.update([value])
            #    stabile_pose.append(ps_stb.state[0])
            #stabile_pose = np.reshape(stabile_pose, (-1, 3))

            # Draw the pose annotation box on the frame.
            pose_estimator.draw_annotation_box(
                frame, pose[0], pose[1], color=(255, 128, 128))

            # Uncomment the following lines to draw the stable pose annotation on the frame.
            #pose_estimator.draw_annotation_box(
            #    frame, stabile_pose[0], stabile_pose[1], color=(128, 255, 128))


        # Show preview.
        cv2.imshow("Preview", frame)
        if cv2.waitKey(10) == 27:
            break

    # Clean up the multiprocessing process.
    box_process.terminate()
    box_process.join()
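dump_queue is not defined in this excerpt. A plausible implementation (an assumption, not the project's actual helper) drains everything currently waiting in the queue, so each frame can yield zero or more faceboxes:

import queue

def dump_queue(q):
    # Drain the queue without blocking; return whatever items are ready now.
    items = []
    while True:
        try:
            items.append(q.get_nowait())
        except queue.Empty:
            break
    return items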
Exemplo n.º 56
 def ndarray(self):
     # atleast_2d-style handling: a single-field view collapses to 1-D,
     # so add the trailing axis back explicitly.
     if len(self.dtype) == 1:
         return np.expand_dims(self.view((float, len(self.dtype))), 1)
     else:
         return self.view((float, len(self.dtype)))
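For context, this method assumes self is a structured array whose fields are all float64; viewing it as (float, n_fields) turns the fields into a trailing axis. A small illustration:

import numpy as np

rec = np.zeros(4, dtype=[('x', float), ('y', float)])
arr = rec.view((float, len(rec.dtype)))  # fields become a trailing axis
print(arr.shape)  # (4, 2)
# With a single field, the same view yields a 1-D array on the NumPy
# versions this code targets, hence the np.expand_dims above.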
Exemplo n.º 57
def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):
    assert (true_boxes[..., 4] <
            num_classes).all(), 'class id must be less than num_classes'
    # there are three feature layers in total
    num_layers = len(anchors) // 3
    # anchor (prior) boxes
    # mask 6,7,8 -> 142x110, 192x243, 459x401
    # mask 3,4,5 -> 36x75,   76x55,   72x146
    # mask 0,1,2 -> 12x16,   19x36,   40x28
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]
                   ] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]]

    true_boxes = np.array(true_boxes, dtype='float32')
    input_shape = np.array(input_shape, dtype='int32')  # 416,416
    # read box centers and widths/heights
    # centers have shape (m, n, 2)
    boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
    boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
    # normalize to fractions of the input shape
    true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]
    true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]

    # m images
    m = true_boxes.shape[0]
    # grid shapes are 13x13, 26x26 and 52x52
    grid_shapes = [
        input_shape // {
            0: 32,
            1: 16,
            2: 8
        }[l] for l in range(num_layers)
    ]
    # y_true has shapes (m,13,13,3,85), (m,26,26,3,85) and (m,52,52,3,85)
    y_true = [
        np.zeros((m, grid_shapes[l][0], grid_shapes[l][1], len(
            anchor_mask[l]), 5 + num_classes),
                 dtype='float32') for l in range(num_layers)
    ]
    # [1,9,2]
    anchors = np.expand_dims(anchors, 0)
    anchor_maxes = anchors / 2.
    anchor_mins = -anchor_maxes
    # boxes are valid only if width and height are positive
    valid_mask = boxes_wh[..., 0] > 0

    for b in range(m):
        # process each image
        wh = boxes_wh[b, valid_mask[b]]
        if len(wh) == 0: continue
        # [n,1,2]
        wh = np.expand_dims(wh, -2)
        box_maxes = wh / 2.
        box_mins = -box_maxes

        # find which anchor best matches each ground-truth box
        intersect_mins = np.maximum(box_mins, anchor_mins)
        intersect_maxes = np.minimum(box_maxes, anchor_maxes)
        intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
        intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
        box_area = wh[..., 0] * wh[..., 1]
        anchor_area = anchors[..., 0] * anchors[..., 1]
        iou = intersect_area / (box_area + anchor_area - intersect_area)
        # shape is (n); thanks to 消尽不死鸟 for the reminder
        best_anchor = np.argmax(iou, axis=-1)

        for t, n in enumerate(best_anchor):
            for l in range(num_layers):
                if n in anchor_mask[l]:
                    # floor rounds down to the nearest integer
                    i = np.floor(true_boxes[b, t, 0] *
                                 grid_shapes[l][1]).astype('int32')
                    j = np.floor(true_boxes[b, t, 1] *
                                 grid_shapes[l][0]).astype('int32')
                    # locate this ground-truth box's cell in feature layer l of image b
                    k = anchor_mask[l].index(n)
                    c = true_boxes[b, t, 4].astype('int32')
                    y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]
                    y_true[l][b, j, i, k, 4] = 1
                    y_true[l][b, j, i, k, 5 + c] = 1

    return y_true
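A hypothetical call, for shape orientation only: one image with a single box in pixel coordinates (x_min, y_min, x_max, y_max, class_id) and the nine anchors listed in the comments above:

import numpy as np

anchors = np.array([[12, 16], [19, 36], [40, 28],
                    [36, 75], [76, 55], [72, 146],
                    [142, 110], [192, 243], [459, 401]])
true_boxes = np.array([[[100, 120, 180, 260, 5]]], dtype='float32')  # (m=1, n_boxes=1, 5)
y_true = preprocess_true_boxes(true_boxes, (416, 416), anchors, num_classes=80)
print([y.shape for y in y_true])
# [(1, 13, 13, 3, 85), (1, 26, 26, 3, 85), (1, 52, 52, 3, 85)]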
Exemplo n.º 58
def main():
    # Create images and animations folder

    Path("Imgs").mkdir(exist_ok=True)
    Path("Imgs/Telesismo").mkdir(exist_ok=True)
    Path("Imgs/Local1").mkdir(exist_ok=True)
    Path("Imgs/Local2").mkdir(exist_ok=True)
    Path("Animations").mkdir(exist_ok=True)

    # Load STEAD trace

    # st = '../Data_STEAD/Train_data.hdf5'
    #
    # with h5py.File(st, 'r') as h5_file:
    #     grp = h5_file['earthquake']['local']
    #     for idx, dts in enumerate(grp):
    #         st_trace = grp[dts][:, 0] / np.max(np.abs(grp[dts][:, 0]))
    #         break

    # Useful data

    # Fig. 3fo and 3bb.
    # Comparison between teleseism records from optical fiber (DAS) and a seismometer

    # file_fo = '../Data_Reykjanes/Jousset_et_al_2018_003_Figure3_fo.ascii'
    # file_bb = '../Data_Reykjanes/Jousset_et_al_2018_003_Figure3_bb.ascii'
    #
    # fs = 20
    #
    # data_fo = {
    #     'head': '',
    #     'strain': []
    # }
    #
    # data_bb = {
    #     'head': '',
    #     'strain': []
    # }
    #
    # with open(file_fo, 'r') as f:
    #     for idx, line in enumerate(f):
    #         if idx == 0:
    #             data_fo['head'] = line.strip()
    #         else:
    #             val = line.strip()
    #             data_fo['strain'].append(float(val))
    #
    # with open(file_bb, 'r') as f:
    #     for idx, line in enumerate(f):
    #         if idx == 0:
    #             data_bb['head'] = line.strip()
    #         else:
    #             val = line.strip()
    #             data_bb['strain'].append(float(val))
    #
    # # Data len
    # N = len(data_fo['strain'])
    #
    # # Time axis for signal plot
    # t_ax = np.arange(N) / fs
    #
    # # Frequency axis for FFT plot
    # xf = np.linspace(-fs / 2.0, fs / 2.0 - 1 / fs, N)
    #
    # # FFTs
    # yf_fo = sfft.fftshift(sfft.fft(data_fo['strain']))
    # yf_bb = sfft.fftshift(sfft.fft(data_bb['strain']))
    #
    # plt.figure()
    # plt.subplot(211)
    # plt.plot(t_ax, data_fo['strain'])
    # plt.xlabel('Tiempo [s]')
    # plt.ylabel('Strain [-]')
    # plt.title('Registro Reykjanes telesismo DAS')
    # plt.grid(True)
    #
    # plt.subplot(212)
    # plt.plot(xf, np.abs(yf_fo) / np.max(np.abs(yf_fo)))
    # plt.xlabel('Frecuencia [-]')
    # plt.ylabel('Amplitud [-]')
    # plt.grid(True)
    # plt.tight_layout()
    # plt.savefig('Imgs/Telesismo/TelesismoDAS_spec.png')
    #
    # plt.clf()
    # plt.subplot(211)
    # plt.plot(t_ax, data_bb['strain'])
    # plt.xlabel('Tiempo [s]')
    # plt.ylabel('Strain [-]')
    # plt.title('Registro Reykjanes telesismo sismómetro')
    # plt.grid(True)
    #
    # plt.subplot(212)
    # plt.plot(xf, np.abs(yf_bb) / np.max(np.abs(yf_bb)))
    # plt.xlabel('Frecuencia [-]')
    # plt.ylabel('Amplitud [-]')
    # plt.grid(True)
    # plt.tight_layout()
    # plt.savefig('Imgs/Telesismo/TelesismoBBS_spec.png')

    # plt.figure()
    # plt.plot(t_ax, data_fo['strain'])
    # plt.grid(True)
    # plt.xlabel('Tiempo [s]')
    # plt.ylabel('Strain [-]')
    # plt.title('Registro Reykjanes telesismo DAS')
    # plt.savefig('Imgs/TelesismoDAS.png')
    #
    # plt.clf()
    # plt.plot(t_ax, data_bb['strain'])
    # plt.grid(True)
    # plt.xlabel('Tiempo [s]')
    # plt.ylabel('Strain [-]')
    # plt.title('Registro Reykjanes telesismo sismómetro')
    # plt.savefig('Imgs/TelesismoBBS.png')

    # plt.clf()
    # line_fo, = plt.plot(t_ax, data_fo['strain'], label='DAS')
    # line_bb, = plt.plot(t_ax, data_bb['strain'], label='Sismómetro')
    # plt.grid(True)
    # plt.xlabel('Tiempo [s]')
    # plt.ylabel('Strain [-]')
    # plt.title('Registros telesismo DAS y sismómetro')
    # plt.legend(handles=[line_fo, line_bb], loc='upper left')
    # plt.savefig('Imgs/TelesismoComp.png')
    #
    # plt.clf()
    # line_st, = plt.plot(signal.resample(data_fo['strain'], 6000), label='DAS')
    # line_das, = plt.plot(st_trace, label='STEAD')
    # plt.grid(True)
    # plt.xlabel('Muestras [s]')
    # plt.ylabel('Strain [-]')
    # plt.title('Traza STEAD y traza DAS Reykjanes telesismo')
    # plt.legend(handles=[line_st, line_das], loc='upper left')
    # plt.savefig('Imgs/STEADTelesismo.png')
    #
    # plt.clf()
    # plt.subplot(211)
    # plt.plot(st_trace)
    # plt.grid(True)
    # plt.xlabel('Muestras [s]')
    # plt.ylabel('Strain [-]')
    # plt.title('Traza STEAD y traza DAS Reykjanes telesismo')
    # plt.subplot(212)
    # plt.plot(signal.resample(data_fo['strain'], 6000))
    # plt.grid(True)
    # plt.xlabel('Muestras [s]')
    # plt.ylabel('Strain [-]')
    # plt.savefig('Imgs/STEADTelesismo1.png')

    # Fig. 5a_fo
    # Local earthquake record with DAS

    # file = '../Data_Reykjanes/Jousset_et_al_2018_003_Figure5a_fo.ascii'
    # n_trazas = 26
    # plt_tr = 10
    # fs = 200
    #
    # data = {
    #     'head': '',
    #     'strain': np.empty((1, n_trazas))
    # }
    #
    # with open(file, 'r') as f:
    #     for idx, line in enumerate(f):
    #         if idx == 0:
    #             data['head'] = line.strip()
    #
    #         else:
    #             row = np.asarray(list(map(float, re.sub(' +', ' ', line).strip().split(' '))))
    #             data['strain'] = np.concatenate((data['strain'], np.expand_dims(row, 0)))
    #
    # data['strain'] = data['strain'][1:]
    # # data['strain'] = data['strain'] / data['strain'].max(axis=0)
    # data['strain'] = data['strain'].transpose()
    # data_das = data
    #
    # # Number of traces to plot
    # n = 4
    #
    # # Traces to plot
    # trtp = []
    #
    # Init rng
    rng = default_rng()

    # # Traces to plot numbers
    # trtp_ids = rng.choice(len(data['strain']), size=n, replace=False)
    # trtp_ids.sort()
    #
    # # Retrieve selected traces
    # for idx, trace in enumerate(data['strain']):
    #     if idx in trtp_ids:
    #         trtp.append(trace)
    #
    # # Data len
    # N = data['strain'].shape[1]
    #
    # # Time axis for signal plot
    # t_ax = np.arange(N) / fs
    #
    # # Frequency axis for FFT plot
    # xf = np.linspace(-fs / 2.0, fs / 2.0 - 1 / fs, N)
    #
    # # Figure to plot
    # plt.figure()
    #
    # # For trace in traces to print
    # for idx, trace in enumerate(trtp):
    #     yf = sfft.fftshift(sfft.fft(trace))
    #
    #     plt.clf()
    #     plt.subplot(211)
    #     plt.plot(t_ax, trace)
    #     plt.title(f'Traza Reykjanes sismo local 1 y espectro #{trtp_ids[idx]}')
    #     plt.xlabel('Tiempo [s]')
    #     plt.ylabel('Amplitud [-]')
    #     plt.grid(True)
    #
    #     plt.subplot(212)
    #     plt.plot(xf, np.abs(yf) / np.max(np.abs(yf)))
    #     plt.xlabel('Frecuencia [Hz]')
    #     plt.ylabel('Amplitud [-]')
    #     plt.grid(True)
    #     plt.tight_layout()
    #     plt.savefig(f'Imgs/Local1/Local1_{trtp_ids[idx]}')

    # Create animation of whole data
    # fig_tr = plt.figure()
    # ims_tr = []
    #
    # for trace in data['strain']:
    #     im_tr = plt.plot(t_ax, trace)
    #     plt.title('Trazas dataset Reykjanes sismo local 1')
    #     plt.ylabel('Amplitud [-]')
    #     plt.xlabel('Tiempo [s]')
    #     plt.grid(True)
    #     ims_tr.append(im_tr)
    #
    # ani_tr = animation.ArtistAnimation(fig_tr, ims_tr, interval=1000, blit=True, repeat=False)
    # ani_tr.save('Animations/Reykjanes_dastraces_local1.mp4')
    #
    # # Create animation of whole data spectrums
    # fig_sp = plt.figure()
    # ims_sp = []
    #
    # for trace in data['strain']:
    #     yf = sfft.fftshift(sfft.fft(trace))
    #     im_sp = plt.plot(xf, np.abs(yf) / np.max(np.abs(yf)))
    #     plt.title('Espectro trazas dataset Reykjanes sismo local 1')
    #     plt.ylabel('Amplitud [-]')
    #     plt.xlabel('Frecuencia [Hz]')
    #     plt.grid(True)
    #     ims_sp.append(im_sp)
    #
    # ani_sp = animation.ArtistAnimation(fig_sp, ims_sp, interval=1000, blit=True, repeat=False)
    # ani_sp.save('Animations/Reykjanes_dasspectrums_local1.mp4')

    # t_ax = np.arange(len(data['strain'][plt_tr])) / fs
    #
    # plt.clf()
    # plt.plot(t_ax, data['strain'][plt_tr])
    # plt.grid(True)
    # plt.xlabel('Tiempo [s]')
    # plt.ylabel('Strain [-]')
    # plt.title('Registro sismo local DAS')
    # plt.savefig('Imgs/SismolocalDAS.png')
    #
    # plt.clf()
    # line_st, = plt.plot(st_trace, label='STEAD')
    # line_das, = plt.plot(signal.resample(data['strain'][plt_tr], 6000), label='DAS')
    # plt.grid(True)
    # plt.xlabel('Muestras [s]')
    # plt.ylabel('Strain [-]')
    # plt.title('Traza STEAD y traza DAS Reykjanes sismo local')
    # plt.legend(handles=[line_st, line_das], loc='upper left')
    # plt.savefig('Imgs/STEADLocal.png')
    #
    # plt.clf()
    # plt.subplot(211)
    # plt.plot(st_trace)
    # plt.grid(True)
    # plt.xlabel('Muestras [s]')
    # plt.ylabel('Strain [-]')
    # plt.title('Traza STEAD y traza DAS Reykjanes sismo local')
    # plt.subplot(212)
    # plt.plot(signal.resample(data['strain'][plt_tr], 6000))
    # plt.grid(True)
    # plt.xlabel('Muestras [s]')
    # plt.ylabel('Strain [-]')
    # plt.savefig('Imgs/STEADLocal1.png')

    # # Fig. 5a_gph
    # # Local earthquake record with a geophone

    file = '../Data_Reykjanes/Jousset_et_al_2018_003_Figure5a_gph.ascii'
    n_trazas = 26
    plt_tr = 10
    fs = 200

    data = {'head': '', 'strain': np.empty((1, n_trazas))}

    with open(file, 'r') as f:
        for idx, line in enumerate(f):
            if idx == 0:
                data['head'] = line.strip()

            else:
                row = np.asarray(
                    list(map(float,
                             re.sub(' +', ' ', line).strip().split(' '))))
                data['strain'] = np.concatenate(
                    (data['strain'], np.expand_dims(row, 0)))

    data['strain'] = data['strain'][1:]
    # data['strain'] = data['strain'] / data['strain'].max(axis=0)
    data['strain'] = data['strain'].transpose()

    # Number of traces to plot
    n = 4

    # Traces to plot
    trtp = []

    # Traces to plot numbers
    trtp_ids = rng.choice(len(data['strain']), size=n, replace=False)
    trtp_ids.sort()

    # Retrieve selected traces
    for idx, trace in enumerate(data['strain']):
        if idx in trtp_ids:
            trtp.append(trace)

    # Data len
    N = data['strain'].shape[1]

    # Time axis for signal plot
    t_ax = np.arange(N) / fs

    # Frequency axis for FFT plot
    xf = np.linspace(-fs / 2.0, fs / 2.0 - 1 / fs, N)

    # Figure to plot
    plt.figure()

    # For trace in traces to print
    for idx, trace in enumerate(trtp):
        yf = sfft.fftshift(sfft.fft(trace))

        plt.clf()
        plt.subplot(211)
        plt.plot(t_ax, trace)
        plt.title(
            f'Reykjanes local earthquake 1 geophone trace and spectrum #{trtp_ids[idx]}'
        )
        plt.xlabel('Time [s]')
        plt.ylabel('Amplitude [-]')
        plt.grid(True)

        plt.subplot(212)
        plt.plot(xf, np.abs(yf) / np.max(np.abs(yf)))
        plt.xlabel('Frequency [Hz]')
        plt.ylabel('Amplitude [-]')
        plt.grid(True)
        plt.tight_layout()
        plt.savefig(f'Imgs/Local1/Local1_geofono_{trtp_ids[idx]}')

    # Create animation of whole data
    fig_tr = plt.figure()
    ims_tr = []

    for trace in data['strain']:
        im_tr = plt.plot(t_ax, trace)
        plt.title('Reykjanes local earthquake 1 geophone dataset traces')
        plt.ylabel('Amplitude [-]')
        plt.xlabel('Time [s]')
        plt.grid(True)
        ims_tr.append(im_tr)

    ani_tr = animation.ArtistAnimation(fig_tr,
                                       ims_tr,
                                       interval=1000,
                                       blit=True,
                                       repeat=False)
    ani_tr.save('Animations/Reykjanes_dastraces_local1_geofono.mp4')

    # Create animation of whole data spectrums
    fig_sp = plt.figure()
    ims_sp = []

    for trace in data['strain']:
        yf = sfft.fftshift(sfft.fft(trace))
        im_sp = plt.plot(xf, np.abs(yf) / np.max(np.abs(yf)))
        plt.title('Reykjanes local earthquake 1 geophone dataset trace spectra')
        plt.ylabel('Amplitude [-]')
        plt.xlabel('Frequency [Hz]')
        plt.grid(True)
        ims_sp.append(im_sp)

    ani_sp = animation.ArtistAnimation(fig_sp,
                                       ims_sp,
                                       interval=1000,
                                       blit=True,
                                       repeat=False)
    ani_sp.save('Animations/Reykjanes_dasspectrums_local1_geofono.mp4')
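The imports this example relies on are not shown in the excerpt. A plausible preamble, consistent with the calls above (the module aliases are assumptions):

import re
from pathlib import Path

import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
import scipy.fftpack as sfft
from numpy.random import default_rng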
Exemplo n.º 59
def main(args):
    if True:  # True: per-image inference below; False: batched dataset evaluation in the else branch
        with tf.Graph().as_default() as g:
            #with tf.device("/cpu:0"):
            sess = tf.Session()
            with sess.as_default():
                model_exp = os.path.expanduser(FLAGS.checkpoint_dir)
                if (os.path.isfile(model_exp)):
                    print('Model filename: %s' % model_exp)
                    with gfile.FastGFile(model_exp, 'rb') as f:
                        graph_def = tf.GraphDef()
                        graph_def.ParseFromString(f.read())
                        tf.import_graph_def(graph_def, name='')
                else:
                    print('Model directory: %s' % model_exp)
                    meta_file, ckpt_file = get_model_filenames(model_exp)
                    print('Metagraph file: %s' % meta_file)
                    print('Checkpoint file: %s' % ckpt_file)
                    saver = tf.train.import_meta_graph(
                        os.path.join(model_exp, meta_file))
                    saver.restore(sess, os.path.join(model_exp, ckpt_file))
                img_list = glob.glob(FLAGS.test_dir + '/*.jpg')
                images_expand = np.zeros((len(img_list), 32, 32, 3),
                                         dtype=float)
                i = 0
                laa = []
                image_placeholder = g.get_tensor_by_name('model/input:0')
                # batch_size_placeholder = g.get_tensor_by_name('model/batch_size')
                phase_train = g.get_tensor_by_name('model/phase_train:0')
                #logits = g.get_tensor_by_name(tensor_name[19][0])
                predictions = []

                for i in range(10):
                    logits = g.get_tensor_by_name(tensor_name[2 * i + 1][0])
                    predictions.append(logits)

                for image_path in img_list:
                    #print (image_path)
                    img = cv2.imread(image_path)
                    image_path = os.path.split(
                        os.path.splitext(image_path)[0])[1]
                    label = image_path.split('_')[0]
                    label_id = int(label)
                    print(label_id)
                    laa.append(label_id)
                    #print (label_id)
                    #img_w = img.shape[1]
                    #img_h = img.shape[0]
                    image_np = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                    image_np = prewhiten(image_np)
                    #print (image_np)
                    image_np_expanded = np.expand_dims(image_np, axis=0)
                    #images_expand[i,:,:,:] = image_np
                    #i+=1
                    #print(images_expand)
                    h = sess.partial_run_setup(
                        predictions, [image_placeholder, phase_train])
                    classifier1 = sess.partial_run(h,
                                                   predictions[0],
                                                   feed_dict={
                                                       image_placeholder:
                                                       image_np_expanded,
                                                       phase_train: False
                                                   })

                    classifier1 = softmax(classifier1)
                    #if np.max(classifier1)>0.95:
                    print("exit 1:", classifier1)
                    #continue
                    #else:
                    for i in range(1, 10):
                        res_logits = sess.partial_run(h, predictions[i])
                        #if np.max(softmax(res_logits))>0.99:
                        #break
                        print("exit", i + 1, ":", softmax(res_logits))

                #classifier1 = sess.partial_run(h,predictions[0],feed_dict={image_placeholder:images_expand,phase_train:False})
                #print (classifier1)
                #classifier1 = softmax(classifier1)
                #if np.max(classifier1)>0.9:
                # print ("1:",classifier1,":",laa)

                #else:
                #  for i in range(1,10):
                #    res_logits = sess.partial_run(h,predictions[i])
                #    if np.max(softmax(res_logits))>0.9:
                #       break
                #    print (i,":",res_logits,":",laa)

                #res_logits = sess.run(logits,feed_dict={image_placeholder:images_expand,phase_train_placeholder:False})
                #labels = np.array(laa)
                #top1 = computeScore(res_logits,labels)
                #print (top1)
                #print (np.argsort(res_logits[0]))

    else:
        with tf.Graph().as_default() as g:
            with tf.device('/cpu:0'):
                images_test, labels_test = msdnet.inputs(args, True)
                # Get images and labels for CIFAR-10.
                #eval_data = FLAGS.eval_data == 'test'
                #images, labels = cifar10.inputs(eval_data=eval_data)
                #gpu_options = tf.GPUOptions()
                #configs = tf.ConfigProto(gpu_options=gpu_options,log_device_placement=False)

                sess = tf.Session()
                tf.train.start_queue_runners(sess=sess)
                with sess.as_default():
                    model_exp = os.path.expanduser(FLAGS.checkpoint_dir)
                    if (os.path.isfile(model_exp)):
                        print('Model filename: %s' % model_exp)
                        with gfile.FastGFile(model_exp, 'rb') as f:
                            graph_def = tf.GraphDef()
                            graph_def.ParseFromString(f.read())
                            tf.import_graph_def(graph_def, name='')
                    else:
                        print('Model directory: %s' % model_exp)
                        meta_file, ckpt_file = get_model_filenames(model_exp)
                        print('Metagraph file: %s' % meta_file)
                        print('Checkpoint file: %s' % ckpt_file)
                        saver = tf.train.import_meta_graph(
                            os.path.join(model_exp, meta_file))
                        saver.restore(sess, os.path.join(model_exp, ckpt_file))
                    #print ('start')

                    #print (images_test)

                    num_examples = 10000
                    num_iter = int(math.ceil(num_examples / args.batch_size))
                    test_step = 0
                    N = 0.0
                    total_sample_count = num_iter * args.batch_size
                    total = 0.0
                    logits = g.get_tensor_by_name(tensor_name[19][0])
                    image_placeholder = g.get_tensor_by_name('model/input:0')
                    print('start')
                    #while test_step<num_iter:
                    images, labels = sess.run([images_test, labels_test])
                    print(images)
                    #print (images.shape)
                    for i in range(64):
                        cv2.imwrite(
                            './tmp/' + str(labels[i]) + '_' + str(i) + '.jpg',
                            images[i])
                    res_logits = sess.run(
                        logits, feed_dict={image_placeholder: images})
                    top1 = computeScore(res_logits, labels)
                    print(top1)
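prewhiten, softmax, get_model_filenames and tensor_name come from elsewhere in this project. Hedged sketches of the two small numeric helpers, assuming facenet-style per-image standardization and a standard numerically stable softmax:

import numpy as np

def prewhiten(x):
    # Per-image standardization (assumed behaviour; not shown in the excerpt).
    mean = np.mean(x)
    std_adj = np.maximum(np.std(x), 1.0 / np.sqrt(x.size))
    return (x - mean) / std_adj

def softmax(x):
    # Numerically stable softmax over the last axis.
    e = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return e / np.sum(e, axis=-1, keepdims=True)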
Exemplo n.º 60
def data_loader(subject):
	raw_X_test, raw_y_test = load_test(subject)
	raw_X_train, raw_train_activity_labels = load_train(subject)
	X_train, y_train = segment_signal(raw_X_train, raw_train_activity_labels)
	X_train = np.expand_dims(X_train, 1)

	X_test, y_test = segment_signal(raw_X_test, raw_y_test)
	X_test = np.expand_dims(X_test, 1)
	return (X_train, y_train, X_test, y_test)

'''	
	load source subject test data
'''
raw_X_test, raw_y_test = load_test(SOURCE_SUBJECT)
X_test, y_test = segment_signal(raw_X_test, raw_y_test)
X_test = np.expand_dims(X_test, 1)
D_test = np.full((y_test.shape[0], 2), [1, 0])

'''	
	load target subject test data
'''
raw_X_test_target, raw_y_test_target = load_test(TARGET_SUBJECT)
X_test_target, y_test_target = segment_signal(raw_X_test_target, raw_y_test_target)
X_test_target = np.expand_dims(X_test_target, 1)
D_test_target = np.full((y_test_target.shape[0], 2), [0, 1])

'''
	Load source subject train data
'''
source_path = data_folder+"subject"+str(SOURCE_SUBJECT)
target_path = data_folder+"subject"+str(TARGET_SUBJECT)