Example #1
 def __ilshift__(self, A): 
     """ A can be two things :
       * G <<= any_GF_Initializers will init the GFBloc with the initializer
       * G <<= g2 where g2 is a GFBloc will copy g2 into self
     """
     if isinstance(A, self.__class__) : 
         if self is not A : self.copyFrom(A) # otherwise it is useless AND does not work !!
     elif isinstance(A, lazy_expressions.lazy_expr) : # A is a lazy_expression made of GF, scalars, descriptors 
         A2= Descriptors.convert_scalar_to_Const(A)
         def e_t (x) : 
             if not isinstance(x, Descriptors.Base) : return x
             tmp = self.copy()
             x(tmp)
             return tmp
         #e_t2 = self.__lazy_expr_eval_context__()
         self.copyFrom ( lazy_expressions.eval_lazy_expr(e_t, A2) )
     elif isinstance(A, lazy_expressions.lazy_expr_terminal) : #e.g. g<<= SemiCircular (...) 
         self <<= lazy_expressions.lazy_expr(A)
     elif Descriptors.is_scalar(A) : #in the case it is a scalar .... 
         self <<= lazy_expressions.lazy_expr(A)
     elif isinstance(A, GF_Initializers.Base) : # backwards compatibility, deprecated
         A(self)
     else :
         raise RuntimeError, " GF Block : <<= operator : RHS not understood"
     return self
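
The dispatch above is easier to see in isolation. Below is a minimal, self-contained sketch (a toy class, not the real GFBloc, and without the lazy-expression branches) of how __ilshift__ can route <<= either to a copy or to an initializer call:

class Block(object):
    """Toy stand-in for a GF block; only the <<= dispatch is illustrated."""

    def __init__(self, value=0):
        self.value = value

    def copyFrom(self, other):
        self.value = other.value

    def __ilshift__(self, A):
        if isinstance(A, self.__class__):
            if self is not A:      # copying from itself would be pointless
                self.copyFrom(A)
        elif callable(A):          # stand-in for the initializer/descriptor branch
            A(self)
        else:
            raise RuntimeError("<<= : RHS not understood")
        return self

b1, b2 = Block(), Block(5)
b1 <<= b2                                    # GF branch: copies b2 into b1
b1 <<= (lambda g: setattr(g, "value", 42))   # initializer branch
print(b1.value)                              # 42

Returning self is what makes the in-place operator rebind b1 to the same object after b1 <<= ....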
Example #2
    def __ilshift__(self, A):
        """ A can be two things :
          * G <<= any_GF_Initializers will init the GFBloc with the initializer
          * G <<= g2 where g2 is a GFBloc will copy g2 into self
        """
        if isinstance(A, self.__class__):
            if self is not A:
                self.copyFrom(
                    A)  # otherwise it is useless AND does not work !!
        elif isinstance(
                A, lazy_expressions.lazy_expr
        ):  # A is a lazy_expression made of GF, scalars, descriptors
            A2 = Descriptors.convert_scalar_to_Const(A)

            def e_t(x):
                if not isinstance(x, Descriptors.Base): return x
                tmp = self.copy()
                x(tmp)
                return tmp

            #e_t2 = self.__lazy_expr_eval_context__()
            self.copyFrom(lazy_expressions.eval_lazy_expr(e_t, A2))
        elif isinstance(A, lazy_expressions.lazy_expr_terminal
                        ):  #e.g. g<<= SemiCircular (...)
            self <<= lazy_expressions.lazy_expr(A)
        elif Descriptors.is_scalar(A):  #in the case it is a scalar ....
            self <<= lazy_expressions.lazy_expr(A)
        elif isinstance(
                A,
                GF_Initializers.Base):  # backwards compatibility, deprecated
            A(self)
        else:
            raise RuntimeError, " GF Block : <<= operator : RHS not understood"
        return self
Example #3
from os import path, remove  # needed below; Descriptors here is a project-local module

def MolecularDesc(dtable, pr_desc, clean=0):

    # descriptors depend only on the compound, so there is no need to
    # recompute them for each of the four bacteria
    pfilout = pr_desc + "desc_compound.csv"

    # recompute from scratch if requested, otherwise reuse a previous run
    if clean == 1 and path.exists(pfilout):
        remove(pfilout)
    if path.exists(pfilout) and path.getsize(pfilout) > 50:
        return pfilout

    # log file
    p_log = pr_desc + "log.txt"
    logfile = open(p_log, "w")

    l_chemID = dtable[dtable.keys()[0]].keys()

    for chemID in l_chemID:
        desc = Descriptors.Descriptors(dtable[dtable.keys()[0]][chemID],
                                       writecheck=0,
                                       logfile=logfile)
        desc.get_descriptorOD1D()
        desc.get_descriptor2D()

        if desc.log == "ERROR":
            continue

        desc.writeTablesDesc(pfilout)

    logfile.close()
    return pfilout
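
Judging from the indexing dtable[dtable.keys()[0]][chemID], dtable is a dict of dicts keyed first by condition (e.g. bacterium) and then by compound ID. A hedged call sketch, with hypothetical keys and values, assuming the project's Descriptors module is importable:

# Hypothetical input layout; keys and values are invented for illustration.
dtable = {
    "E.coli": {
        "CHEM001": "CCO",        # whatever Descriptors.Descriptors accepts per compound
        "CHEM002": "c1ccccc1",
    },
}
pfilout = MolecularDesc(dtable, pr_desc="./desc/", clean=1)
print(pfilout)                   # ./desc/desc_compound.csv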
Example #4
 def __idiv__(self,arg):
     d,t = self._data.array, self._tail
     if hasattr(arg,"_data") :
         d2 = arg._data.array
         assert d.shape == d2.shape, "Green function block division with arrays of different size!"
         for om in range (d.shape[-1]) : 
             d[:,:,om ] = numpy.dot(d[:,:,om], numpy.linalg.inv(d2[:,:,om]))
         t /= arg._tail
     elif Descriptors.is_scalar(arg): # a scalar
         d[:,:,:] /= arg
         for n in range(t.OrderMin, t.OrderMax + 1):
             t[n].array[:,:] /= arg
         #t = self._tail; t /= arg
     else:
       raise NotImplementedError
     return self
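
The loop above performs a matrix right-division, G <- G · G2^-1, one frequency slice at a time. A self-contained numpy sketch of the same operation on raw arrays (the (N, N, n_om) shape is an assumption matching d[:,:,om]):

import numpy

N, n_om = 2, 4
d = numpy.random.rand(N, N, n_om)
d2 = numpy.random.rand(N, N, n_om) + 2.0 * numpy.eye(N)[:, :, None]  # diagonally dominant, so each slice is invertible

for om in range(d.shape[-1]):
    d[:, :, om] = numpy.dot(d[:, :, om], numpy.linalg.inv(d2[:, :, om]))

Right-division matters here: dot(d, inv(d2)) is generally different from dot(inv(d2), d) for matrix-valued slices.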
Example #5
 def __idiv__(self, arg):
     d, t = self._data.array, self._tail
     if hasattr(arg, "_data"):
         d2 = arg._data.array
         assert d.shape == d2.shape, "Green function block division with arrays of different size!"
         for om in range(d.shape[-1]):
             d[:, :, om] = numpy.dot(d[:, :, om],
                                     numpy.linalg.inv(d2[:, :, om]))
         t /= arg._tail
     elif Descriptors.is_scalar(arg):  # a scalar
         d[:, :, :] /= arg
         for n in range(t.OrderMin, t.OrderMax + 1):
             t[n].array[:, :] /= arg
         #t = self._tail; t /= arg
     else:
         raise NotImplementedError
     return self
Example #6
 def __isub__(self,arg):
     d,t = self._data.array, self._tail
     if hasattr(arg,"_data") : # a GF
         d[:,:,:] -= arg._data.array
         t -= arg._tail
     elif isinstance(arg,numpy.ndarray): # an array considered as a constant function 
         for om in range (d.shape[-1]) : d[:,:,om ] -= arg
         t[0].array[:,:] -= arg
     elif isinstance(arg,list): # a list
         arg = numpy.array(arg)
         for om in range (d.shape[-1]) : d[:,:,om ] -= arg
         t[0].array[:,:] -= arg
     elif Descriptors.is_scalar(arg): # just a scalar
         arg = arg*numpy.identity(self.N1)
         for om in range (d.shape[-1]) : d[:,:,om ] -= arg
         t[0].array[:,:] -= arg
     else:
       raise NotImplementedError
     return self
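
Note the scalar branch: g -= c subtracts c * identity(N1) from every frequency slice, i.e. the scalar is promoted to a multiple of the unit matrix rather than subtracted elementwise. A self-contained numpy check (N1 and the array shape are assumptions):

import numpy

N1, n_om = 2, 3
d = numpy.zeros((N1, N1, n_om))
arg = 0.5 * numpy.identity(N1)      # what "g -= 0.5" expands to above

for om in range(d.shape[-1]):
    d[:, :, om] -= arg

print(d[:, :, 0])   # [[-0.5, 0.], [0., -0.5]]: only the diagonal shifts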
Example #7
 def __imul__(self,arg):
     """ If arg is a scalar, simple scalar multiplication
         If arg is a GF (any object with _data and _tail as in GF), then it is a matrix multiplication, slice by slice
     """
     d,t = self._data.array, self._tail
     if hasattr(arg,"_data") : 
         d2 = arg._data.array
         assert d.shape == d2.shape ," Green function block multiplication with arrays of different size !"
         for om in range (d.shape[-1]) : 
             d[:,:,om ] = numpy.dot(d[:,:,om], d2[:,:,om])
         t *= arg._tail
     elif Descriptors.is_scalar(arg): # a scalar
         d[:,:,:] *= arg
         # to be simplified when the *= scalar for tail will be added !
         for n in range(t.OrderMin,t.OrderMax+1):
             t[n].array[:,:] *= arg
     else:
       print type(arg)
       raise NotImplementedError
     return self
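
The GF branch above is a true matrix product per frequency slice, not an elementwise multiply. A short numpy illustration of the difference:

import numpy

a = numpy.arange(4.0).reshape(2, 2)   # [[0., 1.], [2., 3.]]
b = 2.0 * numpy.ones((2, 2))

print(a * b)             # elementwise:     [[0., 2.], [4., 6.]]
print(numpy.dot(a, b))   # matrix product (what the slice loop does): [[2., 2.], [10., 10.]]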
Example #8
 def __imul__(self, arg):
     """ If arg is a scalar, simple scalar multiplication
         If arg is a GF (any object with _data and _tail as in GF), then it is a matrix multiplication, slice by slice
     """
     d, t = self._data.array, self._tail
     if hasattr(arg, "_data"):
         d2 = arg._data.array
         assert d.shape == d2.shape, " Green function block multiplication with arrays of different size !"
         for om in range(d.shape[-1]):
             d[:, :, om] = numpy.dot(d[:, :, om], d2[:, :, om])
         t *= arg._tail
     elif Descriptors.is_scalar(arg):  # a scalar
         d[:, :, :] *= arg
         # to be simplified when the *= scalar for tail will be added !
         for n in range(t.OrderMin, t.OrderMax + 1):
             t[n].array[:, :] *= arg
     else:
         print type(arg)
         raise NotImplementedError
     return self
Example #9
 def __isub__(self, arg):
     d, t = self._data.array, self._tail
     if hasattr(arg, "_data"):  # a GF
         d[:, :, :] -= arg._data.array
         t -= arg._tail
     elif isinstance(
             arg,
             numpy.ndarray):  # an array considered as a constant function
         for om in range(d.shape[-1]):
             d[:, :, om] -= arg
         t[0].array[:, :] -= arg
     elif isinstance(arg, list):  # a list
         arg = numpy.array(arg)
         for om in range(d.shape[-1]):
             d[:, :, om] -= arg
         t[0].array[:, :] -= arg
     elif Descriptors.is_scalar(arg):  # just a scalar
         arg = arg * numpy.identity(self.N1)
         for om in range(d.shape[-1]):
             d[:, :, om] -= arg
         t[0].array[:, :] -= arg
     else:
         raise NotImplementedError
     return self
Example #10
def computeDenseTraj(path, show_track, quality, min_distance, init_gap):
    # show_track = False
    # quality = 0.001             # experimental threshold value
    # min_distance = 5            # grid spacing of W = 5 pixels
    frame_count = 0

    hogInfo = desc.DescInfo(8, False, desc.patch_size, desc.patch_size,
                            desc.nxy_cell, desc.nt_cell)
    hofInfo = desc.DescInfo(9, True, desc.patch_size, desc.patch_size,
                            desc.nxy_cell, desc.nt_cell)
    mbhInfo = desc.DescInfo(8, False, desc.patch_size, desc.patch_size,
                            desc.nxy_cell, desc.nt_cell)

    tracks = []  # list of sampled feature points at each scale
    cap = cv2.VideoCapture(path)
    if cap.isOpened():
        vid_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        vid_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        vid_length = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        fps = cap.get(cv2.CAP_PROP_FPS)
        print('width: %f \n height: %f \n fps: %f \n frames: %f \n' %
              (vid_width, vid_height, fps, vid_length))
        scales, sizes = desc.init_pry(cap)
    # hog = np.zeros([hogInfo.dim * desc.nt_cell, scales.shape[0]], dtype=np.float32)
    else:
        sys.exit("Unable to open the video: " + path)
    # file to write feature info into
    txtFile = open(path[:-4] + "_features.txt", "w")
    # allocate memory for features
    features = np.empty([0, 436])
    # writer = csv.writer(csvFile)

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            print("could not decode video frame")
            break
        grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        scaled_frames = desc.build_pry(
            grey, sizes)  # gets frame at multiple spatial scales
        if frame_count == 0:  # start frame
            # create tracker objects
            for iScale in range(len(scaled_frames)):
                points = []
                points = desc.dense_sample(scaled_frames[iScale], points,
                                           min_distance, quality)
                # save points in tracker object for each point on given iScale
                tracks.append(
                    Trackerinit(points, desc.track_len, hogInfo, hofInfo,
                                mbhInfo))
                del points
            prev_grey = grey
            prev_scaled_frames = scaled_frames
            frame_count = frame_count + 1
            continue

        # get optical flow for all scales. Function accounts for this
        flow = desc.calcOptFlow(grey, prev_grey, scales, sizes, 5)
        for iScale in range(len(scaled_frames)):
            width = scaled_frames[iScale].shape[1]
            height = scaled_frames[iScale].shape[0]

            # compute integral histograms for descriptors for all scales
            hog_int_hist = desc.hogComp(prev_scaled_frames[iScale], hogInfo)
            hof_int_hist = desc.hofComp(flow[iScale], hofInfo)
            mbhx_int_hist, mbhy_int_hist = desc.mbhComp(flow[iScale], mbhInfo)

            # track feature points at each scale
            i = 0
            points_ = []
            # to_delete = []

            for iTrack in tracks[iScale]:
                index = iTrack.index
                prev_point = iTrack.points[index]
                x = int(min(max(np.round(prev_point[0]), 0), width - 1))
                y = int(min(max(np.round(prev_point[1]), 0), height - 1))
                point = np.array([
                    prev_point[0] + flow[iScale][y][x][0],
                    prev_point[1] + flow[iScale][y][x][1]
                ])

                # store point for dense sampling
                points_.append(point)
                # if point out of bounds, store index to remove
                if point[0] <= 0 or point[0] >= width or point[
                        1] <= 0 or point[1] >= height:
                    tracks[iScale] = np.delete(tracks[iScale], i)
                    continue
                # get descriptors for each feature point from integral histogram
                rect = desc.RectInfo(prev_point, height, width, hogInfo)
                start_index = index * hogInfo.dim
                end_index = start_index + hogInfo.dim
                hof_start_index = index * hofInfo.dim
                iTrack.hog[start_index:end_index] = desc.getDesc(
                    hog_int_hist, rect, hogInfo, height, width)
                # hof has different dimension to rest
                iTrack.hof[hof_start_index:hof_start_index +
                           hofInfo.dim] = desc.getDesc(hof_int_hist, rect,
                                                       hofInfo, height, width)
                iTrack.mbhX[start_index:end_index] = desc.getDesc(
                    mbhx_int_hist, rect, mbhInfo, height, width)
                iTrack.mbhY[start_index:end_index] = desc.getDesc(
                    mbhy_int_hist, rect, mbhInfo, height, width)
                iTrack.addPoint(point)

                # draw trajectories
                if show_track and iScale == 0:
                    desc.drawTrack(iTrack.points, iTrack.index, scales[iScale],
                                   frame)

                # Check if trajectory surpasses max track length
                if iTrack.index >= desc.track_len:
                    # get points. Have one point more than max length for calculation of trajectory shape.
                    # trajectories = iTrack.points[: desc.track_len + 1] * scales[iScale]

                    isValid, Traj, mean_x, mean_y, var_x, var_y, traj_length = \
                     desc.tShapeComp(iTrack.points * scales[iScale], desc.track_len)
                    if isValid:
                        f = np.empty([436], dtype=np.float32)
                        # writer.writerow(['frame', 'x mean', 'y mean', 'x var', 'y var', 'length', 'scale'])
                        txtFile.write("%d\t%f\t%f\t%f\t%f\t%f\t%f\t" %
                                      (frame_count, mean_x, mean_y, var_x,
                                       var_y, traj_length, scales[iScale]))
                        f[:7] = np.array([
                            frame_count, mean_x, mean_y, var_x, var_y,
                            traj_length, scales[iScale]
                        ])

                        # for spatio temporal data
                        # spatio width
                        txtFile.write("%f\t" %
                                      min(max(mean_x / vid_width, 0.0), 0.999))
                        # spatio_height
                        txtFile.write(
                            "%f\t" % min(max(mean_y / vid_height, 0.0), 0.999))
                        # spatio length
                        txtFile.write("%f\t" % min(
                            max((frame_count - desc.track_len / 2.0) /
                                vid_length, 0.0), 0.999))
                        # for spatio temporal data
                        # spatio width
                        f[7] = min(max(mean_x / vid_width, 0.0), 0.999)
                        # spatio_height
                        f[8] = min(max(mean_y / vid_height, 0.0), 0.999)
                        # spatio length
                        f[9] = min(
                            max((frame_count - desc.track_len / 2.0) /
                                vid_length, 0.0), 0.999)
                        count = 0
                        for t in Traj:
                            txtFile.write("%f\t%f\t" % (t[0], t[1]))
                            f[10 + count] = t[0]
                            f[10 + count + 1] = t[1]
                            count = count + 2

                        # store descriptors
                        # writer.writerow(["HOG"])
                        desc.printDesc_txt(txtFile, hogInfo, iTrack.hog,
                                           desc.track_len)
                        # writer.writerow(["HOF"])
                        desc.printDesc_txt(txtFile, hofInfo, iTrack.hof,
                                           desc.track_len)
                        # writer.writerow(["MBHX"])
                        desc.printDesc_txt(txtFile, mbhInfo, iTrack.mbhX,
                                           desc.track_len)
                        # writer.writerow(["MBHY"])
                        desc.printDesc_txt(txtFile, mbhInfo, iTrack.mbhY,
                                           desc.track_len)

                        # writer.writerow(["HOG"])
                        f[40:136] = desc.getFeature(hogInfo, iTrack.hog,
                                                    desc.track_len)
                        # writer.writerow(["HOF"])
                        f[136:244] = desc.getFeature(hofInfo, iTrack.hof,
                                                     desc.track_len)
                        # writer.writerow(["MBHX"])
                        f[244:340] = desc.getFeature(mbhInfo, iTrack.mbhX,
                                                     desc.track_len)
                        # writer.writerow(["MBHY"])
                        f[340:436] = desc.getFeature(mbhInfo, iTrack.mbhY,
                                                     desc.track_len)
                        # writer.writerow([])
                        features = np.vstack((features, f))
                        del f
                        # writer.writerow([])
                        txtFile.write("\n")

                    tracks[iScale] = np.delete(tracks[iScale], i)
                    # to_delete.append(i)
                    continue
                i = i + 1
            # remove unwanted points
            # tracks[iScale] = np.delete(tracks[iScale], to_delete)
            # free up memory
            del hog_int_hist
            del hof_int_hist
            del mbhx_int_hist
            del mbhy_int_hist

            # densely resample points every init_gap frames

            if frame_count % init_gap != 0:
                continue

            points_ = desc.dense_sample(scaled_frames[iScale], points_,
                                        min_distance, quality)
            # add as points to track
            tracks[iScale] = np.append(tracks[iScale],
                                       Trackerinit(points_, desc.track_len,
                                                   hogInfo, hofInfo, mbhInfo),
                                       axis=0)
        # store previous frame
        prev_grey = grey
        prev_scaled_frames = scaled_frames
        frame_count = frame_count + 1

        if show_track:
            cv2.imshow('Dense Track', frame)
            if cv2.waitKey(0) & 0xFF == ord('q'):
                break
    if show_track:
        cv2.destroyAllWindows()
    txtFile.close()
    cap.release()
    return features
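
A hedged call sketch, reusing the threshold values documented in the comments at the top of the function; the video path is hypothetical and the project's desc module must be importable:

# Hypothetical usage; "video.avi" is a placeholder path.
features = computeDenseTraj("video.avi", show_track=False,
                            quality=0.001, min_distance=5, init_gap=1)
print(features.shape)  # (n_valid_trajectories, 436)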
Example #11
 def parse_query(self, xml_dom_query_node, caltype, inputf):
     '''
     Parses a query from an XML calibration file and returns a calibration
     request event with the corresponding information. Unfinished: priority
     value parsing.
     
     @param xml_dom_query_node: a query XML Dom Node; ie <DOM Element: query at 0x921392c>
     @type xml_dom_query_node: Dom Element
     
     @param caltype: Calibration, ie bias, flat, dark, etc.
     @type caltype: string
     
     @param inputf: an input fits URI
     @type inputf: string
     
     @return: Returns a Calibration Request Event.
     @rtype: CalibrationRequestEvent
     '''
     import Descriptors # bad to load on import, import mess
     calReqEvent = CalibrationRequest()
     calReqEvent.caltype = caltype
     query = xml_dom_query_node
     
     if not query.hasAttribute("id"):
         raise "Improperly formed. QUERY needs an id, for example 'bias'."
     
     tempcal = str(query.getAttribute("id"))
     
     if( tempcal != caltype ):
         raise "The id in the query does not match the caltype '"+tempcal+"' '"+str(caltype)+"'."
     
     if type( inputf ) == AstroData:
         ad = inputf
     elif type( inputf ) == str:
         ad = AstroData( inputf )
     else:
         raise RuntimeError("Bad Argument: Wrong Type, '%(val)s' '%(typ)s'." %{'val':str(inputf),'typ':str(type(inputf))})
     desc = Descriptors.get_calculator( ad )
     #===============================================================
     # IDENTIFIERS
     #===============================================================
     identifiers = query.getElementsByTagName( "identifier" )
     if len( identifiers ) > 0:
         identifiers = identifiers[0]
     else:
         raise "Improperly formed. XML calibration has no identifiers."
     
     
     for child in identifiers.getElementsByTagName( "property" ):
         #creates dictionary object with multiple values    
         temp = self.parse_property(child, desc, ad)  
         #print "CDL112:", temp         
         calReqEvent.identifiers.update( temp ) 
     
     #===============================================================
     # CRITERIA
     #===============================================================
     criteria = query.getElementsByTagName( "criteria" )
     if len( criteria ) > 0:
         criteria = criteria[0]
     else:
         raise "Improperly formed. XML calibration has no criteria" 
     
     #print 'Locating a %s for %s.' %(str(caltype), str(ad.filename))
     #print 'Using the following criteria:'
     
     for child in criteria.getElementsByTagName( "property" ):
         crit = self.parse_property( child, desc, ad )
         calReqEvent.criteria.update( crit )      
         # print self.str_property( crit )
     
     #===============================================================
     # PRIORITIES
     #===============================================================
     priorities = query.getElementsByTagName( "priorities" )
     if len( priorities ) > 0:
         priorities = priorities[0]
     else:
         raise "Improperly formed. XML calibration has no priorities"
     
     for child in priorities.getElementsByTagName( "property" ):
         calReqEvent.priorities.update( self.parse_property(child, desc, ad) )
     
     
     calReqEvent.filename = inputf
     return calReqEvent
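
From the getElementsByTagName calls above, a query node must carry an id attribute and contain identifier, criteria, and priorities children, each holding property elements. A hedged sketch of such a fragment, built and inspected with the standard library (the property names and values are invented; the real schema is not shown here):

from xml.dom import minidom

# Hypothetical calibration query; element names match what parse_query looks up,
# but the property contents are invented for illustration.
xml_text = """
<query id="bias">
  <identifier>
    <property name="instrument" value="GMOS"/>
  </identifier>
  <criteria>
    <property name="binning" value="1x1"/>
  </criteria>
  <priorities>
    <property name="obsdate" value="latest"/>
  </priorities>
</query>
"""
query = minidom.parseString(xml_text).documentElement
print(query.getAttribute("id"))                       # 'bias'
print(len(query.getElementsByTagName("identifier")))  # 1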
Example #12

from os import listdir
from os.path import isfile, join

import cv2
import numpy as np
import pandas as pd
# "des" (which provides autoCorrelogram) is a project-local descriptor module
'''
cloudy

'''
count = 0
mypath = r'D:\dev\weather_detectcnn\Image\cl500'  # raw string keeps the backslashes literal
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]

dim = (400, 300)
im = cv2.imread(join(mypath, onlyfiles[0]))

matrix = des.autoCorrelogram(im)
a = np.asarray(matrix)
a = np.reshape(a, (-1, 64))
df = pd.DataFrame(a)
training_set = []
training_labels = []

k = 0
training_labels.append(0)
exceptons = []
e = 0
for n in range(1, len(onlyfiles)):
    im = cv2.imread(join(mypath, onlyfiles[n]))
    matrix = des.autoCorrelogram(im)
    a = np.asarray(matrix)