def negate_prop(prop):
    """ Negates the rel part of a proposition, for e.g. neg of [1,0,0],
    i.e. to the right of, is [-1,0,0] (to the left of). """
    opposite_rel = utilities.convert(utilities.relfn(prop))
    # Change only relation in prop, don't touch subj or obj.
    prop[0] = list(opposite_rel)
    return prop
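A minimal usage sketch; the [rel, subj, obj] proposition layout and the behaviour of utilities.convert are inferred from the docstring above, not from the library itself:

# Hypothetical proposition: "A is to the right of B", encoded as [[1, 0, 0], 'A', 'B'].
prop = [[1, 0, 0], 'A', 'B']
negated = negate_prop(prop)
assert negated[0] == [-1, 0, 0]   # relation flipped: "to the left of"
assert negated[1:] == ['A', 'B']  # subj and obj untouched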
Example #2
from os.path import exists
from scipy.io import wavfile


def read_audio(audio_path):
    if not exists(audio_path):
        return -1
    fs, data = wavfile.read(audio_path)  # load the data
    # plt.plot(data, 'r')
    # plt.show()
    dims = data.shape
    # Stereo or non-16 kHz input: fall back to the project's convert()
    # helper, which is assumed to resample to 16 kHz mono.
    if len(dims) > 1 or fs != 16000:
        _, data = convert(audio_path)
    return data
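A usage sketch, assuming read_audio returns the raw samples on success and -1 when the path is missing (the path is hypothetical):

data = read_audio('recordings/sample.wav')  # hypothetical path
if isinstance(data, int) and data == -1:
    print('audio file not found')
else:
    print('loaded %d samples' % len(data))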
def switch(newprop, fixprops, mod):
    """ If rel is the converse of the one in mod, this function tries to
    swap the subj and obj items. If that fails (or if the swap yields a
    model conflicting with an item in fixprops), it tries to move subj,
    and if that fails too, it tries to move obj. Otherwise returns None. """
    rel = utilities.relfn(newprop)
    obj = utilities.objfn(newprop)
    subj = utilities.subjfn(newprop)
    subj_coord = utilities.finds(subj, mod)
    obj_coord = utilities.finds(obj, mod)
    # Semantics between subj and obj in mod is opposite of current rel:
    # we need to swap subj and obj in mod.
    newmod = swap(subj, subj_coord, obj, obj_coord, mod)
    newmod = spaces_to_none(newmod)
    newmod = spatial_array.copy_shrink_array(newmod)
    # Check if the relation between subject and object is opposite to
    # rel in newprop, and if none of the props in fixprops are false in
    # the model produced by swapping subj and obj. If so, return newmod.
    # Note: conflict_props returns [] if there are no conflicting props.
    if find_rel_prop(subj_coord, obj_coord) == utilities.convert(rel) and \
       conflict_props(fixprops, newmod) == [] and newmod != {}:
        return newmod
    # After swapping the subject and object in the conclusion premise,
    # some props (premises) conflict. Try to move subj from subj_coord to
    # new coordinates; if there is no conflict, return the resulting
    # model. Otherwise, try to move the object to a new position.
    newmod = move(subj, subj_coord, rel, obj_coord, mod)
    newmod = spaces_to_none(newmod)
    newmod = spatial_array.copy_shrink_array(newmod)
    if conflict_props(fixprops, newmod) == [] and newmod != {}:
        return newmod
    # Move object from obj_coord to new coordinates. If there is no
    # conflict, return the resulting model. Otherwise, return None.
    obj_rel = utilities.convert(rel)
    newmod = move(obj, obj_coord, obj_rel, subj_coord, mod)
    newmod = spaces_to_none(newmod)
    newmod = spatial_array.copy_shrink_array(newmod)
    if conflict_props(fixprops, newmod) == [] and newmod != {}:
        return newmod
    return None
Example #4
import numpy as np  # `ut` below is the project's own utilities module


def predict_if_liked(artwork_location, theta):
    '''Given theta values from a model, predicts whether an artwork will be
    liked. Returns 1 if so, 0 otherwise.
    '''
    # Convert the artwork to an unrolled vector
    artwork = np.asarray(ut.convert(artwork_location))

    # Add a 1 to the beginning of the vector
    artwork = np.insert(artwork, 0, 1)

    # Get the probability that the user will like that artwork, given the latest model
    probability = 1 / (1 + np.exp(-np.dot(artwork, theta)))

    return 1 if probability >= 0.5 else 0
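The prediction is a standard logistic-regression step: sigmoid(theta . x) thresholded at 0.5. A self-contained check of just that arithmetic with made-up weights (ut.convert and a real artwork are not needed):

import numpy as np

theta = np.array([0.5, 2.0, -1.0])         # hypothetical model weights (bias first)
x = np.insert(np.array([0.8, 0.1]), 0, 1)  # features with the leading 1 added
p = 1 / (1 + np.exp(-np.dot(x, theta)))    # sigmoid(0.5 + 1.6 - 0.1) = sigmoid(2.0) ~ 0.88
print(int(p >= 0.5))                       # -> 1, i.e. "liked"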
Example #5
    def by_chunks(variant_list, chunksize, step, file_name, collection_name,
                  db_name):
        """
        Export data to MongoDB by chunks, iteratively. It retrives annovar data from a csv file and myvariant data from
        their web-server, joins it, and exports it.
        This method is well-fitted for large files. Only the 1000 documents are held in memory and processed at a time,
        instead of attempting to parse and process an entire csv file at once.
        As soon as the method is run, the data will automatically be stored to it. Database and collection name should
        be specified, and there must be a running MongoDB connection. The script will set up a client to communicate
        between python (through pymongo) and the the database.

        :param variant_list: list of HGVS variant ID's. Usually retrived beforehand using the method get_variants_from_vcf
        from the class VariantParsing.
        :param chunksize: set at 1000 usually for simplicity, since the queries to myvariant.info also have a cap of
        1000 variants per query
        :param step: just a counter object
        :param file_name: name of the collection to which store the data
        :param collection_name:
        :param db_name: name of the database to which store the collection
        :return: nothing, simply exports data to MongoDB
        """

        while step * chunksize < len(variant_list):

            chunk_ids = variant_list[chunksize * step:chunksize * (step + 1)]
            df = csv_to_df.parse_to_df(
                csv_to_df.open_and_parse_chunks(file_name, chunksize, step))
            from_annovar = get_list_from_annovar_csv(df, chunk_ids)

            open_file = myvariant_parsing_utils.VariantParsing()
            from_myvariant = open_file.get_dict_myvariant(chunk_ids)

            final_joint(from_annovar, from_myvariant)
            joined_list = from_annovar

            # From unicode to string
            joined_list = convert(joined_list)

            print 'Parsing to MongoDB ...'
            export(joined_list, collection_name, db_name)
            step = step + 1
            print 'Step: {} of {}'.format(step,
                                          (len(variant_list) / chunksize) + 1)

        return 'Finished!'
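The chunking itself is plain list slicing; a tiny self-contained illustration of the index arithmetic used above (toy values):

variant_list = list(range(10))  # stand-in for HGVS IDs
chunksize, step = 4, 0
while step * chunksize < len(variant_list):
    chunk = variant_list[chunksize * step:chunksize * (step + 1)]
    step = step + 1
    print('step %d: %s' % (step, chunk))
# step 1: [0, 1, 2, 3]
# step 2: [4, 5, 6, 7]
# step 3: [8, 9]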
def zmq_producer(context, instrument):
    socket = context.socket(zmq.PUB)
    socket.connect('tcp://127.0.0.1:5000')

    while True:
        now_time = int(time.time())
        now = datetime.datetime.now(pytz.timezone(TIMEZONE))
        now_day = now.day
        now_month = now.month
        global start_time
        global start_day
        global start_month
        global count
        global f
        if ((now_time - start_time) > THRESHOLD_TIME) or (now_day != start_day):
            if now_day != start_day:
                # Decrement by one since we always increment by 1 below
                count = find_count(now_day, now_month) - 1
                delete_older_folders(now)  # Delete folders more than 2 days old
            count = count + 1
            start_time = now_time
            start_day = now_day
            start_month = now_month
            f.close()
            # "a" (append) mode; the original "wa" is not a valid mode string
            f = open(DATA_BASE_PATH + str(start_day) + "_" + str(start_month) + "/" + str(count) + ".csv", "a")
            f.write(HEADER)
        else:
            try:
                readings_array = instrument.read_registers(EM6400_BASE_REGISTER, EM6400_NUMBER_OF_REGISTERS)
                row = str(now_time) + ","
                for i in range(0, len(readings_array) - 1, 2):
                    # Each reading spans two 16-bit registers: high word in
                    # readings_array[i+1], low word in readings_array[i].
                    a = (readings_array[i + 1] << 16) + readings_array[i]
                    row = row + str(convert(a)) + ","
                socket.send(row)
                row = row[:-1] + "\n"
                f.write(row)
                #x = time.time() * 1000+GMT_TIME_DIFFERENCE_MILLISECONDS
                #y=float(row.split(",")[2])
                #socket.send(json.dumps(dict(x=x, y=y)))
                gevent.sleep(0.5)
            except Exception as e:
                global log_file
                log_file = open(DATA_BASE_PATH + "log.txt", "w")
                log_file.write(str(time.time()) + " " + e.__str__())
                log_file.close()
                instrument = minimalmodbus.Instrument(METER_PORT, METER_ID)
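The inner loop reassembles a 32-bit value from a pair of 16-bit Modbus registers (low word first). A self-contained sketch of that step, assuming convert() reinterprets the integer as an IEEE-754 float, which is how such meters typically encode readings:

import struct

def registers_to_float(lo, hi):
    # Combine the two 16-bit registers into one 32-bit pattern and
    # reinterpret it as a little-endian IEEE-754 float.
    raw = (hi << 16) + lo
    return struct.unpack('<f', struct.pack('<I', raw))[0]

# 1.0 is 0x3F800000: high word 0x3F80, low word 0x0000.
print(registers_to_float(0x0000, 0x3F80))  # -> 1.0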
Example #7
    def create_groups(self, rel_to_move, intermediate, located, mod):
        """ Takes the coordinates of the located and intermediate objects,
        and the rel to move from the int to the loc object, and creates
        groups of tokens in a different direction to rel_to_move."""
        # Set int new coords and loc new coords to the current loc coords and
        # int coords respectively.
        int_object = intermediate[0]
        # Create a list of intermediate objects for the loop through the
        # intensional representations.
        int_obj_list = []
        int_obj_list.append(int_object)
        int_new_coords = located[1]
        loc_object = located[0]
        loc_obj_list = []
        loc_obj_list.append(loc_object)
        loc_new_coords = intermediate[1]
        # Create a dict of tokens to move, of the form {token: newcoords}.
        move_tokens_coordinates = {}
        move_tokens_coordinates[int_object] = int_new_coords
        move_tokens_coordinates[loc_object] = loc_new_coords

        # Search for the intermediate object in the subject and object of the
        # intensional representations of all the premises; all tokens which
        # are reached in a different direction than the direction between int,
        # ref and loc objects are to be moved (they are indirectly grouped).

        # rel_to_move is from loc object to intermediate object, as we are
        # moving from int to loc object, this has to be reversed.
        opp_rel = utilities.convert(rel_to_move)
        move_tokens_coordinates = self.loop_throught_intnesionalrepr(
            int_obj_list, opp_rel, move_tokens_coordinates, mod)

        # Search for located object in subj and obj of all the intensional
        # representations of all the premises, all tokens which are reached
        # in a different direction than the direction between int, ref and
        # loc objects are to be moved (they are indirectly grouped)

        # This time, rel_to_move is the correct direction to move: no need
        # to reverse it.
        move_tokens_coordinates = self.loop_throught_intnesionalrepr(
            loc_obj_list, rel_to_move, move_tokens_coordinates, mod)
        # Return the dictionary of the new locations where each token has to
        # be moved.
        return move_tokens_coordinates
def makeContours(xcoordinates, ycoordinates, width, height, binsize):

    # NOTE: `metadata` is not a parameter; it is assumed to be available in
    # the enclosing scope (e.g. a module-level global).
    getLngLat = utilities.makeGetLngLat(metadata)
    getMeters = utilities.makeGetMeters(metadata)

    # make the 2d histogram
    clusterdata, xedges, yedges = numpy.histogram2d(
        xcoordinates, ycoordinates,
        bins=(int(width / binsize), int(height / binsize)),
        range=((0, width), (0, height)))
    if len(xcoordinates) == 0:
        clusterdata = numpy.zeros((int(width / binsize), int(height / binsize)),
                                  dtype=numpy.dtype(float))

    # make contours for the three levels; the contour polygons are expressed in pixel-index coordinates (not lng/lat or meters)
    contoursMin = utilities.contours(clusterdata, xedges, yedges, 0.5, interpolate=True, smooth=True)
    cutLevel50 = utilities.cutLevel(clusterdata, 50.0)
    contours50 = utilities.contours(clusterdata, xedges, yedges, cutLevel50, interpolate=True, smooth=True)
    cutLevel95 = utilities.cutLevel(clusterdata, 95.0)
    contours95 = utilities.contours(clusterdata, xedges, yedges, cutLevel95, interpolate=True, smooth=True)

    # construct output data that includes the polygons in lng,lat coordinates, circumferences in meters, and areas in meters^2
    clusterData = {"contoursMin": [], "contours50": [], "contours95": [],
                        "numberOfLabeledPixels": len(xcoordinates), "cutLevel50": cutLevel50, "cutLevel95": cutLevel95}

    for polygon in contoursMin:

        lnglatPolygon = utilities.convert(polygon, getLngLat)
        metersPolygon = utilities.convert(lnglatPolygon, getMeters)

        data = {"rowcolpolygon": polygon, "lnglatpolygon": lnglatPolygon, "areaInPixels": numpy.abs(utilities.area(polygon)), "circumferenceInMeters": numpy.abs(utilities.circumference(metersPolygon)), "areaInMeters": numpy.abs(utilities.area(metersPolygon)), "centroidInLngLat": utilities.centroid(lnglatPolygon)}
        clusterData["contoursMin"].append(data)

    for polygon in contours50:

        lnglatPolygon = utilities.convert(polygon, getLngLat)
        metersPolygon = utilities.convert(lnglatPolygon, getMeters)

        data = {"rowcolpolygon": polygon, "lnglatpolygon": lnglatPolygon, "areaInPixels": numpy.abs(utilities.area(polygon)), "circumferenceInMeters": numpy.abs(utilities.circumference(metersPolygon)), "areaInMeters": numpy.abs(utilities.area(metersPolygon)), "centroidInLngLat": utilities.centroid(lnglatPolygon)}
        clusterData["contours50"].append(data)

    for polygon in contours95:

        lnglatPolygon = utilities.convert(polygon, getLngLat)
        metersPolygon = utilities.convert(lnglatPolygon, getMeters)

        data = {"rowcolpolygon": polygon, "lnglatpolygon": lnglatPolygon, "areaInPixels": numpy.abs(utilities.area(polygon)), "circumferenceInMeters": numpy.abs(utilities.circumference(metersPolygon)), "areaInMeters": numpy.abs(utilities.area(metersPolygon)), "centroidInLngLat": utilities.centroid(lnglatPolygon)}
        clusterData["contours95"].append(data)

    return clusterData
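The contour pipeline rests on numpy.histogram2d; a minimal, self-contained illustration of the binning step with toy coordinates (no project utilities involved):

import numpy

xs = [1.0, 1.5, 8.0]
ys = [2.0, 2.5, 9.0]
counts, xedges, yedges = numpy.histogram2d(xs, ys, bins=(2, 2), range=((0, 10), (0, 10)))
print(counts)  # [[2. 0.] [0. 1.]]: two points in the low-low bin, one in the high-high bin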
Example #10
    def full_file(variant_list, file_name):
        """
        Retrives annovar data from a csv file and myvariant data from their web-server and return a list of documents
        containing info from both services. All done at once.
        This method is well-fitted for smaller files.

        :param variant_list: list of HGVS variant ID's. Usually retrived beforehand using the method get_variants_from_vcf
        from the class VariantParsing.
        :param file_name: name of csv_file to be used (coming from annovar's output)
        :return:
        """

        df = csv_to_df.parse_to_df(csv_to_df.open_and_parse(file_name))
        from_annovar = get_list_from_annovar_csv(df, variant_list)

        open_file = myvariant_parsing_utils.VariantParsing()
        from_myvariant = open_file.get_dict_myvariant(variant_list)

        final_joint(from_annovar, from_myvariant)
        joined_list = from_annovar
        joined_list = convert(joined_list)

        print 'Finished!'
        return joined_list
Example #12
def main(metadata, mask, imgArray):

    imgMean = numpy.mean(imgArray, axis=0)                    # find the mean of each band

    imgCov = numpy.cov(imgArray.T - imgMean.reshape(-1, 1))   # find the covariance
    imgPval, imgPvec = numpy.linalg.eig(imgCov)   # The eigenvalues and eigenvectors of the covariance matrix
    indexList = numpy.argsort(-imgPval)           # Sort principal components
    imgPval = imgPval[indexList]
    imgPvec = imgPvec[:,indexList]        

    imgProj = numpy.dot(imgPvec.T,imgArray.T - imgMean.reshape(-1,1)) # Projection of original data on PCs
    imgMdist = numpy.sum(numpy.square(imgProj.T) / imgPval,axis=1)    # Mahalanobis distance

    # Sort the distances and select those more than k1 std devs above the
    # mean (take k1 = 6). Assume the distances have a log-normal
    # distribution, so find the k1-sigma extreme point in the log
    # distribution and transform back.

    k1 = 6
    lmean = numpy.mean(numpy.log(imgMdist))
    lsd = numpy.std(numpy.log(imgMdist))
    npix = numpy.size(imgMdist)
    dtol = numpy.exp(lmean + k1*lsd)
    imgFlag = (imgMdist > dtol)
    nsubpix = numpy.sum(imgFlag)

    raredict = {}
    if nsubpix > 2:
       
        # Similarity matrix (cosine)
        imgNorm = numpy.sqrt(numpy.sum(imgProj[:,imgFlag]**2,axis=0))
        imgDot = numpy.dot(imgProj[:,imgFlag].T,imgProj[:,imgFlag])
        simmat = imgDot / numpy.outer(imgNorm,imgNorm)  
        
        # Proximity matrix (taxi-cab metric)
        ny,nx = numpy.mgrid[0:mask.shape[0],0:mask.shape[1]-2]
        ny = numpy.reshape(ny,(-1,),'F')
        nx = numpy.reshape(nx,(-1,),'F')
        imgRna = ny[imgFlag]
        imgCna = nx[imgFlag]
        
        proxmat = numpy.abs(imgRna - imgRna.reshape(-1,1)) + numpy.abs(imgCna - imgCna.reshape(-1,1))

        # Thresholds
        k2 = .95
        k3 = 3
        k4 = 5

        spfmat = numpy.logical_and(numpy.abs(simmat) > k2, proxmat < k3)

        imgRct = numpy.sum(spfmat,axis=0)
        imgCct = numpy.sum(spfmat,axis=1)
                
        imgRall = imgRna[imgRct >= k4]
        imgCall = imgCna[imgCct >= k4]

        if (imgCall.size > 3):
            img3Find = mask.shape[0]*imgCall + imgRall
            imgMdistRpc = imgMdist[img3Find]

            # Get the original spectra of the rare pixels
            imageRPC = imgArray[img3Find]

            proxmat2 = proxmat[imgRct>=k4,:]
            proxmat2 = proxmat2[:,imgCct>=k4]

            proxmat2[proxmat2 != 1] = 0
            sprox = numpy.sum(proxmat2==1)
            if (sprox > 0):
                clout = clump(proxmat2)
                plmap = numpy.hstack((numpy.reshape(numpy.array(imgRall), (-1, 1)),
                                      numpy.reshape(numpy.array(imgCall), (-1, 1)),
                                      imgMdistRpc.reshape(-1, 1),
                                      numpy.array(clout[1].reshape(-1, 1), dtype=int),
                                      imageRPC))
                plmap = plmap[numpy.argsort(plmap[:,3]),:]
                plmap2 = numpy.hstack((plmap,numpy.zeros((plmap.shape[0],2))))
                for x in numpy.unique(plmap[:,3]):
                    plmean = numpy.mean(plmap[plmap[:,3]==x,2])
                    plsd   = numpy.var(plmap[plmap[:,3]==x,2],ddof=1)
                    # average radiance for a color
                    plrpcmean = numpy.mean(plmap[plmap[:,3]==x,4:],axis=0)
                    plmap2[plmap[:,3]==x,-2] = plmean
                    plmap2[plmap[:,3]==x,-1] = plsd
                    plmap2[plmap[:,3]==x,4:plmap2.shape[1]-2] = plrpcmean
                plmap2 = plmap2[numpy.lexsort((plmap2[:,0],plmap2[:,1],-plmap2[:,-2])),:]

                getLngLat = utilities.makeGetLngLat(metadata)
                lnglatPolygon = utilities.convert(plmap2[:,:2].tolist(), getLngLat)
                for x in numpy.unique(plmap2[:,3]):
                    raredict['%d' % x] = {"color": x,
                                        "row,column": plmap2[plmap2[:,3]==x,:2].tolist(),
                                        "longitude,latitude": [[lnglatPolygon[ll][0],lnglatPolygon[ll][1]] for ll in numpy.arange(plmap2.shape[0])[plmap2[:,3]==x]],
                                        "mdist": plmap2[plmap2[:,3]==x,2].tolist(),
                                        "mdistmean": plmap2[plmap2[:,3]==x,-2][0], # these values are identical, so just take one of them
                                        "mdistvar": plmap2[plmap2[:,3]==x,-1][0],
                                        "colorSpectrum": plmap2[plmap2[:,3]==x,4:plmap2.shape[1]-2].tolist()[0]
                                       }
    return raredict
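The anomaly score above is a Mahalanobis distance computed in PCA space. A compact numpy check of that identity on toy data (rows play the role of pixels, columns the role of bands, mirroring imgArray):

import numpy

rng = numpy.random.RandomState(0)
X = rng.randn(500, 3)
mean = numpy.mean(X, axis=0)
cov = numpy.cov(X.T - mean.reshape(-1, 1))
vals, vecs = numpy.linalg.eig(cov)
proj = numpy.dot(vecs.T, X.T - mean.reshape(-1, 1))      # projection onto the PCs
mdist = numpy.sum(numpy.square(proj.T) / vals, axis=1)   # distance, as in main() above
# The same quantity computed directly from the inverse covariance:
diff = X - mean
mdist_direct = numpy.sum(numpy.dot(diff, numpy.linalg.inv(cov)) * diff, axis=1)
print(numpy.allclose(mdist, mdist_direct))               # -> True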
Example #13
    def call_appropriate_func(self, prop, coords, models):
        """ This function calls appropriate functions depending on
        whether subj_coords or obj_coords are already in mods (not None).
        If subj and obj are in models , it calls verify_model. If one of
        them is in mods, it calls add-item (add subj or add object).
        If neither is in models, it calls startmod. If subj_mod
        and obj_mod are mutually independent, it calls combine.
        OVERRIDES the spatial_reasoning function"""
        # Unpack coords and mods: this is necessary only to avoid upsetting
        # Pylint.
        subj_coords = coords[0]
        obj_coords = coords[1]
        subj_mod = models[0]
        obj_mod = models[1]
        rel = utilities.relfn(prop)
        subj = utilities.subjfn(prop)
        obj = utilities.objfn(prop)
        if subj_coords is not None and obj_coords is not None:
            if subj_mod == obj_mod:
                # We have reached a conclusion premise, i.e. subj and obj
                # were found in the same mod. OR we need to generate a
                # conclusion if rel = (), empty tuple returned by relfn
                if rel == ():
                    # We have to generate the relation between subj and
                    # obj as we have a generate conclusion current premise
                    rel = model_validation.find_rel_prop(
                        subj_coords, obj_coords)
                    prop[0] = list(rel)
                    # Add initial mod to modelslist and to neighbourhoodmodels
                    self.modelslist.append(subj_mod)
                    self.neighbourhoodmodels[0].append(subj_mod)
                    # Call a function to generate the conclusion(s) and print
                    # them. If the conclusions are different in the preferred
                    # model and in alternative models, both conclusions are
                    # printed, along with the models.
                    self.gen_and_print_conclusions(prop, subj_mod)
                    return subj_mod
                print("Verifying if the conclusion is correct!")
                mod = model_validation.verify_model(prop, subj_mod)
                if mod is not None:
                    # If premises are true in preferred model, no need for
                    # variation. Return the model.
                    print("Premises are true in model, no model variation "
                          "required (possibly invalid result if multiple "
                          "models are possible).")
                    print("Final model: \n{},".format(mod))
                    return mod
                # verify_model returned None: model variation may be required
                # to try to find a true result in the alternate models.
                if self.annotations == []:
                    # Determinate, return the false model: no model variation
                    print("Premises are false in model. No model variation "
                          "necessary!")
                    print("Final model: \n{},".format(subj_mod))
                    return subj_mod
                # Annotation(s) present: alternative models present.
                # Model variation required.
                print("Model variation necessary as annotation(s) are present")
                # First, put the preferred model in the models list and in
                # the neighbourhood models defaultdict
                self.modelslist.append(subj_mod)
                self.neighbourhoodmodels[0].append(subj_mod)
                self.get_alternate_models(copy.deepcopy(subj_mod))
                # Print alternative models up to the threshold
                self.print_alt_mods_threshold()
                # Dummy return: we are going to use self.modelslist for
                # printing in this case
                return subj_mod

            # There are separate subj and obj mods which need to be combined
            # (subj_mod != obj_mod)
            print("Combining 2 separate models together.")
            mod = model_combination.combine(rel, subj_coords, obj_coords,
                                            subj_mod, obj_mod)
            print("Intermediate model: \n{}".format(mod))
            return mod
        elif subj_coords is not None and obj_coords is None:
            print("Adding object!")
            # Subj-obj order interchanged, rel interchanged.
            rel = utilities.convert(rel)
            mod, annotations_p = model_builder.add_item_prism(
                subj_coords, rel, obj, subj_mod, self.annotations)
            print("Intermediate model: \n{}".format(mod))
            #return mod
        elif subj_coords is None and obj_coords is not None:
            print("Adding subject!")
            # Regular 2nd premise, rel unchanged
            mod, annotations_p = model_builder.add_item_prism(
                obj_coords, rel, subj, obj_mod, self.annotations)
            self.annotations = annotations_p
            print("Intermediate model: \n{}".format(mod))
            #return mod
        else:
            print("Starting model")
            mod = model_builder.start_mod(rel, subj, obj)
            print("Intermediate model: \n{}".format(mod))
        # return model: applies for add subj, add obj and start model
        return mod
Example #14
    def get_alternate_models(self, mod, i=0, neighbourhood=0):
        """ Recursive: goes through annotations, find the reference object (3rd
        term in annotation) and located object (2nd term) in the model. Find
        intermed object by moving from loc to ref object in the correct dir.
        The intermediate and located objects are effectively exchanged, along
        with objects which are grouped with them.
        KWARGS: i: iteration number (used to get alternate models from each
                   of the models in the changing list self.modelslist).
                neighbourhood: current neighbourhood
        The recursive call at the end gets alternative models for every member
        in self.modelslist: i.e. this includes models created inside the for
        loop in the previous iteration. The stopping condition is when the
        iteration variable becomes len(self.modelslist) - 1.
        """
        orig_mod = copy.deepcopy(mod)
        new_neighbourhood = neighbourhood + 1
        for annotation in self.annotations:
            # annotation: [[rel], l.o., r.o]
            # coords_to_swap is of the form [[coords_loc1, coords_int1],
            # [coords_loc2, coords_int2],...]. mod[coords_int_m] and
            # mod[coords_loc_n] need to be swapped.
            located_object = annotation[1]
            # Get the coords of reference_object and located_object in mod.
            # reference_object = annotation[2]
            ref_coords = utilities.get_coordinates_from_token(
                annotation[2], orig_mod)
            loc_coords = utilities.get_coordinates_from_token(
                located_object, orig_mod)
            # Move in direction negate(rel) from the located object - i.e. find
            # tokens in between ref object and loc object.
            rel_to_move = utilities.convert(annotation[0])
            # Find instances in which the reference object is found in the
            # subject of the premise (intensionalrepr), and instances in which
            # it is found in the object of the premise.
            intermediate_coords = utilities.update_coords(
                loc_coords, rel_to_move)
            # If intermediate_coords == ref_coords (and therefore ref_obj
            # == intermediate_object), this annotation should NOT be
            # processed (the ref and loc objects should not be swapped in
            # any case).
            if intermediate_coords == ref_coords:
                continue
            intermediate_object = orig_mod.get(intermediate_coords)
            tokens_coordinates = self.create_groups(
                rel_to_move, (intermediate_object, intermediate_coords),
                (located_object, loc_coords), orig_mod)

            mod = spatial_array.insert_moved_objects(tokens_coordinates,
                                                     orig_mod)
            if mod not in self.modelslist:
                self.modelslist.append(mod)
                self.neighbourhoodmodels[new_neighbourhood].append(mod)

        if i < len(self.modelslist) - 1:
            # Set the value of new_neighbourhood to send in the recursive call
            # based on if a model has already been inserted in the current
            # neighbourhood or not. If not, the recursive call will have the
            # old value of neighbourhood again.
            new_neighbourhood = new_neighbourhood \
                if self.neighbourhoodmodels[new_neighbourhood] != [] \
                else neighbourhood
            # The above check produces an empty list because it's a defaultdict
            # Remove this new empty list element at a new key
            self.delete_empty_keys()
            # Recursive call with next model in modelslist
            #  newmod = copy.deepcopy(self.modelslist[i+1])
            self.get_alternate_models(copy.deepcopy(self.modelslist[i + 1]),
                                      i + 1, new_neighbourhood)
myModel = pickle.load(open(prev_model, 'rb'))
# Kernels are used to expand the features to get better results.
# You can use two kernels for this purpose: the Radial Basis Function
# kernel and the Additive Chi Squared kernel.
# We have refrained from using other kernels because they do not provide
# satisfactory results with our model.
sampler = AdditiveChi2Sampler()
# Comment out the sampler above and uncomment the one below to change kernels
#sampler = RBFSampler(gamma=1, random_state=1)
# If the file is not a wav file, then convert it to .wav format
#if (file[-3:] != "wav"):
#    cmd = "C:/ffmpeg/bin/ffmpeg.exe -i " + file + " " + file[:-3] + ".wav"
#    os.system(cmd)
#    file = file[:-3] + "wav"
if file.rsplit('.')[-1] != 'wav':
    convert(file)
X = pcp(file)
X = np.array([X])
#Change the features using sampler
X = sampler.fit_transform(X)
#predicts the chord of the file using the model you provide it with
pred = myModel.predict(X)
print("The model predicted chord to be: ", NtoC(pred[0]))
# Check whether the prediction matches the actual chord; if they differ,
# the model is re-fitted on the true data (the PCP vector) and re-saved
# over the current model.
ans = input("Is the predicted chord correct?[yes|no]\n")
if ans == 'yes':
    print('Thanks for using our program.')
else:
    print('We are sorry, please help us train the model further.')
    def batch_convert(self):
        file_batch, output_format = self.file_batch, self.output_format
        output_files = [convert(input_file, output_format)
                        for input_file in file_batch]
        return output_files
Example #17
File: anbm.py Project: acad2/pride
def query_oracle(input_data, key1=BINARY, key2=TERNARY, padding=8):
    return convert(input_data, key1, key2).zfill(padding)
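Here convert maps a digit string from one alphabet (e.g. BINARY = "01") to another (e.g. TERNARY = "012"). One plausible implementation of that kind of alphabet-to-alphabet conversion, offered as an assumption rather than this project's actual code:

def convert_sketch(digits, from_alphabet, to_alphabet):
    # Interpret `digits` as a number in base len(from_alphabet)...
    value = 0
    for ch in digits:
        value = value * len(from_alphabet) + from_alphabet.index(ch)
    # ...then re-express that number in base len(to_alphabet).
    if value == 0:
        return to_alphabet[0]
    out = ''
    while value:
        value, r = divmod(value, len(to_alphabet))
        out = to_alphabet[r] + out
    return out

print(convert_sketch('10000000', '01', '012'))  # 128 in ternary -> '11202'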
Example #18
File: anbm.py Project: acad2/pride
# (The snippet begins mid-loop; a reconstructed header, matching the decimal
# loop below, would be:)
for x in xrange(1, 16):
    in_binary = format(x, 'b')
    decremented = decrement(in_binary, "01")
    assert int(decremented, 2) == x - 1
    
    incremented = increment(in_binary, "01")
    assert int(incremented, 2) == x + 1, (in_binary, incremented, x + 1)
        
for x in xrange(1, 16):
    #print "Decrementing: ", x
    decremented = decrement(str(x), "0123456789")
    assert int(decremented) == x - 1
    
 #   print "Incrementing: ", x
    incremented = increment(str(x), "0123456789")
    assert int(incremented) == x + 1, (incremented, x + 1)


test_string = "10000000"
converted = convert(test_string, BINARY, TERNARY)
#print converted
#assert format(int(converted, len(TERNARY)), 'b') == test_string, (format(int(converted, len(TERNARY)), 'b'), test_string)
   
for x in xrange(int("10000000", 2)):
    _string = format(x, 'b').zfill(8)
    incremented = increment(_string, BINARY)
    base_3 = convert(incremented, BINARY, TERNARY).zfill(8)
#    print "\nbinary : {}".format(incremented)
#    print "ternary: {}".format(base_3)
    
for possible_keys in crack(BINARY):
   # print len(possible_keys), possible_keys
    print_in_place("Possible key: " + str(possible_keys))
    
# (This snippet also begins mid-function; it mirrors the logging branch of
# zmq_producer above, without the ZMQ socket.)
            delete_older_folders(now)
        count = count + 1
        start_time = now_time
        start_day = now_day
        start_month = now_month
        f.close()
        # "a" (append) mode; the original "wa" is not a valid mode string
        f = open(DATA_BASE_PATH + str(start_day) + "_" + str(start_month) + "/" + str(count) + ".csv", "a")
        f.write(HEADER)

    else:
        try:
            readings_array = instrument.read_registers(EM6400_BASE_REGISTER, EM6400_NUMBER_OF_REGISTERS)
            row = str(now_time) + ","
            for i in range(0, len(readings_array) - 1, 2):
                a = (readings_array[i + 1] << 16) + readings_array[i]
                row = row + str(convert(a)) + ","
            row = row[:-1] + "\n"
            print now
            f.write(row)

        except Exception as e:
            log_file = open(DATA_BASE_PATH + "log.txt", "w")
            log_file.write(str(time.time()) + " " + e.__str__())
            log_file.close()
            instrument = minimalmodbus.Instrument(METER_PORT, METER_ID)
    def call_appropriate_func(self, prop, coords, models):
        """ This function calls appropriate functions depending on
        whether subj_coords or obj_coords are already in mods (not None).
        If subj and obj are both in models, it calls verify_model. If only
        one of them is in mods, it calls add_item (add subj or add obj).
        If neither is in models, it calls startmod. If subj_mod
        and obj_mod are mutually independent, it calls combine."""
        # Unpack the coords and mods tuples: this is necessary only to
        # prevent Pylint number-of-variables and number-of-statements
        # warnings.
        subj_coords, obj_coords = coords
        subj_mod, obj_mod = models
        rel = utilities.relfn(prop)
        subj = utilities.subjfn(prop)
        obj = utilities.objfn(prop)
        if subj_coords is not None and obj_coords is not None:
            if subj_mod == obj_mod:
                # We have reached a conclusion premise, i.e. subj and obj
                # were found in the same mod. OR we need to generate a
                # conclusion if rel = (), empty tuple returned by relfn
                if rel == ():
                    # We have to generate the relation between subj and
                    # obj as we have a generate conclusion current premise
                    rel = model_validation.find_rel_prop(
                        subj_coords, obj_coords)
                    prop[0] = list(rel)
                    # Call a function to generate the conclusion and print
                    # it. If the conclusions are different in the preferred
                    # model and in an alternative model, both conclusions are
                    # printed, along with the 2 models.
                    self.gen_and_print_conclusions(prop, subj_mod)
                    return subj_mod
                print("Verifying if the conclusion is correct!")
                mod = model_validation.verify_model(prop, subj_mod)
                if mod is not None:
                    print(
                        "Premises are true in model. \nIntermediate model: "
                        "\n{}.\nAttempting to falsify the model.".format(mod))
                    alt_mod = model_validation.make_false(
                        prop, subj_mod, self.premises)
                    print("Make false returned mod: \n{}".format(alt_mod))
                    if mod == alt_mod:
                        print("No falsifying model found!\nPremise follows "
                              "validly from previous premises.\nFinal model "
                              "(premises true): {}".format(mod))
                    else:
                        print("Premise was previously possibly true, but can "
                              "also be false.\n Initial model (premises true):"
                              "\n{}\nVaried model (premises false):\n {}\n".
                              format(mod, alt_mod))
                        self.models.append(mod)
                        self.models.append(alt_mod)
                    return alt_mod
                # verify_model returned None
                print(
                    "Premises are false in model.\nIntermediate model: \n{}"
                    "\nAttempting to make the model true.\n".format(subj_mod))
                alt_mod = model_validation.make_true(prop, subj_mod,
                                                     self.premises)
                print("Make true returned mod: \n{}".format(alt_mod))
                if subj_mod == alt_mod:
                    print("No true model found!\nPremise is inconsistent with "
                          "previous premises.\nFinal model (premises false):"
                          "\n{}:\n".format(subj_mod))
                else:
                    print("Premise was previously possibly false, but can also"
                          " be true.\nInitial model (premises false):\n {}\n"
                          "Altered model (premises true):{}\n".format(
                              subj_mod, alt_mod))
                    self.models.append(subj_mod)
                    self.models.append(alt_mod)
                return subj_mod

            # There are separate subj and obj mods which need to be combined
            # (subj_mod != obj_mod)
            print("Combining 2 separate models together.")
            mod = model_combination.combine(rel, subj_coords, obj_coords,
                                            subj_mod, obj_mod)
            print("Intermediate model: \n{}".format(mod))
            return mod
        elif subj_coords is not None and obj_coords is None:
            print("Adding object!")
            # Subj-obj order interchanged, rel interchanged.
            mod = model_builder.add_item(subj_coords, utilities.convert(rel),
                                         obj, subj_mod)
            print("Intermediate model: \n{}".format(mod))
        elif subj_coords is None and obj_coords is not None:
            print("Adding subject!")
            # Regular 2nd premise, rel unchanged
            mod = model_builder.add_item(obj_coords, rel, subj, obj_mod)
            print("Intermediate model: \n{}".format(mod))
        else:
            print("Starting model")
            mod = model_builder.start_mod(rel, subj, obj)
            print("Intermediate model: \n{}".format(mod))
        # return model: applies for add subj, add obj and start model
        return mod