def mapUniref2Uniprot(inputDir,outputDir):
    def mapResults2QueryFile(mapResultsFile):
        mapResults=pd.read_csv(mapResultsFile, 
                               sep="\t")
        query=mapResults["Cluster ID"]
        FileName=mapResultsFile.split("/")[-1]
        List="{0}/{1}".format(outputDir,FileName.replace(".tab",".list"))
        query.to_csv(List, 
                     sep="\t", 
                     index=None,
                     header=False)
        return List
    params={
        "from":"NF50",
        "to":"ACC",
        "format":"tab",
        "columns":"id"
        }
    for mapResults in tqdm([d for d in os.listdir(inputDir) \
                       if d!=".ipynb_checkpoints"]):
        mapResultsFile = "{0}/{1}".format(inputDir,mapResults)
        List=mapResults2QueryFile(mapResultsFile)
        output=List.replace(".list",".tab")\
        .replace("uniprot2Uniref","uniref2Uniprot")
        mapping(List,output,params)
def mapUniprot2Uniref(inputListDir, outputMappingDir):
    params = {"from": "ACC", "to": "NF50", "format": "tab", "columns": "id"}
    for List in tqdm(
        [d for d in os.listdir(inputListDir) if d != ".ipynb_checkpoints"]):
        queryFile = "{0}/{1}".format(inputListDir, List)
        baseName = List.replace("toMap2Uniref",
                                "uniprot2Uniref").replace(".list", ".tab")
        output = "{0}/{1}".format(outputMappingDir, baseName)
        mapping(queryFile, output, params)
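Both functions above delegate the actual ID conversion to a mapping(queryFile, outputFile, params) helper that is not shown. A minimal sketch of such a helper, written against the legacy UniProt uploadlists endpoint (since retired in favour of the current REST ID-mapping API); the URL and payload handling here are assumptions, not the project's actual code:

import requests  # assumed dependency

def mapping(queryFile, outputFile, params):
    # Read the query IDs written by mapResults2QueryFile(), one per line.
    with open(queryFile) as fh:
        query = " ".join(line.strip() for line in fh if line.strip())
    # The legacy service took the ID list alongside from/to/format/columns.
    payload = dict(params, query=query)
    response = requests.post("https://www.uniprot.org/uploadlists/", data=payload)
    response.raise_for_status()
    with open(outputFile, "w") as out:
        out.write(response.text)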
def symb():
    bits = bitstream(100000)
    symbol = []
    for i in range(0, bits.size - 2, 3):
        b0 = bits[i]
        b1 = bits[i + 1]
        b2 = bits[i + 2]
        symbol.append(mapping(b0, b1, b2))
    # 100000 = 3 * 33333 + 1, so one bit is left over; pad it with two zeros
    # to form the final symbol.
    b0 = bits[len(bits) - 1]
    b1 = 0
    b2 = 0
    symbol.append(mapping(b0, b1, b2))
    print(len(symbol))
    return symbol
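The mapping(b0, b1, b2) helper is not shown; since symb() packs three bits per symbol, one plausible sketch is a Gray-coded 8-PSK mapper (an assumption, not the original implementation):

import numpy as np

# Hypothetical 8-PSK mapper: three bits pick one of eight unit-circle phases.
# The table sends input bits to the phase index whose Gray code matches them,
# so neighbouring constellation points differ in exactly one bit.
GRAY_TO_PHASE = [0, 1, 3, 2, 7, 6, 4, 5]

def mapping(b0, b1, b2):
    index = GRAY_TO_PHASE[(b0 << 2) | (b1 << 1) | b2]
    return np.exp(2j * np.pi * index / 8)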
Example #4
def running(dx, withData=True, compare=True, proj="M", filename="fullData.txt"):

    """
	dx gives the resolution, dx=1 for 1x1 grid
	withData is a boolean, withData=True plots the data points on top the map
	projection has the same options as gmt, Mercator projection is the default
	filename is the name of the file that stores the data
	"""

    """
	Reading data
	"""
    theta0 = [600000]
    for theta in theta0:
        xpoints = np.arange(-180, 180, dx)
        ypoints = np.arange(-90 + dx, 90, dx)
        if filename.endswith("txt"):
            rawData = readData.readTxt(filename)
        elif filename.endswith("xlsx"):
            rawData = readData.readCols(filename)
        else:
            print "I don't think I can handle this format"
            return
        """
		Classifier being trained
		"""
        rfc = mapping.training(rawData)
        """
		Map being made with the classifier
		"""
        data = [(rawData["lon"][i], rawData["lat"][i]) for i in range(len(rawData["lon"]))]
        labels = [mapping.downLabels(i) for i in rawData["classif"]]
        data, labels = mapping.cleanDoubles(data, labels)
        scores = cross_val_score(rfc, data, labels, cv=5)
        print ("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
        prediction = mapping.mapping(xpoints, ypoints, rfc)
        """
		Map being written to a file
		"""
        writeResults.writePredictions(
            xpoints,
            ypoints,
            prediction,
            header="> Predictions made with training.py",
            newFilename="seabed_lithology_regular_grid.txt",
        )
        """
		GMT file written
		"""
        writeResults.makeMap(dx, withData, proj)
        """
		Postscript called to call gmt
		"""
        writeResults.gmtMap()
        print len(rawData["lon"]), len(rawData["classif"])
        """
		Statisctics being compared
		"""
        if compare:
            compareStats(rawData, theta)
Example #5
 def test_mapping(self):
     self.assertEqual(mapping(["a", "b", "c"]), {
         "a": "A",
         "b": "B",
         "c": "C"
     })
     self.assertEqual(mapping(["p", "s", "t"]), {
         "p": "P",
         "s": "S",
         "t": "T"
     })
     self.assertEqual(mapping(["a", "v", "y", "z"]), {
         "a": "A",
         "v": "V",
         "y": "Y",
         "z": "Z"
     })
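The mapping() under test is not shown; a minimal implementation consistent with these assertions pairs each letter with its uppercase form:

def mapping(letters):
    # e.g. ["a", "b"] -> {"a": "A", "b": "B"}
    return {letter: letter.upper() for letter in letters}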
Example #6
 def test_mapping(self):
     """To test the mapping() function"""
     self.assertEqual("true", mapping("abc", "bcd"))
     self.assertEqual("false", mapping("foo", "bar"))
     self.assertEqual("true", mapping("bar", "foo"))
     self.assertEqual("true", mapping("123123", "456456"))
     self.assertEqual("true", mapping("123", "abcdef"))
     self.assertEqual("false", mapping("123456", "123"))
Example #7
def main():

    while d.discovery_todo.find().count() > 0:
        try:
            next_device = db.NextAvailDoc().main(d)
            s = ssh.Session(next_device['ip_address'], cfg['un'], cfg['pw'])
            upsert_doc(next_device, next_device['ip_address'], 'discovery_completed')
            current_ip = next_device['ip_address']
            m = mapping.mapping()
            d_complete = discovery_loop(s, m)
            if 'vrf' in d_complete:
                print 'vrf found'
            update_collections(d_complete, current_ip)
        except Exception as e:
            print upsert_doc(next_device, next_device['ip_address'], 'discovery_completed')
Example #8
    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.pack()
        # self.drawer = None
        self.isOn = False
        self.imLabel = None
        self.buttonwidth = 20

        self.playButton = Button(
            self, text="play", command=self.play, width=self.buttonwidth
        )
        self.playButton.grid(row=0, column=0)
        self.change_record_mode_button = Button(
            self, text="Record", command=self.change_record_mode, width=self.buttonwidth
        )
        self.change_record_mode_button.grid(row=0, column=1)
        self.settings_button = Button(
            self, text="Settings", command=self.change_settings, width=self.buttonwidth
        )
        self.settings_button.grid(row=0, column=2)
        # self.controller_canvas = Canvas(self, height=500, width=500)
        tmp = np.int16(Image.open("./resource/pro_controller.tif"))
        tmp[:, :, 3] = tmp[:, :, 3] * 0.7
        # tmp.putalpha(196)
        # alpha = tmp.split()[-1]
        # print(np.amax(np.int8(alpha).shape))
        # tmp = Image.open('./resource/pro_controller.tif').convert("RGBA")
        controller_pic = ImageTk.PhotoImage(
            Image.fromarray(tmp.astype("uint8"), "RGBA")
        )
        self.master.protocol("WM_DELETE_WINDOW", self.on_closing)
        self.imLabel = Label(self, image=controller_pic)
        self.imLabel.image = controller_pic
        self.imLabel.grid(row=1, column=0, columnspan=3)
        self.mapping = mapping()
        self.pad = Controller(self.mapping, self.imLabel, record_mode=False)
Example #9
def main():
    script_path = os.path.abspath(os.path.dirname(__file__))
    conf_dict = read_conf('%s/template.conf' % script_path)
    #conf_dict['General']['startdir'] = os.getcwd() + '/'
    #conf_dict['General']['startdir'] = '/homeb/liulijuan/single_cell/QC/data/pythonTest/'
    # the output directory can also be set by the user
    #conf_dict['General']['outputdirectory'] = conf_dict['General']['startdir'] + 'output/'
    # samples dir
    #conf_dict['General']['samples_file'] = conf_dict['General']['startdir'] + "samples/"
    # input data format
    #conf_dict['General']['format'] = "sam"
    Parameter_list = sys.argv[1:]
    for i in range(0, len(Parameter_list) - 1, 2):
        if (Parameter_list[i].split('-')[1] in conf_dict['General']):
            conf_dict['General'][Parameter_list[i].split('-')
                                 [1]] = Parameter_list[i + 1]
        else:
            pass

    # create output dir
    if (os.path.isfile(conf_dict['General']['outputdirectory'].rstrip("/"))):
        print(
            'ERROR: a file with the name of your output dir already exists, cannot create the dir, MyQC exit'
        )
        sys.exit(1)
    elif os.path.isdir(conf_dict['General']['outputdirectory']):
        print(
            'your output dir already exists; overwrite is turned on, output will be written into the existing dir'
        )
    else:
        CMD("mkdir %s " %
            (conf_dict['General']['outputdirectory'].rstrip("/")))

    ### move to output dir
    os.chdir(conf_dict['General']['outputdirectory'])
    ### specify the main progress log file
    logfile = conf_dict['General']['outputdirectory'] + 'progress_log.txt'
    ### remove existing log file
    if (os.path.isfile(logfile)):
        # linux delete command is rm
        CMD('rm %s' % logfile)
        # in windows delete command is del
        #CMD('del %s' %logfile)

    ### main step for MyQC
    wlog("Start MyQC", logfile)
    t = time.time()
    start_time = t
    ############## Step1:mapping #####################
    wlog("Step1:mapping", logfile)
    samples_info = mapping(conf_dict, logfile)
    #mapping(conf_dict, logfile)
    s1time = time.time() - t
    wlog("time for alignment : %s" % (s1time), logfile)
    wlog("Step1:alignment DONE", logfile)

    ############## Step 2:calculate expression value##################
    t = time.time()
    wlog("Step2:calculate samples expression value", logfile)
    if conf_dict['General']['expression_matrix'].rstrip() == "":
        sample_detected_genes = calculate_expression(conf_dict, logfile)

        #write sample sequence information
        outfile = open(
            '%sseq_information.txt' % conf_dict['General']['outputdirectory'],
            'w')
        header = 'Sample.Name' + '\t' + 'Total.Reads' + '\t' + 'Mapped.Reads' + '\t' + 'Mapped.Rate' + '\t' + 'Reads.Complexity' + '\t' + 'Gene.Detected' + '\n'
        outfile.write(header)

        cmd = 'ls' + '\t' + conf_dict['General'][
            'expression'] + "*.genes.results"
        a = commands.getstatusoutput(cmd)
        Temp_Raw = a[1].split('\n')
        sample_name_list = [
            i.split('/')[-1].split('.genes.results')[0] for i in Temp_Raw
        ]
        for i in range(len(sample_name_list)):
            totalN, mappedN, mapping_rate, reads_complexity = calculate_total_reads(
                '%s%s.sam' %
                (conf_dict['General']['sam'], sample_name_list[i]))
            outfile.write(sample_name_list[i] + '\t')
            #totalN = samples_info['samples_totalN'][i]
            #mappedN = samples_info['samples_mappableN'][i]
            #mapping_rate = samples_info['samples_mapping_rate'][i]
            #reads_complexity = samples_info['samples_unique_reads'][i]
            detected_genes = sample_detected_genes[i]
            outfile.write(
                str(totalN) + '\t' + str(mappedN) + '\t' + str(mapping_rate) +
                '\t' + str(reads_complexity) + '\t' + str(detected_genes) +
                '\n')
            print('The Current sample is %s' % sample_name_list[i])
        outfile.close()
        conf_dict['General']['expression_matrix'] = conf_dict['General'][
            'outputdirectory']
        s2time = time.time() - t
        wlog("time for calculate expression value : %s" % (s2time), logfile)
    wlog("Step2:calculate expression DONE", logfile)

    #################### Step3:calculate distinct p_value ########################
    t = time.time()
    wlog('Step3:calculate distinct p_value', logfile)
    if conf_dict['General']['pvalue_file'].rstrip() == "":
        expression_matrix_file = conf_dict['General'][
            'expression_matrix'] + 'expression_matrix.txt'

        cmd = 'Rscript' + '\t' + script_path + '/MI.R' + '\t' + expression_matrix_file + '\t' + conf_dict[
            'General']['outputdirectory']
        CMD(cmd)
        conf_dict['General']['pvalue_file'] = conf_dict['General'][
            'outputdirectory']
        s3time = time.time() - t
        wlog("time for calculate distinct p_value : %s" % (s3time), logfile)
    wlog("Step3:calculate calculate distinct p_value DONE", logfile)

    ###################Step4: Calculate SeqQC quantile in all samples###############
    t = time.time()
    wlog('Step4:calculate SeqQC quantile in all samples', logfile)
    cmd = 'Rscript' + '\t' + script_path + '/Calculate_Quantile_in_all_samples_update.R' + '\t' + conf_dict[
        'General']['pvalue_file'] + 'Distinct_PValue.txt' + '\t' + conf_dict[
            'General']['pvalue_mi_cutoff'] + '\t' + conf_dict['General'][
                'pvalue_pearson_cutoff'] + '\t' + conf_dict['General'][
                    'pvalue_spearman_cutoff'] + '\t' + conf_dict['General'][
                        'logical_tag'] + '\t' + conf_dict['General'][
                            'pvalue_file'] + 'seq_information.txt' + '\t' + conf_dict[
                                'General']['outputdirectory']
    CMD(cmd)
    s4time = time.time() - t
    wlog('time for calculate SeqQC quantile in all samples : %s' % (s4time),
         logfile)
    wlog('Step4:calculate SeqQC quantile in all samples DONE', logfile)

    ####################Step5: calculate Weighted Combined Quality Score and MQS#############
    t = time.time()
    wlog(
        'Step5:calculate Weighted Combined Quality Score and Minimal Quantile Score',
        logfile)
    cmd = 'Rscript' + '\t' + script_path + '/Weight_Combined_Score_and_MQS_update.R' + '\t' + conf_dict[
        'General'][
            'outputdirectory'] + 'Samples_Seq_Quality_in_all_Quantile.txt' + '\t' + conf_dict[
                'General']['outputdirectory']
    CMD(cmd)
    s5time = time.time() - t
    wlog(
        'time for calculate Weighted Combined Quality Score and Minimal Quantile Score : %s'
        % (s5time), logfile)
    wlog(
        'Step5:calculate Weighted Combined Quality Score and Minimal Quantile Score DONE',
        logfile)

    ####################Step6: FPR#########################################################
    t = time.time()
    wlog('Step6:FPR', logfile)
    # Check how many GeneExpressionOutliers and MainPopulationCells there are
    wlog('Start checking how many expression outliers and main population cells',
         logfile)
    infile = open(
        conf_dict['General']['outputdirectory'] +
        'Samples_Seq_Quality_in_all_Quantile.txt', 'r')
    header = infile.readline()
    MainPopulationCell_Count = 0
    GeneExpressionOutlier_Count = 0
    line = infile.readline()
    while (line):
        type = line.split()[1]
        if (type == 'GeneExpressionOutlier'):
            GeneExpressionOutlier_Count += 1
        elif (type == 'MainPopulationCell'):
            MainPopulationCell_Count += 1
        else:
            print('Error ......')
        line = infile.readline()
    infile.close()
    print('GeneExpressionOutlier_Count = %s, MainPopulationCell_Count = %s' %
          (GeneExpressionOutlier_Count, MainPopulationCell_Count))
    # Start calculating False Positive Table ...
    wlog('Start calculating false positive table', logfile)
    if (MainPopulationCell_Count > 0 and GeneExpressionOutlier_Count > 0):
        cmd = 'Rscript' + '\t' + script_path + '/False_Positive_Rate_Table_Raw.R' + '\t' + conf_dict[
            'General'][
                'outputdirectory'] + 'Samples_MQS_and_WeightedCombinedQualityScore.txt' + '\t' + conf_dict[
                    'General']['outputdirectory']
        CMD(cmd)
        Plot_FPR(script_path, conf_dict, logfile)
    else:
        pass

    s6time = time.time() - t
    wlog('time for FPR : %s' % s6time, logfile)
    wlog('Step6:FPR DONE', logfile)

    ###################Step7: Annotate artifact#####################
    t = time.time()
    wlog('Step7:Annotate artifact', logfile)
    if (MainPopulationCell_Count > 0 and GeneExpressionOutlier_Count > 0):
        Estimate_Cutoff_and_Annotate_Artifact(conf_dict, logfile)
    else:
        Append_PASS(conf_dict, logfile)
    s7time = time.time() - t
    wlog('time for annotate artifact : %s' % s7time, logfile)
    wlog('Step7:Annotate artifact DONE', logfile)

    ############# Plotting PCA###################################
    cmd = 'Rscript' + '\t' + script_path + '/PCA.R' + '\t' + conf_dict['General'][
        'expression_matrix'] + 'expression_matrix.txt' + '\t' + conf_dict['General'][
            'outputdirectory'] + 'MyQC_All_Samples_QC_information.txt' + '\t' + conf_dict[
                'General']['outputdirectory']
    CMD(cmd)

    ################ Summary ##########################
    outfile_summary = open(
        conf_dict['General']['outputdirectory'] + 'Summary.txt', 'w')
    outfile_summary.write('Summary of MyQC Run' + '\n')
    outfile_summary.write('\n')
    if (MainPopulationCell_Count > 0 and GeneExpressionOutlier_Count > 0):
        infile = open(
            conf_dict['General']['outputdirectory'] + 'Real_cutoff.txt', 'r')
        header = infile.readline()
        line = infile.readline()
        MQS_Cutoff = line.split()[0].split('(')[1].split(')')[0].split(',')[0]
        WCQS_Cutoff = line.split()[0].split('(')[1].split(')')[0].split(',')[1]
        N_Artifacts = line.split()[1]
        Fraction_Artifacts = line.split()[2]
        Real_FPR = line.split()[-1]
        infile.close()

        total_cells = int(MainPopulationCell_Count) + int(
            GeneExpressionOutlier_Count)
        w_artifact = {}
        infile = open(
            conf_dict['General']['outputdirectory'] +
            'MyQC_All_Samples_QC_information.txt', 'r')
        header = infile.readline()
        line = infile.readline()
        while (line):
            sample_name = line.split()[0]
            QC = line.split()[-1]
            if (QC == 'Artifact'):
                w_artifact[sample_name] = ''
            else:
                pass
            line = infile.readline()
        infile.close()

        outfile_summary.write('Total Samples: ' + '\t' + str(total_cells) +
                              '\n')
        outfile_summary.write('Total GeneExpressionOutliers: ' + '\t' +
                              str(GeneExpressionOutlier_Count) + '\n')
        outfile_summary.write('Maximal False Positive Rate (FPR) Allowed: ' +
                              '\t' + str(conf_dict['General']['max_fpr']) +
                              '\n')
        outfile_summary.write('MQS_Cutoff[Estimated]=' + str(MQS_Cutoff) +
                              '\t' + 'WCQS_Cutoff[Estimated]=' +
                              str(WCQS_Cutoff) + '\n')
        outfile_summary.write(
            'Real False Positive Rate (FPR) in the Dataset:' + '\t' +
            Real_FPR + '\n')
        outfile_summary.write("Total Artifacts:" + '\t' +
                              str(len(w_artifact.keys())) + '\n')
        outfile_summary.write('Artifact Samples:' + '\t' +
                              ','.join(w_artifact.keys()) + '\n')
        outfile_summary.write('\n')
    else:
        total_cells = int(MainPopulationCell_Count) + int(
            GeneExpressionOutlier_Count)
        outfile_summary.write('Total Samples: ' + '\t' + str(total_cells) +
                              '\n')
        outfile_summary.write('Total GeneExpressionOutliers: ' + '\t' +
                              str(GeneExpressionOutlier_Count) + '\n')
        outfile_summary.write('Total MainPopulationCell:' + '\t' +
                              str(MainPopulationCell_Count) + '\n')
        if (GeneExpressionOutlier_Count == 0):
            outfile_summary.write(
                'Skipping downstream analysis: no GeneExpressionOutliers found!'
                + '\n')
        else:
            outfile_summary.write(
                'Skipping downstream analysis: no MainPopulationCells found!'
                + '\n')

    running_time = time.time() - start_time
    outfile_summary.write('Total Running Time: %.2d:%.2d:%.2d' %
                          (running_time / 3600,
                           (running_time % 3600) / 60, running_time % 60) +
                          '\n')
    outfile_summary.close()
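main() leans on two helpers that are not shown, CMD() and wlog(). Hedged, minimal versions consistent with how they are called (the timestamp format is an assumption):

import os
import time

def CMD(cmd):
    # Run a shell command; the pipeline treats failures as non-fatal.
    os.system(cmd)

def wlog(message, logfile):
    # Append a timestamped progress message to the log and echo it.
    line = '[%s] %s\n' % (time.strftime('%Y-%m-%d %H:%M:%S'), message)
    with open(logfile, 'a') as log:
        log.write(line)
    print(line.rstrip())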
Example #10
streamer.sc = sc
streamer.sqlContext = sqlContext

title = "Twitter Story Maker"
tweetCount = g.integerbox("Number of tweets to be collected:",
                          title=title,
                          upperbound=1000)
streamer.setTweets(tweetCount)
print(streamer.nrOfTweets)
waitingMsg = WaitMsg()
waitingMsg.start()
try:
    streamer.start()
    print('x')
except Exception as e:
    print(e)
os.startfile('liveTweetsLocation.csv')
import clusteringApp as clustering
clustering.conf = conf
clustering.sc = sc
clustering.sqlContext = sqlContext
clustering.kmeans_from_csv2()
k = g.integerbox("Insert k", title=title)
clustering.kmeans_from_csv(k=k)

import mapping as m
os.startfile('prettyPrint.csv')
term = g.enterbox("Insert wanted term")
cluster = m.getMaxCluster(term, k)
m.mapping(cluster, term)
os.startfile('my_mapStamen.html')
Example #11
def spectral1D(N, dT):

    # ==============================================================================
    # Physical variables
    # ==============================================================================
    
    # Initial x coordinate of the phenomena (m)
    X0 = 0.
    
    # Final x coordinate of the phenomena (m)
    XF = 5.
    
    # Initial time analyzed for the phenomena (s)
    T0 = 1.
    
    # Final time for the phenomena (s)
    TF = 2.
    
    # Diffusion coefficient in (m2/s)
    Dx = 0.3
    
    # Mass of tracer injected to the system
    M = 10
    
    # Injection point in the domain
    xo = 2
    
    # ==============================================================================
    # Numerical parameters
    # ==============================================================================
    
    # Stability parameter Sx for diffusion when handling explicit methods
#    Sx = 0.1
    
    # Crank-Nicolson weighting factor theta for specific cases
    # theta = 0.0 explicit
    # theta = 1.0 implicit
#     theta = 1.
    
    # Number of nodes in the domain
    # N = 30
    
    # Calculating domain length
    L = XF - X0
    
    # ==============================================================================
    # Start different calculations given the parameters from the model
    # ==============================================================================
    
    # Calculating the dt with the stability parameter
    # T = 0.00005
    
    # Defining number of timesteps
    nT = int((TF - T0) / dT)
    
    # Error saving
    ert = np.zeros(int(nT))
    
    # d(chi)/dx - Constant value that arose from mapping to natural coordinates
    dchi_dx = 2 / (XF - X0)
    
    # Calculating the collocation points: N - 2 Gauss-Legendre points, then
    # adding -1 and 1 as the extreme values (already in natural coordinates)
    [LGLP, W] = np.polynomial.legendre.leggauss(N - 2)
    LGLP = np.insert(LGLP, 0, -1)
    LGLP = np.append(LGLP, 1)
    
    # Calculating real coordinates of points from natural coordinates location
    xn = mp.mapping(LGLP, X0, XF)
    
    # P coefficient for calculations
    P = dchi_dx ** 2 * dT * Dx
    
    # ==============================================================================
    # Building the differentiation matrix. I know I need the second derivative 
    # matrix, but it is a fact that I can square the first derivative matrix to 
    # obtain the second derivative matrix
    # ==============================================================================
    
    # Defining the diffusion matrix for the problem (dense here; a sparse
    # alternative is commented out)
    # K = sp.lil_matrix((N, N))
    K = np.zeros((N,N))
    
    # Filling each column of the matrix with the derivatives of the Lagrange
    # polynomials
    for i in range(0, N):
        
        K[:, i] = lpd.Lag_pder(LGLP, i)
        
    # Calculating the second derivative matrix as a matrix-matrix multiplication
    D2 = np.matmul(K, K)
    
    # Freeing space - take out to prove it works
    del(K)
    
    # Final matrix for solving
    K = P * D2 - np.identity(N)
    K[0, :] = np.zeros(N)
    K[N - 1, :] = np.zeros(N)
    K[0, 0] = 1.
    K[N - 1, N - 1] = 1.
    
#    plt.spy(K)
#    plt.draw()
#    plt.pause(1.5)
#    plt.clf()
    # ==============================================================================
    # Initial condition and start up of other parameters
    # ==============================================================================
    
    # Generating initial condition
    C = AN.difuana(M, XF - X0, Dx, xn, xo, T0)
    
    C1 = np.zeros(N)
#    Cmax = np.max(C)
    
#    # Plotting initial condition
#    plt.ion()
#    plt.figure(1, figsize=(11, 8.5))
#    style.use('ggplot')
#    
#    plt.subplot(1, 1, 1)
#    plt.plot(xn, C)
#    plt.title('Initial condition')
#    plt.xlabel(r'Distance $(m)$')
#    plt.ylabel(r'Concentration $ \frac{kg}{m} $')
#    plt.draw()
#    plt.pause(1.5)
    
    # Entering time loop 
    
    for t in range(1, nT):
        
        # Generating analytical solution
        Ca = AN.difuana(M, XF - X0, Dx, xn, xo, T0 + t * dT)
        
        # Setting up right hand side
        C[0] = -AN.difuana(M, L, Dx, X0, xo, T0 + t * dT)
        C[N - 1] = -AN.difuana(M, L, Dx, XF, xo, T0 + t * dT)
        
        # Solving system (matrix vector multiplication)
        C1 = -np.linalg.solve(K, C)
        
        # Estimating error
        err = np.abs(C1 - Ca)
        ert[t] = np.linalg.norm(err)
        
#        # Plotting numerical solution and comparison with analytical
#        plt.clf()
#        
#        plt.subplot(2, 2, 1)
#        plt.plot(xn, C1, 'b')
#        plt.xlim([X0, XF])
#        plt.ylim([0, Cmax])
#        plt.ylabel(r'Concentration $ \frac{kg}{m} $')
#        plt.title('Numerical solution')
#        
#        plt.subplot(2, 2, 2)
#        plt.plot(xn, Ca)
#        plt.xlim([X0, XF])
#        plt.ylim([0, Cmax])
#        plt.title('Analytical solution')
#        
#        plt.subplot(2, 2, 3)
#        plt.semilogy(xn, err)
#        plt.xlim([X0, XF])
#        plt.ylim([1e-8, 1e2])
#        plt.ylabel('Absolute error')
#        plt.title('Error')
#        
#        plt.subplot(2, 2, 4)
#        plt.semilogy(np.linspace(T0, TF, nT), ert)
#        plt.xlim([T0 - 0.2, TF + 0.2])
#        plt.ylim([1e-8, 1e2])
#        plt.title('Error evolution')
#        
#        plt.draw()
#        titulo = 'Spectral elements method in single domain solution implicit'
#        plt.suptitle(titulo)
#        plt.pause(0.2)
        
        # Preparing for next timestep   
        C = C1
        
    return ert
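The helper mp.mapping(LGLP, X0, XF) is not shown; given dchi_dx = 2 / (XF - X0) above, it is presumably the affine map from natural coordinates [-1, 1] to physical coordinates [X0, XF], along these lines:

import numpy as np

def mapping(chi, X0, XF):
    # Affine map from chi in [-1, 1] to x in [X0, XF]; its inverse has the
    # constant derivative d(chi)/dx = 2 / (XF - X0) used as dchi_dx above.
    return X0 + (np.asarray(chi) + 1.0) * (XF - X0) / 2.0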
Example #12
    def update(self,
               measurement,
               measurementCovariance,
               new,
               currentStateMean=None,
               currentStateCovariance=None,
               currentRobotAbs=None,
               currentRobotCov=None):
        global seenLandmarks_
        global dimR_
        global seenLandmarksX_
        global it
        # get robot current pose
        if currentRobotAbs is None:
            currentRobotAbs = self.robot.getPose()
        if currentRobotCov is None:
            currentRobotCov = self.robot.getCovariance()
        label = measurement[2]
        # get landmark absolute position estimate given current pose and measurement (robot.sense)
        [landmarkAbs, G1,
         G2] = self.robot.inverseSense(currentRobotAbs, measurement)
        # get KF state mean and covariance

        if currentStateMean is None:
            currentStateMean = stateMean = np.array(self.stateMean)
        else:
            stateMean = currentStateMean

        if currentStateCovariance is None:
            currentStateCovariance = stateCovariance = np.array(
                self.stateCovariance)
        else:
            stateCovariance = currentStateCovariance

        print '###############################'

        # if new landmark augment stateMean and stateCovariance
        if new:
            stateMean = np.concatenate(
                (stateMean, [[landmarkAbs[0]], [landmarkAbs[1]]]), axis=0)
            Prr = self.robot.getCovariance()
            # print 'Prr:',Prr

            if len(seenLandmarks_) == 1:
                #print 'Robo lanf If start '
                Plx = np.dot(G1, Prr)
                #print'Robot Land If stop'
            else:
                lastStateCovariance = KalmanFilter.getStateCovariance(self)
                Prm = lastStateCovariance[0:3, 3:]
                Plx = np.dot(G1, np.bmat([[Prr, Prm]]))

            Pll = np.array(np.dot(np.dot(G1, Prr),
                                  np.transpose(G1))) + np.array(
                                      np.dot(np.dot(G2, measurementCovariance),
                                             np.transpose(G2)))
            P = np.bmat([[stateCovariance, np.transpose(Plx)], [Plx, Pll]])
            stateCovariance = P

        elif label == seenLandmarks_[0]:

            landmarkPos = [0, 0]
            landmarkPos[0] = (stateMean[dimR_][0])
            landmarkPos[1] = (stateMean[dimR_ + 1][0])
            stateMean[0, 0] = landmarkPos[0] - measurement[0]
            stateMean[1, 0] = landmarkPos[1] - measurement[1]
            self.robot.setPose(stateMean[0:3][0:3])

        else:
            # if old landmark stateMean & stateCovariance remain the same (will be changed in the update phase by the kalman gain)
            # calculate expected measurement
            vec = mapping(seenLandmarks_.index(label) + 1)
            expectedMeas = [0, 0]
            print 'vec:', vec
            print 'stateMean:', stateMean.shape
            print 'label:', label
            print 'new', new
            expectedMeas[0] = np.around(stateMean[dimR_ + vec[0] - 1][0], 3)
            expectedMeas[1] = np.around(stateMean[dimR_ + vec[1] - 1][0], 3)

            [landmarkRelative, _,
             _] = Absolute2RelativeXY(currentRobotAbs, expectedMeas)
            #Z = ([ [np.around(landmarkAbs[0],3)],[np.around(landmarkAbs[1],3)] ])
            measured = ([
                np.around(landmarkRelative[0][0], 3),
                np.around(landmarkRelative[1][0], 3)
            ])

            # y = Z - expectedMeasurement
            # AKA Innovation Term
            #measured = ([ [np.around(expectedMeas[0],3)],[np.around(expectedMeas[1],3)] ])
            Z = ([np.around(measurement[0], 3), np.around(measurement[1], 3)])

            y = np.array(RelativeLandmarkPositions(Z, measured))

            # build H
            # H = [Hr, 0, ..., 0, Hl, 0, ..., 0]; the position of Hl depends on when the landmark was first seen
            H = np.reshape(G1, (2, 3))
            for i in range(0, seenLandmarks_.index(label)):
                H = np.bmat([[H, np.zeros([2, 2])]])
            H = np.bmat([[H, np.reshape(G2, (2, 2))]])
            for i in range(
                    0,
                    len(seenLandmarks_) - seenLandmarks_.index(label) - 1):
                H = np.bmat([[H, np.zeros([2, 2])]])

            measurementCovariance = np.array(measurementCovariance)
            try:
                S = np.array(
                    np.add(np.dot(np.dot(H, stateCovariance), np.transpose(H)),
                           measurementCovariance))
            except ValueError:
                print('Value error S')
                print 'H shape', H.shape
                print 'State Cov', stateCovariance.shape
                print 'measurement Cov', measurementCovariance.shape
                return np.array(stateMean), np.array(stateCovariance)

            if (S < 0.000001).all():
                # S is effectively singular; abort this update.
                print('Non-invertible S Matrix')
                raise ValueError

            # calculate Kalman gain
            K = np.array(
                np.dot(np.dot(stateCovariance, np.transpose(H)),
                       np.linalg.inv(S)))

            # compute posterior mean
            posteriorStateMean = np.array(np.add(stateMean, np.dot(K, y)))

            # compute posterior covariance
            kc = np.array(np.dot(K, H))
            kcShape = len(kc)

            posteriorStateCovariance = np.dot(np.subtract(np.eye(kcShape), kc),
                                              stateCovariance)

            # check theta robot is a valid theta in the range [-pi, pi]
            posteriorStateMean[2][0] = pi2pi(posteriorStateMean[2][0])

            # update robot pose

            robotPose = ([posteriorStateMean[0][0]
                          ], [posteriorStateMean[1][0]],
                         [posteriorStateMean[2][0]])
            robotCovariance = posteriorStateCovariance[0:3, 0:3]

            # updated robot covariance
            if not (np.absolute(posteriorStateMean[0][0]) > 3.5
                    or np.absolute(posteriorStateMean[1][0]) > 3.5):
                stateMean = posteriorStateMean
                stateCovariance = posteriorStateCovariance
                # set robot pose
                self.robot.setPose(robotPose)
                # set robot covariance
                self.robot.setCovariance(robotCovariance)
                print 'IM DONEXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'

        # set posterior state mean
        self.stateMean = stateMean
        # set posterior state covariance
        self.stateCovariance = stateCovariance

        print 'Robot Pose:', currentRobotAbs
        vec = mapping(seenLandmarks_.index(label) + 1)
        land = [[np.around(stateMean[dimR_ + vec[0] - 1][0], 3)],
                [np.around(stateMean[dimR_ + vec[1] - 1][0], 3)]]
        #print 'Done' , land

        if land[0][0] > 4:
            land[0][0] = 2.543
        if land[0][0] < -4:
            land[0][0] = -0.503
        if land[1][0] > 4:
            land[1][0] = 3.517
        if land[1][0] < -4:
            land[1][0] = -0.592

        #landmark_abs_[int(label)-1].append([[np.around(stateMean[dimR_ + vec[0]-1][0],3)],[np.around(stateMean[dimR_ + vec[1]-1][0],3)]])
        landmark_abs_[int(label) - 1].append(land)
        seenLandmarksX_[int(label) - 1].append(
            np.around(stateMean[dimR_ + vec[0] - 1][0], 3))
        for i in range(0, len(landmark_abs_)):
            #count=Counter(seenLandmarksX_[i])
            print 'landmark absolute position : ', i + 1, ',', np.median(
                landmark_abs_[i], 0)  #count.most_common(1)

        print '____END______'
        return np.array(stateMean), np.array(stateCovariance)
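In this example mapping() is an index helper, not the occupancy-grid class used elsewhere on this page. From the way vec is consumed (stateMean[dimR_ + vec[0] - 1], stateMean[dimR_ + vec[1] - 1]), a plausible sketch for the n-th seen landmark (1-based) is:

def mapping(n):
    # 1-based offsets of the n-th landmark's (x, y) entries in the state
    # vector, counted from the end of the robot block of size dimR_.
    return [2 * n - 1, 2 * n]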
Example #13
from mapping import mapping

print "start...\n"

mp = mapping((20, 20), 10)

mp.update_map((10, 10, 0), (0, 50))
mp.save("test_map_0.txt",(10, 10))
mp.update_map((10, 10, 0), (90, 50))
mp.save("test_map_90.txt",(10, 10))
mp.update_map((10, 10, 0), (180, 50))
mp.save("test_map_180.txt",(10, 10))
mp.update_map((10, 10, 0), (45, 50))
mp.save("test_map_45.txt",(10, 10))
mp.update_map((10, 10, 0), (135, 50))

mp.save("test_map.txt",(10, 10))

print "stop.\n"

Example #14
    def update(self, measurement, measurementCovariance, new):
        global seenLandmarks_
        global dimR_
        # get robot current pose
        currentRobotAbs = self.robot.getPose()

        # get landmark absolute position estimate given current pose and measurement (robot.sense)
        [landmarkAbs, G1,
         G2] = self.robot.inverseSense(currentRobotAbs, measurement)
        # get KF state mean and covariance
        stateMean = self.stateMean
        stateCovariance = self.stateCovariance

        # print 'update mean: ', currentRobotAbs
        print '###############################'

        # if new landmark augment stateMean and stateCovariance
        if new:
            stateMean = np.concatenate(
                (stateMean, [[landmarkAbs[0]], [landmarkAbs[1]]]), axis=0)
            Prr = self.robot.getCovariance()
            # print 'Prr:',Prr

            if len(seenLandmarks_) == 1:
                #print 'Robo lanf If start '
                Plx = np.dot(G1, Prr)
                #print'Robot Land If stop'
            else:
                #print 'Robo Land Else start'

                # GOING FOR LUNCH.. Will be back Soon

                lastStateCovariance = KalmanFilter.getStateCovariance(self)
                #print 'STEP 1'
                #print 'Last covar: ',lastStateCovariance
                #end=len(lastStateCovariance[0][:])
                end = lastStateCovariance.shape
                print 'end:', end[1]

                #Prm    = lastStateCovariance[0:3, -1*(end[1]-3):(end[1]-1)]
                Prm = lastStateCovariance[0:3, 3:]
                #'''
                #print 'STEP 2'
                #print 'G1 : ', G1
                #print 'Prr : ', Prr
                #print 'Prm : ', Prm
                #'''
                Plx = np.dot(G1, np.bmat([[Prr, Prm]]))
                #print 'Robo Lanf Else stop'
            Pll = np.array(np.dot(np.dot(G1, Prr),
                                  np.transpose(G1))) + np.array(
                                      np.dot(np.dot(G2, measurementCovariance),
                                             np.transpose(G2)))
            P = np.bmat([[stateCovariance, np.transpose(Plx)], [Plx, Pll]])
            stateCovariance = P
            #print ' Stop'
            # else:
            # if old landmark stateMean & stateCovariance remain the same (will be changed in the update phase by the kalman gain)
            # calculate expected measurement

        print 'state covar : ', stateCovariance.shape
        #print 'inside update', currentRobotAbs

        [landmarkAbs, Hr, Hl] = Relative2AbsoluteXY(currentRobotAbs,
                                                    measurement)
        print 'Label : ', measurement[2]
        print 'land z: ', measurement[0]
        print 'land th: ', measurement[1]
        print 'landmarkAbs: ', landmarkAbs
        print 'robot absolute pose : ', currentRobotAbs
        # get measurement
        Z = ([[measurement[0]], [measurement[1]]])

        #Update
        x = stateMean
        label = measurement[2]

        # y = Z - expectedMeasurement
        # AKA Innovation Term
        measured = ([[landmarkAbs[0]], [landmarkAbs[1]]])
        y = np.subtract(Z, measured)

        #print 'z: ', Z
        print '________'
        #print 'meas : ',  measured
        #print 'xxx'
        #print 'y : ', y
        #print '________________'

        # build H
        # H = [Hr, 0, ..., 0, Hl, 0, ..., 0]; the position of Hl depends on when the landmark was first seen
        H = np.reshape(Hr, (2, 3))

        print ' H Start: ', seenLandmarks_.index(label)

        for i in range(0, seenLandmarks_.index(label)):
            H = np.bmat([[H, np.zeros([2, 2])]])
        H = np.bmat([[H, np.reshape(Hl, (2, 2))]])
        for i in range(0,
                       len(seenLandmarks_) - seenLandmarks_.index(label) - 1):
            H = np.bmat([[H, np.zeros([2, 2])]])
        #print 'H done'
        #print 'HHHHHHHHHHHHHHHHHHHHHHH'
        # compute S
        # print 'Before Getting Stuck'
        #print 'H : ', H.shape
        #print 'State covar: ',stateCovariance
        #print '___________ERROR Start_______________'
        print 'G1 : ', G1
        print 'G2 : ', G2
        print 'H : ', H
        try:
            s1 = np.dot(H, stateCovariance)
        except ValueError:
            print 'Value Error S1'
            print 'H shape', H.shape
            print 'State Cov', stateCovariance.shape
            return
        # print 's1: ', s1
        #print 'xxxxxxxxxxxxxxxx'
        #print 'Done s1'

        try:
            S = np.add(np.dot(np.dot(H, stateCovariance), np.transpose(H)),
                       measurementCovariance)
        except ValueError:
            print('Value error S')
            return

        #print '__________ERROR ZONE CROSSED________________'
        #print 'Done s'

        if (S < 0.000001).all():
            print('Non-invertible S Matrix')
            raise ValueError
        #else:
        # print 'mat invertible'
        # calculate Kalman gain
        K = np.array(
            np.dot(np.dot(stateCovariance, np.transpose(H)), np.linalg.inv(S)))

        #print 'K gain Done',K

        # compute posterior mean
        posteriorStateMean = np.add(stateMean, np.dot(K, y))

        #print ' New mean state DONE'

        # compute posterior covariance
        kc = np.array(np.dot(K, H))
        kcShape = len(kc)
        #print 'Kc shape',kcShape

        posteriorStateCovariance = np.dot(np.subtract(np.eye(kcShape), kc),
                                          stateCovariance)

        #print ' New Covar Done'
        # print 'xxxxxxxxxxxxxxxxxxxxxxxx'

        # check theta robot is a valid theta in the range [-pi, pi]
        posteriorStateMean[2][0] = pi2pi(posteriorStateMean[2][0])

        #print 'pi2pi Done'
        # update robot pose

        #print 'post state mean: ', posteriorStateMean

        robotPose = ([posteriorStateMean[0][0]], [posteriorStateMean[1][0]],
                     [posteriorStateMean[2][0]])

        #print 'calculate robot pose done'
        # set robot pose
        self.robot.setPose(robotPose)
        #print 'update',robotPose
        # updated robot covariance
        robotCovariance = posteriorStateCovariance[0:3, 0:3]
        #print 'updated Cov',robotCovariance
        # set robot covariance
        self.robot.setCovariance(robotCovariance)
        # set posterior state mean
        KalmanFilter.setStateMean(self, posteriorStateMean)
        # set posterior state covariance
        KalmanFilter.setStateCovariance(self, posteriorStateCovariance)
        #print 'robot absolute pose : ',robotPose
        vec = mapping(seenLandmarks_.index(label) + 1)
        landmark_abs_[int(label) - 1].append(
            [[stateMean[dimR_ + vec[0] - 1][0]],
             [stateMean[dimR_ + vec[1] - 1][0]]])
        for i in range(0, len(landmark_abs_)):
            print 'landmark absolute position : ', i + 1, ',', np.median(
                landmark_abs_[i], 0)

        print 'post mean: ', posteriorStateMean
        print 'post covar: ', posteriorStateCovariance

        print '____END______'
        return posteriorStateMean, posteriorStateCovariance
Example #16
#-- GENERATE PLAYER ------------------------------------------------------------------------------#
import character as ch
player_bstat = ch.basic_stat(acc=3,
                             jump_power=10,
                             max_speed=10,
                             max_hp=100,
                             max_mp=100)
player_phstat = ch.physics_stat(width=20, height=20, air_drag=0.2)
player = ch.player("1P", (500, 400), player_bstat, player_phstat)

#-- MAPS FOR USE ------------------------------------------------------------------------------#
maps = {}

# TEST MAP (test_map)
test_map = mapping.mapping((40, 24))
test_map_chars = []
temp = ch.character("HEOSU", (600, 300), player_bstat, player_phstat)
temp.set_map(test_map)
test_map_chars.append(temp)

test_map.map_setting(mapping.map_temp, {'start': (50, 300)}, test_map_chars)
test_map.background_setting(pygame.image.load("img/map/test_map_bg.png"))
test_map.add_block(
    entity.eventblock(
        test_map, (5, 5), entity.PLAYER_COLLIDE,
        lambda: test_map.player.harms.append(attack.damage(5, False))))
test_map.add_block(
    entity.eventblock(test_map, (10, 5), entity.PLAYER_COLLIDE,
                      lambda: test_map.player.set_acc((3, 3), 10)))
Example #17
    def update(self, measurement, measurementCovariance, new):
        global seenLandmarks_
        global dimR_
        global lab
        global solution
        #   get robot current pose
        robotCurrentAbs = self.robot.getPose()
        # get landmark absolute position estimate given current pose and measurement (robot.sense)
        [landmarkAbs, G1,
         G2] = self.robot.inverseSense(robotCurrentAbs, measurement)
        #   get KF state mean and covariance
        stateMean = self.getStateMean()
        stateCovariance = self.getStateCovariance()
        # if new landmark augment stateMean and stateCovariance
        if new:
            # print 'new'
            stateMean = np.concatenate(
                (stateMean, [[landmarkAbs[0]], [landmarkAbs[1]]]), axis=0)
            Prr = self.robot.getCovariance()
            # print stateMean
            if len(seenLandmarks_) == 1:
                Plx = np.dot(G1, Prr)
            else:

                lastStateCovariance = self.getStateCovariance()
                Prm = lastStateCovariance[0:3, 3:]
                Plx = np.dot(G1, np.bmat([[Prr, Prm]]))
            Pll = np.array(np.dot(np.dot(G1, Prr),
                                  np.transpose(G1))) + np.array(
                                      np.dot(np.dot(G2, measurementCovariance),
                                             np.transpose(G2)))
            P = np.bmat([[stateCovariance, np.transpose(Plx)], [Plx, Pll]])
            stateCovariance = P
            # a new cylinder was detected, so the dimensions of stateMean and
            # stateCovariance change and both need to be stored
            KalmanFilter.setStateMean(self, stateMean)
            KalmanFilter.setStateCovariance(self, stateCovariance)
        else:
            # if old landmark stateMean & stateCovariance remain the same (will be changed in the update phase by the kalman gain)
            # calculate expected measurement
            # get the index of the observed cylinder to fetch its previous position and calculate the expected measurement
            label = measurement[2]
            vec1 = mapping(seenLandmarks_.index(label) + 1)
            landmarkPriorAbs = [[stateMean[dimR_ + vec1[0] - 1][0]],
                                [stateMean[dimR_ + vec1[1] - 1][0]]]
            [expectmeasurement, Hr,
             Hl] = self.robot.sense(robotCurrentAbs, landmarkPriorAbs)
            # get measurement
            Z = [[measurement[0]], [measurement[1]]]
            # Update
            x = stateMean
            # y = Z - expectedMeasurement
            y = np.array(Z) - np.array(expectmeasurement)
            # H = [Hr, 0, ..., 0, Hl]; the position of Hl depends on when the landmark was first seen
            H = np.reshape(Hr, (2, 3))
            for i in range(0, seenLandmarks_.index(label)):
                H = np.bmat([[H, np.zeros([2, 2])]])
            H = np.bmat([[H, np.reshape(Hl, (2, 2))]])
            for i in range(
                    0,
                    len(seenLandmarks_) - seenLandmarks_.index(label) - 1):
                H = np.bmat([[H, np.zeros([2, 2])]])
            # compute S
            S = np.dot(np.dot(H, stateCovariance),
                       np.transpose(H)) + measurementCovariance

            if (abs(S) < 0.000001).all():
                print('Non-invertible S Matrix')
                raise ValueError
            else:
                #   calculate Kalman gain
                K = np.dot(np.dot(stateCovariance, np.transpose(H)),
                           np.linalg.inv(S))
                #   compute posterior mean
                # simple gating: if the correction is too large, treat the
                # measurement as an outlier and leave the mean unchanged
                E = np.array(np.dot(K, y))
                if abs(E[0]) > 0.1:
                    posteriorStateMean = np.array(x)
                else:
                    posteriorStateMean = np.array(x) + np.array(np.dot(K, y))

                # compute posterior covariance
                I = np.identity(len(np.dot(K, H)))
                posteriorStateCovariance = np.dot(I - (np.dot(K, H)),
                                                  stateCovariance)
                # check theta robot is a valid theta in the range [-pi, pi]
                posteriorStateMean[2][0] = pi2pi(posteriorStateMean[2][0])
                # update robot pose
                robotPose = posteriorStateMean[0:3]
                lab.append(str(label))
                # set robot pose
                self.robot.setPose(robotPose)
                # updated robot covariance
                robotCovariance = posteriorStateCovariance[0:3, 0:3]
                # set robot covariance
                self.robot.setCovariance(robotCovariance)
                # set posterior state mean
                KalmanFilter.setStateMean(self, posteriorStateMean)
                # set posterior state covariance
                KalmanFilter.setStateCovariance(self, posteriorStateCovariance)
                # print 'robot absolute pose : ', robotAbs
                vec = mapping(seenLandmarks_.index(label) + 1)
                landmark_abs_[int(label) -
                              1] = [[[stateMean[dimR_ + vec[0] - 1][0]],
                                     [stateMean[dimR_ + vec[1] - 1][0]]]]
            for i in range(0, len(landmark_abs_)):
                print 'landmark absolute position : ', i + 1, ',', np.median(
                    landmark_abs_[i], 0)

            solution = landmark_abs_
            return posteriorStateMean, posteriorStateCovariance
Example #18
from edge import return_3_view_img
from image import drawSame
from mapping import mapping
from matplotlib import pyplot as plt

front, left, top = drawSame()
return_3_view_img()
data = mapping(front, left, top, 100, 100, 100, 100)
mid = front.shape[0]/2
x, y, z = zip(*data)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(x, y, z)
plt.show()
Example #19
#trim the reads
trim(readData, cfg, numThreads)

#set up initial mapping jobs
mapping_list = []
for id in readData.runtime['trimmed']:
    mapping_list.append((id, readData.runtime['trimmed'][id][0],
                         readData.runtime['trimmed'][id][1],
                         os.path.abspath(cfg['exec']['referenceSequence'])))
sc.checkexists(os.path.join(cfg['exec']['outdir'] + '/initial_mapping'))

#index the reference sequence
indexing(cfg, os.path.abspath(cfg['exec']['referenceSequence']))

#run initial mapping jobs
bam_list = mapping(cfg, mapping_list,
                   cfg['exec']['outdir'] + '/initial_mapping', numThreads)

#determine average depth
print('\nSniffles: Calculating average read depth in the initial mapping')
bam_list = average_depth(cfg, bam_list,
                         cfg['exec']['outdir'] + '/initial_mapping',
                         cfg['exec']['outdir'] + '/coverage')
print('\nSniffles: Finished determining average read depth')

#normalize coverage
if cfg['exec']['normalizeCoverage']:
    fastq_list = rc.normCoverage(cfg, bam_list, numThreads)
    mapping_list = []
    for fastq in fastq_list:
        read1 = fastq[0]
        read2 = fastq[1]
Example #20
            elif k == ord('q'):
                cap.release()
                sys.exit(0)


    cv2.destroyAllWindows()
    print "training"
    #leftRegressorX.fit([[x[0]] for x in leftEyeXs],[x[1] for x in leftEyeXs])
    #leftRegressorY.fit([[y[0]] for y in leftEyeYs],[y[1] for y in leftEyeYs])
    leftRegressorX.fit([[x[0]] for x in rightEyeXs],[x[1] for x in rightEyeXs])
    leftRegressorY.fit([[y[0]] for y in rightEyeYs],[y[1] for y in rightEyeYs])

    print "Done Training"
    # Once all the calibration points have experimental XYs for each eye, use the correlation to make a mapping function
    polyorder = 3
    leftEyeXpoly = mapping.mapping(leftEyeXs, polyorder)
    leftEyeYpoly = mapping.mapping(leftEyeYs, polyorder)
    rightEyeXpoly = mapping.mapping(rightEyeXs, polyorder)
    rightEyeYpoly = mapping.mapping(rightEyeYs, polyorder)

    #plot

    leftEyeXpoly = np.poly1d(leftEyeXpoly)

    #plt.plot([x[0] for x in leftEyeXs],[x[1] for x in leftEyeXs], '.',[x[0] for x in leftEyeXs],leftEyeXpoly)
    #plt.show()

    leftEyeYpoly = np.poly1d(leftEyeYpoly)
    rightEyeXpoly = np.poly1d(rightEyeXpoly)
    rightEyeYpoly = np.poly1d(rightEyeYpoly)
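mapping(points, polyorder) here fits a calibration polynomial whose coefficients are handed to np.poly1d above; given the (measured, target) pairs collected during calibration, it is plausibly a thin wrapper around np.polyfit (an assumption):

import numpy as np

def mapping(points, polyorder):
    # points is a list of (measured, target) pairs per calibration dot;
    # fit target = poly(measured) and return the coefficients.
    measured = [p[0] for p in points]
    target = [p[1] for p in points]
    return np.polyfit(measured, target, polyorder)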
Example #21
import shlex
import numpy as np

from qfunc import qfunc
from bs import bitstream
from mapping import mapping

bits = bitstream(100000)
symbol = []

for i in range(0, bits.size - 2, 3):
    b0 = bits[i]
    b1 = bits[i + 1]
    b2 = bits[i + 2]
    symbol.append(mapping(b0, b1, b2))

# one bit is left over (100000 = 3 * 33333 + 1); pad it with two zeros
b0 = bits[len(bits) - 1]
b1 = 0
b2 = 0

symbol.append(mapping(b0, b1, b2))
snr_db = np.linspace(0, 9, 10)
snrlen = 10

err = []

print(len(symbol))

s_in, s_q = 0, 0
Example #22
def run_style_transfer(
    content_img, style_img, input_img, first_pass_img, style_aligned_img,
    mask, learning_rate, content_layers, style_layers, n_iter,
    style_weight=0.007, content_weight=1.0, phase=0, pass_=1):

    # extract content features
    content_features = Net(content_layers=content_layers)(content_img, phase=phase).content_features
    style_aligned_features = Net(content_layers=content_layers, mask=mask)(style_aligned_img, phase=phase).content_features

    # modify the content features through the use of gain maps (style transfer
    # for head portraits)
    if use_gain_maps:
        for i, (c, s) in enumerate(zip(content_features, style_aligned_features)):
            content_features[i] = c * gain_map(c, s)

    # extract style features
    style_features   = Net(style_layers=style_layers, mask=mask)(style_img, phase=phase).style_features
    input_features = Net(style_layers=style_layers, mask=mask)(first_pass_img, phase=phase).style_features

    # first pass
    if pass_ == 1:
        maps = mapping(input_features, style_features)
        modified_style_features = align(style_features, maps)

    # second pass
    else:
        # index of the reference layer
        ref = 2
        # determine the matching between content and style patches
        map = mapping([input_features[ref]], [style_features[ref]])[0]
        mask = nn.Upsample(size=style_features[ref].shape[2:4], mode='nearest')(mask)

        # make the mapping more robust
        map = refined_mapping(map, style_features[ref][0], mask.reshape(-1))

        # propagate the mapping obtained at the reference layer to other style layers
        mappings = [propagate_mapping(map, style_features[ref].shape[2:4], sf.shape[2:4]) for sf in style_features]

        # align the style features based on the mapping
        modified_style_features = align(style_features, mappings)

    net = Net(content_layers=content_layers, style_layers=style_layers, mask=mask)
    features = {}

    optimizer = optim.LBFGS([input_img.requires_grad_()], lr=learning_rate)

    run = [0]
    while run[0] <= n_iter:

        def closure():
            input_img.data.clamp_(0, 1)
            optimizer.zero_grad()
            model = net(input_img, content_features=content_features, \
                        style_features=modified_style_features, phase=phase)

            content_score = model.content_loss
            style_score = model.style_loss

            content_score = content_weight/len(content_layers) * content_score
            style_score = style_weight/len(style_layers) * style_score

            tv_loss = 0.000001 * total_variation_loss(input_img)

            loss = content_score + style_score + tv_loss
            loss.backward(retain_graph=True)


            run[0] += 1
            if run[0] % 50 == 0:
                print("run {}:".format(run))
                print('Style Loss : {:4f} Content Loss: {:4f} TV Loss: {:4f}'.format(
                    style_score, content_score, tv_loss, loss))
                Image.from_tensor(input_img).save("./frames/frame-{}-{}.png".format(phase,run[0]))

            return style_score + content_score

        optimizer.step(closure)

    input_img.data.clamp_(0, 1)

    return input_img
Example #23
#!/usr/bin/env python
from __future__ import print_function
import argparse
import mapping

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', '-i', help='input filepath', required=True)
    parser.add_argument('--output',
                        '-o',
                        help='output filepath',
                        required=True)
    args = parser.parse_args()

    words = [l.strip() for l in open(args.input)]

    with open(args.output, 'w') as fg:
        for entry in mapping.mapping(words):
            print(' . '.join([syl.placeTone().toString() for syl in entry]),
                  file=fg)
Example #24
def extractFromFile(input_file, output_file, groundtruthFile):  
    hm_terminal_enABNF = importENabnf()
    f = open(input_file,"r")
    f_out_overall = open("extracted_tmp","w")
    
    counter_line = 0
    original_sentence = []
    statement_hm = {}
    statement_overall = {}

    
    for line in f:
        line = line.replace("\n","")
        flag = False
        if line.startswith("<"):
            tmp = line.split(" -> ")
            if hm_terminal_enABNF.has_key(tmp[1].lower()):
                replacement = hm_terminal_enABNF[tmp[1].lower()]
                statement_hm[tmp[0]] = replacement

            else:
                statement_hm[tmp[0]] = tmp[1]
        if line == "":
            for key,value in statement_hm.iteritems():
                if "Statement" in key:
                    while "Unknown" in value or "Individual" in value or "Statement" in value:
                        value = value.replace(" ","-")
                        value = value.replace("-<SGM>-","-")
#                        print key,value
                        tmp_terminals =  re.findall(r'(<[A-Za-z\_0-9]*>)', value)
                        for y in tmp_terminals:
                            try:
                                if "Unknown" not in statement_hm[y] and "Individual" not in statement_hm[y] and "Statement" not in statement_hm[y]:
                                    value = value.replace(y,"<"+statement_hm[y].upper().replace(" ","")+">")
                                else:
                                    value = value.replace(y,statement_hm[y])

                            except:
                                pass
                if statement_overall.has_key(key):
                    t_s = statement_overall[key]
                    if value in t_s:
                        pass
                    else:
                        t_s.append(value)
                        statement_overall[key] = t_s
                else:
                    statement_overall[key] = [value]
                    
            statement_hm = {}

#Idea: collect all rules for a block that start with <.
#Then split on " -> " (store in tmp) and build the hash map with key = tmp[0], value = tmp[1].
#In the last step take the longest statement rules, look up the "unknowns" in the list, and match them directly to en.abnf.
#After that continue to the mapping as usual, but check whether one save step can be skipped by passing the mapping function an array with all statement +
#fragment rules directly.
#Remember that the variable Unknown can occur several times, with different city names.
#The mapping to the eabnf rules could of course also be done directly in this step,
#so the city name would not have to be stored at all.
#During mapping, iterate until no more keys are found in the hash map; that should then be the last possibility.


    for key in statement_overall:
        write_string=""
        if len(statement_overall[key]) > 0 and "statement" in key.lower():
            write_string += key+","
            for x in statement_overall[key]:
                write_string += x+","
            write_string = write_string[:-1]
            write_string = write_string.replace(" ","")
            if write_string.endswith(":  \""):
                pass
            else:
                write_string += "\n\n\n"
                f_out_overall.write(write_string)
            
    f_out_overall.close()
    mapping("extracted_tmp",output_file,groundtruthFile)