Example #1
def analyze_file(input_file, output_file):
    pipe = pipes.Template()
    command = 'lt-proc -a /home/anna/apertium-en-es/es-en.automorf.bin'
    pipe.append(command, '--')
    pipe.copy(input_file, output_file)
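For reference, the '--' kind string passed to append() is pipes.STDIN_STDOUT: the step reads its standard input and writes its standard output, and copy() then runs the assembled shell pipeline from the input file to the output file. A minimal self-contained sketch of the same pattern (tr as a stand-in command, hypothetical file names):

import pipes

pipe = pipes.Template()
# '--' == pipes.STDIN_STDOUT: the step reads stdin and writes stdout
pipe.append('tr a-z A-Z', '--')
# copy() runs roughly "tr a-z A-Z <in.txt >out.txt" through the shell
pipe.copy('in.txt', 'out.txt')  # hypothetical file names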
def testBadOpenMode(self):
    t = pipes.Template()
    self.assertRaises(ValueError, t.open, 'bogusfile', 'x')

def testSetDebug(self):
    t = pipes.Template()
    t.debug(False)
    self.assertEqual(t.debugging, False)
    t.debug(True)
    self.assertEqual(t.debugging, True)
Example #4
def write_pipe(data, pipefile):

    t = pipes.Template()
    fpw = t.open(pipefile, 'w')
    fpw.write(data)
    fpw.close()
def testReadOpenSink(self):
    # check calling open('r') on a pipe ending with
    # a sink raises ValueError
    t = pipes.Template()
    t.append('boguscmd', pipes.SINK)
    self.assertRaises(ValueError, t.open, 'bogusfile', 'r')
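The two-character kind strings used throughout these examples correspond to named constants in the pipes module; a quick self-checking reference (values as defined in CPython's Lib/pipes.py):

import pipes

# first character: how the step takes input; second: how it emits output
assert pipes.STDIN_STDOUT == '--'    # reads stdin, writes stdout
assert pipes.FILEIN_FILEOUT == 'ff'  # reads the $IN file, writes the $OUT file
assert pipes.FILEIN_STDOUT == 'f-'   # reads the $IN file, writes stdout
assert pipes.STDIN_FILEOUT == '-f'   # reads stdin, writes the $OUT file
assert pipes.SOURCE == '.-'          # takes no input, writes stdout
assert pipes.SINK == '-.'            # reads stdin, emits no output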
def analyze(input_file, output_file, directory, pair):
    pipe = pipes.Template()
    command = 'lt-proc -a ' + directory + pair[1] + '-' + pair[0] + '.automorf.bin'
    pipe.append(command, '--')
    pipe.copy(input_file, output_file)
Example #7
def get_bricks(host, vol):
    t = pipes.Template()
    t.prepend("gluster --remote-host=%s system getspec %s" % (host, vol), ".-")
    return t.open(None, "r")
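The '.-' kind here is pipes.SOURCE: the command generates the data itself, so the file argument to open(..., 'r') is unused and passing None works. A minimal sketch of the same pattern, with echo standing in for the gluster call:

import pipes

t = pipes.Template()
t.prepend('echo hello', '.-')  # SOURCE: the command itself produces the data
f = t.open(None, 'r')          # the input filename is ignored for a source step
print(f.read())                # prints 'hello'
f.close()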
Example #8
def make_xml_transfer_weights_mono(scores_fname, prefix, rule_map, rule_xmls):
    """
    Sum up the weights for each rule-pattern pair,
    add the result to xml weights file.
    """
    print('Summing up the weights and making xml rules.')
    btime = clock()

    # make output file names
    sorted_scores_fname = prefix + '-chunk-weights-sorted.txt'
    ofname = prefix + '-rule-weights.w1x'

    # create pipeline
    pipe = pipes.Template()
    pipe.append('sort $IN > $OUT', 'ff')
    pipe.copy(scores_fname, sorted_scores_fname)

    # create empty output xml tree
    oroot = etree.Element('transfer-weights')
    et_newrulegroup = etree.SubElement(oroot, 'rule-group')

    with open(sorted_scores_fname, 'r', encoding='utf-8') as ifile:
        # read and process the first line
        prev_group_number, prev_rule_number, prev_pattern, weight = ifile.readline(
        ).rstrip('\n').split('\t')
        total_pattern_weight = float(weight)
        et_newrule = make_et_rule(prev_rule_number, et_newrulegroup, rule_map,
                                  rule_xmls)

        # read and process other lines
        for line in ifile:
            group_number, rule_number, pattern, weight = line.rstrip(
                '\n').split('\t')
            if group_number != prev_group_number:
                # rule group changed: flush pattern, close previous group, open a new one
                et_newpattern = make_et_pattern(et_newrule, prev_pattern,
                                                total_pattern_weight)
                et_newrulegroup = etree.SubElement(oroot, 'rule-group')
                et_newrule = make_et_rule(rule_number, et_newrulegroup,
                                          rule_map, rule_xmls)
                total_pattern_weight = 0.
            elif rule_number != prev_rule_number:
                # rule changed: flush previous pattern, create new rule
                et_newpattern = make_et_pattern(et_newrule, prev_pattern,
                                                total_pattern_weight)
                et_newrule = make_et_rule(rule_number, et_newrulegroup,
                                          rule_map, rule_xmls)
                total_pattern_weight = 0.
            elif pattern != prev_pattern:
                # pattern changed: flush previous
                et_newpattern = make_et_pattern(et_newrule, prev_pattern,
                                                total_pattern_weight)
                total_pattern_weight = 0.
            # add up rule-pattern weights
            total_pattern_weight += float(weight)
            prev_group_number, prev_rule_number, prev_pattern = group_number, rule_number, pattern

        # flush the last rule-pattern
        et_newpattern = make_et_pattern(et_newrule, prev_pattern,
                                        total_pattern_weight)

    if using_lxml:
        # lxml supports pretty print
        etree.ElementTree(oroot).write(ofname,
                                       pretty_print=True,
                                       encoding='utf-8',
                                       xml_declaration=True)
    else:
        etree.ElementTree(oroot).write(ofname,
                                       encoding='utf-8',
                                       xml_declaration=True)

    print('Done in {:.2f}'.format(clock() - btime))
    return ofname
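Both this example and the next drive an external sort through a one-step 'ff' (pipes.FILEIN_FILEOUT) template, where the command string is responsible for reading $IN and writing $OUT itself. The sort step in isolation, with hypothetical file names:

import pipes

pipe = pipes.Template()
# 'ff': the redirection lives inside the command string, via $IN and $OUT
pipe.append('sort $IN > $OUT', 'ff')
pipe.copy('scores.txt', 'scores-sorted.txt')  # hypothetical file names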
Example #9
def make_xml_transfer_weights_parallel(scores_fname, prefix, rule_map,
                                       rule_xmls):
    """
    Sum up the weights for each rule-pattern pair,
    add the result to xml weights file.
    """
    print('Summing up the weights and making xml rules.')
    btime = clock()

    # make output file names
    sorted_scores_fname = prefix + '-chunk-weights-sorted.txt'
    ofname = prefix + '-rule-weights.w1x'

    # create pipeline
    pipe = pipes.Template()
    pipe.append('sort $IN > $OUT', 'ff')
    pipe.copy(scores_fname, sorted_scores_fname)

    # create empty output xml tree
    oroot = etree.Element('transfer-weights')
    et_newrulegroup = etree.SubElement(oroot, 'rule-group')
    pattern_rule_weights = {}

    with open(sorted_scores_fname, 'r', encoding='utf-8') as ifile:
        # read and process the first line
        prev_group_number, rule_number, pattern, weight = ifile.readline(
        ).rstrip('\n').split('\t')
        pattern_rule_weights[pattern] = {}
        pattern_rule_weights[pattern][rule_number] = float(weight)

        # read and process other lines
        for line in ifile:
            group_number, rule_number, pattern, weight = line.rstrip(
                '\n').split('\t')

            if group_number != prev_group_number:
                # rule group changed: flush previous
                make_et_rule_group(et_newrulegroup, pattern_rule_weights,
                                   rule_map, rule_xmls)
                et_newrulegroup = etree.SubElement(oroot, 'rule-group')
                pattern_rule_weights = {}

            pattern_rule_weights.setdefault(pattern, {})
            pattern_rule_weights[pattern].setdefault(rule_number, 0.)
            pattern_rule_weights[pattern][rule_number] += float(weight)

            prev_group_number = group_number

        # flush the last rule-pattern
        make_et_rule_group(et_newrulegroup, pattern_rule_weights, rule_map,
                           rule_xmls)

    if using_lxml:
        # lxml supports pretty print
        etree.ElementTree(oroot).write(ofname,
                                       pretty_print=True,
                                       encoding='utf-8',
                                       xml_declaration=True)
    else:
        etree.ElementTree(oroot).write(ofname,
                                       encoding='utf-8',
                                       xml_declaration=True)

    print('Done in {:.2f}'.format(clock() - btime))
    return ofname
Example #10
                print "Set min Position to {}".format(minPos)
        else:
            if toggle:
                print "Go."
                toggle = False

            numberOfPos = minPos - maxPos + 1
            percentage = (y - maxPos) / numberOfPos

            if percentage < 0:
                percentage = 0
            if percentage > 1:
                percentage = 1

            if args["green"] == "g":
                f = pipes.Template().open('percent1.txt', 'w')
                f.write("{}".format(percentage))
            else:
                f = pipes.Template().open('percent2.txt', 'w')
                f.write("{}".format(percentage))

        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
def generate_mapping_stats(sam_files, output_file, gtf_file, sample_name,
                           count):

    print("Loading GTF file...")
    gtf_dict = load_GTF(gtf_file)
    print("Loaded.")

    #OUTPUT TABLE CONTAINING STATS
    output_table = OrderedDict()

    #Dict indicating which genes a specific read maps to
    #It is a temporary dict
    reads_mapped_to = defaultdict(str)
    exonic_mappings_temp = defaultdict(str)
    #Dict indicating which read is multi-mapped
    #It is a temporary dict
    multi_maps = defaultdict(str)
    #sam_files = [input_file]
    exonic_multi_table = defaultdict(str)

    #MAPPABILITY
    output_table["total"] = 0
    output_table["mapped"] = 0
    output_table["unmapped"] = 0
    output_table["unique"] = 0
    output_table["multi"] = 0

    #CODING VERSUS NON-CODING REGIONS
    output_table["intergenic"] = 0
    output_table["intragenic"] = 0
    output_table["exonic"] = 0
    output_table["intronic"] = 0
    output_table["ambigious"] = 0

    #CODING REGIONS MAPPABILITY
    output_table["exonicU"] = 0
    output_table["exonicM"] = 0

    #ALIGNMENT CODING VS NONCODING
    output_table["alignments"] = 0
    output_table["multi-intergenic"] = 0
    output_table["multi-intragenic"] = 0
    output_table["multi-exonic"] = 0
    output_table["multi-intronic"] = 0
    output_table["multi-ambigious"] = 0

    #ERROR
    output_table["perfect"] = 0
    output_table["partly_perfect"] = 0
    output_table["mapped_no_correct"] = 0
    for i in range(0, 10):
        output_table["S_" + str(i)] = 0
    output_table["S_10+"] = 0
    output_table["I"] = 0
    output_table["D"] = 0
    output_table["INDEL"] = 0

    reads = Counter()
    multi_reads = defaultdict(str)

    #SAM PARSE SAM FILE
    for sam in sam_files:
        print("Parsing sam file... %s" % sam)
        t = pipes.Template()
        t.append("samtools view $IN ", 'f-')
        ff = t.open(sam, 'r')
        for line in ff:
            ###sam_f = pysam.AlignmentFile(sam, "rb")
            ###for sam_f_alignment in sam_f:
            ###split=line.split("\t")
            #line = sam_f_alignment.tostring()
            split = line.split("\t")
            if (not line.startswith("@PG") and not line.startswith("@HD")
                    and not line.startswith("@SQ") and len(split) >= 10):
                read_name = split[0]
                flagCode = int(split[1])
                chrom = split[2]
                pos = split[3]
                errors = split[5]
                read = split[9]

                errors_a = list(errors)
                number = ""
                num = 0
                error_table = defaultdict(int)
                name_and_flag = read_name

                #CHECK IF READ MAPPED OR UNMAPPED
                #IT IS UNMAPPED
                if (flagCode & 0x0004 != 0):
                    output_table["unmapped"] += 1
                    output_table["total"] += 1
                    error_table["*"] += 1
                #IT IS MAPPED
                else:
                    if (flagCode & 0x0001 !=
                            0):  #This is paired end sequencing
                        if (flagCode & 0x0040 != 0):  #1st read
                            name_and_flag += ";first"
                        if (flagCode & 0x0080 != 0):  #2nd read
                            name_and_flag += ";second"

                    # CHECK TO WHICH GENE(S) IT MAPPED TO
                    genes_info, num_genes, num_exons = get_gene(
                        gtf_dict, [chrom, pos])
                    #GENE COUNTS: only NON-overlapping genes and if read within exons
                    if (count and num_genes == 1 and num_exons > 0):
                        info = genes_info[0]
                        gene_id = info[4]
                        mapped_to = []
                        if (name_and_flag in reads_mapped_to):
                            mapped_to = reads_mapped_to[name_and_flag]
                        mapped_to.append(gene_id)
                        reads_mapped_to[name_and_flag] = mapped_to

                    output_table["alignments"] += 1.0
                    #STATS
                    if (name_and_flag not in reads):
                        reads[name_and_flag] += 1
                        output_table["unique"] += 1
                        output_table["total"] += 1
                        output_table["mapped"] += 1

                        if (num_genes == 0):
                            output_table["intergenic"] += 1
                        elif (num_genes == 1):
                            output_table["intragenic"] += 1
                            if (num_exons == 0):
                                output_table["intronic"] += 1
                            else:
                                output_table["exonic"] += 1
                                output_table["exonicU"] += 1
                                d = []
                                if (name_and_flag in exonic_mappings_temp):
                                    d = exonic_mappings_temp[name_and_flag]
                                d.append([genes_info[0], chrom, pos])
                                exonic_mappings_temp[name_and_flag] = d
                        elif (num_genes > 1):
                            output_table["ambigious"] += 1

                    #READ IS MULTI-MAPPED
                    else:
                        if (reads[name_and_flag] == 1):
                            output_table["unique"] -= 1
                            output_table["exonicU"] -= 1
                            output_table["multi"] += 1
                        reads[name_and_flag] += 1
                        d = []
                        #CHECK WHETHER THE FIRST MAPPING WAS EXONIC OR INTRONIC
                        if (name_and_flag in exonic_mappings_temp):
                            d = exonic_mappings_temp[name_and_flag]
                        #output_table["alignments"] += 1.0
                        if (num_genes == 0):
                            output_table["multi-intergenic"] += (1)
                        elif (num_genes == 1):
                            output_table["multi-intragenic"] += (1)
                            if (num_exons == 0):
                                output_table["multi-intronic"] += (1)
                            else:
                                output_table["multi-exonic"] += (1)
                                d.append([genes_info[0], chrom, pos])
                        elif (num_genes > 1):
                            output_table["multi-ambigious"] += (1)
                        #IF AT LEAST ONE EXONIC ALIGNMENT
                        if (len(d) > 0):
                            exonic_multi_table[name_and_flag] = d
                    #PARSE MAPPING ERRORS
                    for i in errors_a:
                        if (re.match("[0-9]", i)):
                            number += (i)
                        elif (re.match("[A-Z]", i)):
                            num = int(number)
                            error_table[i] += num
                            number = ""
                    #print output_table
                    #TABLE OF HOW MANY READS MAP PERFECTLY, PARTLY PERFECTLY, WITH SUBSTITUTIONS ETC.
                    if ("M" in error_table and len(error_table) == 1):
                        output_table["perfect"] += 1
                    elif ("M" in error_table and len(error_table) > 1):
                        output_table["partly_perfect"] += 1
                    elif ("M" not in error_table and "*" not in error_table):
                        output_table["mapped_no_correct"] += 1

                    if ("S" in error_table):
                        if (int(error_table["S"]) < 10):
                            output_table["S_" + str(error_table["S"])] += 1
                        else:
                            output_table["S_10+"] += 1
                    elif ("S" not in error_table):
                        output_table["S_0"] += 1

                    if ("I" in error_table):
                        output_table["I"] += 1

                    if ("D" in error_table):
                        output_table["D"] += 1

                    if ("I" in error_table or "D" in error_table):
                        output_table["INDEL"] += 1

        ff.close()
    #WEIGHT COUNTS
    if (count):
        counts, counts_unique, counts_multi = weight_counts(reads_mapped_to)
        write_counts(output_file + ".counts.unique", counts_unique,
                     sample_name)
        write_counts(output_file + ".counts", counts, sample_name)
        write_counts(output_file + ".counts.multi", counts_multi, sample_name)

    o = ""
    exonicM = len(exonic_multi_table.keys())
    output_table["exonicM"] = exonicM
    write_stats(output_file, output_table, sample_name)
Example #12
def main(args):
    f = gzip.open(args.vcf) if args.vcf.endswith('.gz') else open(args.vcf)

    pca_data = read_pcs(args.pca)
    sex_data = read_sex(args.sex)
    consanguineous_samples = read_consanguineous_samples(args.consanguineous)

    # Opening output files
    if not args.output.endswith('.gz'): args.output += '.gz'
    pipe = pipes.Template()
    pipe.append('bgzip -c /dev/stdin', '--')
    g = pipe.open(args.output, 'w')

    header = None
    for line in f:
        line = line.strip()

        # Reading and writing header lines
        if line.startswith('#'):
            if line.startswith('#CHROM'):
                for metric in metrics:
                    print >> g, '##INFO=<ID=%s_HIST,Number=R,Type=String,Description="Histogram for %s; Mids: %s">' % (
                        metric, metric, all_mids[metric])
                print >> g, '##INFO=<ID=DOUBLETON_DIST,Number=A,Type=String,Description="Euclidean distance of carriers of doubletons">'

                print >> g, '##INFO=<ID=AC_MALE,Number=A,Type=String,Description="Allele count among males">'
                print >> g, '##INFO=<ID=AC_FEMALE,Number=A,Type=String,Description="Allele count among females">'

                print >> g, '##INFO=<ID=AN_MALE,Number=1,Type=String,Description="Allele number among males">'
                print >> g, '##INFO=<ID=AN_FEMALE,Number=1,Type=String,Description="Allele number among females">'

                print >> g, '##INFO=<ID=AC_CONSANGUINEOUS,Number=A,Type=String,Description="Allele count among individuals with F > 0.05">'
                print >> g, '##INFO=<ID=AN_CONSANGUINEOUS,Number=1,Type=String,Description="Allele number among individuals with F > 0.05">'
                print >> g, '##INFO=<ID=Hom_CONSANGUINEOUS,Number=A,Type=String,Description="Homozygote count among individuals with F > 0.05">'

                header_list = line.split('\t')
                g.write('\t'.join(header_list[:8]) + '\n')
                header_list = [
                    x.replace('#', '').replace(' ', '_') for x in header_list
                ]
                header = dict([(x.replace('#', '').replace(' ', '_'), i)
                               for i, x in enumerate(header_list)])
            else:
                # Edits for VCF header
                if line.startswith('##INFO=<ID=AC_') or line.startswith(
                        '##INFO=<ID=Hom_'):
                    line = line.replace('Number=1', 'Number=A').replace(
                        'Type=String', 'Type=Integer')
                elif line.startswith('##INFO=<ID=Het_'):
                    line = line.replace('Number=A', 'Number=.')
                elif line == '##fileformat=VCFv4.1':
                    line = '##fileformat=VCFv4.2'
                g.write(line + '\n')
            continue

        if header is None:
            print >> sys.stderr, "VCF file does not have a header line (CHROM POS etc.). Exiting."
            sys.exit(1)

        fields = line.split('\t')
        alt_alleles = fields[header['ALT']].split(',')
        alts = len(alt_alleles)
        # Pull out annotation info from INFO and ALT fields
        new_info = fields[header['INFO']].rstrip(';')

        # Pre-computing histograms
        data_list, ad_means, ad_stdevs = get_histograms_for_variant(fields,
                                                                    metrics,
                                                                    all_bins,
                                                                    alts=alts)
        for i, metric in enumerate(metrics):
            hists = []
            for j in range(alts + 1):
                hist = data_list[i * (alts + 1) + j]
                hists.append('|'.join(map(str, hist)))
            new_info += ';%s_HIST=%s' % (metric, ','.join(hists))

        info_field = dict([(x.split('=', 1)) if '=' in x else (x, x)
                           for x in re.split(';(?=\w)', fields[header['INFO']])
                           ])
        acs = info_field['AC_Adj'].split(',')
        homs = info_field['AC_Hom'].split(',')

        if fields[header['FILTER']] == 'PASS':
            if not sum(map(int, info_field['AC_Adj'].split(','))):
                fields[header['FILTER']] = 'AC_Adj0_Filter'
            elif 'InbreedingCoeff' in info_field and float(
                    info_field['InbreedingCoeff']) <= -0.2:
                fields[header['FILTER']] = 'InbreedingCoeff_Filter'

        doubleton_dists = ['.'] * alts

        ac_male = ['.'] * alts
        ac_female = ['.'] * alts
        ac_consang = ['.'] * alts
        hom_consang = ['.'] * alts

        all_samples = get_sample_info(fields)
        if hemizygous_x(fields):
            all_samples = dict([(sample, gt)
                                for sample, gt in all_samples.items()
                                if sex_data[header_list[sample]] == 'Female'
                                or len(set(gt)) == 1])
        elif fields[header['CHROM']] == 'Y':
            all_samples = dict([(sample, gt)
                                for sample, gt in all_samples.items()
                                if sex_data[header_list[sample]] == 'Male'])
        # print all_samples
        variant_sex_data = Counter(
            [sex_data[header_list[sample]] for sample in all_samples])
        an_male = variant_sex_data['Male'] * 2 if not hemizygous_segment(
            fields) else variant_sex_data['Male']
        an_female = variant_sex_data['Female'] * 2

        sample_names = set([header_list[sample] for sample in all_samples])
        an_consang = len(
            set(consanguineous_samples.keys()).intersection(sample_names)) * 2

        for i, alt in enumerate(alt_alleles):
            allele_num = str(i + 1)
            if acs[i] == '0':
                continue
            samples = dict([(sample, gt) for sample, gt in all_samples.items()
                            if allele_num in gt])
            # Calculate doubleton euclidean distance
            if acs[i] == '2' and homs[i] == '0':
                if len(samples) != 2:
                    print >> sys.stderr, 'Variant %s seems to be AC_Adj = 2, but %s samples found with allele' % (
                        '-'.join([
                            fields[header['CHROM']], fields[header['POS']],
                            fields[header['REF']], alt
                        ]), len(samples))
                else:
                    doubleton_samples = samples.keys()
                    if header_list[
                            doubleton_samples[0]] in pca_data and header_list[
                                doubleton_samples[1]] in pca_data:
                        doubleton_dists[i] = euclid_dist(
                            pca_data[header_list[doubleton_samples[0]]],
                            pca_data[header_list[doubleton_samples[1]]])

            # Add male and female allele counts
            ac_male[i] = sum([
                Counter(gt)[allele_num] for sample, gt in samples.items()
                if sex_data[header_list[sample]] == 'Male'
            ])
            if hemizygous_segment(fields):
                ac_male[
                    i] /= 2  # Males will be labelled as homozygous (filtered previously) on non-PAR X/Y
            ac_female[i] = sum([
                Counter(gt)[allele_num] for sample, gt in samples.items()
                if sex_data[header_list[sample]] == 'Female'
            ])

            # Get consanguineous counts
            ac_consang[i] = sum([
                Counter(gt)[allele_num] for sample, gt in samples.items()
                if header_list[sample] in consanguineous_samples
            ])
            hom_consang[i] = sum([
                Counter(gt)[allele_num] == 2 for sample, gt in samples.items()
                if header_list[sample] in consanguineous_samples
            ])

        # Write results
        new_info += ';DOUBLETON_DIST=%s' % (','.join(map(str,
                                                         doubleton_dists)))
        new_info += ';AC_MALE=%s' % (','.join(map(str, ac_male)))
        new_info += ';AC_FEMALE=%s' % (','.join(map(str, ac_female)))
        new_info += ';AN_MALE=%s;AN_FEMALE=%s' % (an_male, an_female)
        new_info += ';AC_CONSANGUINEOUS=%s;AN_CONSANGUINEOUS=%s;HOM_CONSANGUINEOUS=%s' % (
            ','.join(map(str, ac_consang)), an_consang, ','.join(
                map(str, hom_consang)))
        fields[header['INFO']] = new_info
        g.write('\t'.join(fields[:8]) + '\n')

    f.close()
    g.close()
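The bgzip step above shows the write direction of a template: open(..., 'w') returns a writable handle whose contents pass through each step before landing in the output file. A reduced sketch of the same idea, assuming gzip is on PATH and using a hypothetical file name:

import pipes

pipe = pipes.Template()
pipe.append('gzip -c', '--')      # compress whatever is written to the handle
g = pipe.open('out.txt.gz', 'w')  # writable pipe into the pipeline
g.write('hello\n')
g.close()                         # closing flushes and waits for the pipeline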
Example #13
def ack(msg_file: str = ""):
    """
    Print a signed ACK message and upload it to opentimestamps.

    Args:
        msg_file: path to a file containing the ACK message. "-" reads the
            message from stdin; an empty value opens $EDITOR on a template.
    """
    head_sha = _sh("git rev-parse HEAD", quiet=True, check=True)
    msg = ""
    ackr_dir = _get_current_rev_dir()
    msg_path = Path(ackr_dir) / "ack_message.txt"
    signed_path = Path(ackr_dir) / "ack_message.asc"
    tag = _get_current_ackr_tag()
    tag_url = f"https://github.com/{ACKR_GH_USER}/bitcoin/tree/{tag}"

    confdata = _parse_configure_log()
    compiler_v = confdata["clang_version"] or confdata["gcc_version"]

    header_txt = f"ACK {head_sha} ([`{ACKR_GH_USER}/{tag}`]({tag_url}))\n\n"

    if msg_file == "-":
        msg = sys.stdin.read()
    elif not msg_file:
        if not msg_path.is_file():
            msg_path.write_text(header_txt)
        editor = os.environ.get("EDITOR", "nvim")
        run(f"{editor} {msg_path}", shell=True, check=True)
        msg = msg_path.read_text()
    elif Path(msg_file).is_file():
        msg = Path(msg_file).read_text()
    else:
        die(f"bad path given: {msg_file}")

    if f"ACK {head_sha[:6]}" not in msg:
        die("message contains incorrect hash")

    msg_path.write_text(msg)
    print(f"Wrote ACK message to {msg_path}")

    signing_key = _sh("git config user.signingkey", quiet=True)

    if not signing_key:
        die("you need to configure git's user.signingkey")

    signed = True
    try:
        _sh(f"gpg -u {signing_key} -o {signed_path} --clearsign {msg_path}", check=True)
    except Exception:
        print(f"GPG signing with key {signing_key} failed!", file=sys.stderr)
        signed = False

    out = msg

    if signed:
        out += textwrap.dedent(
            """
            <details><summary>Show signature data</summary>
            <p>

            ```
            """
        )
        out += signed_path.read_text()
        out += f"""
```

</p></details>

<details><summary>Show platform data</summary>
<p>

```
Tested on {platform.platform()}

Configured with {confdata['configure_command']}

Compiled with {confdata['cxx']} {confdata['cxxflags']}

Compiler version: {compiler_v}
```

</p></details>

"""

    print()

    print("-" * 80)
    print(out)
    print("-" * 80)

    if _sh("which wl-copy", quiet=True):
        t = pipes.Template()
        t.append("wl-copy", "--")
        f = t.open("pipefile", "w")
        f.write(out)
        f.close()

        print()
        print("Signed ACK message copied to clipboard")

    print(f"\nRunning git push origin {tag}")
    _sh(f"git push origin {tag}")
Example #14
def translate_file(input_file, output_file):
    pipe = pipes.Template()
    command = 'apertium -d /home/anna/apertium-en-es en-es'
    pipe.append(command, '--')
    pipe.copy(input_file, output_file)
Example #15
def testSimplePipe3(self):
    with open(TESTFN, 'w') as f:
        f.write('hello world #2')
    t = pipes.Template()
    t.append(s_command + ' < $IN', pipes.FILEIN_STDOUT)
    self.assertEqual(t.open(TESTFN, 'r').read(), 'HELLO WORLD #2')
Example #16
def to_png_file(self, fname: str):
    cmd = pipes.Template()
    cmd.append('dot -Tpng > %s' % fname, '-.')
    with cmd.open('pipefile', 'w') as f:
        f.write(self.to_dot())
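The '-.' kind is pipes.SINK: the step consumes stdin and produces no stdout, so the output actually goes wherever the command itself sends it (here the dot -Tpng redirection) and the 'pipefile' name handed to open() is never used. A sketch making the dummy argument explicit (hypothetical output name):

import pipes

cmd = pipes.Template()
cmd.append('dot -Tpng > out.png', '-.')  # SINK: the step emits no stdout
with cmd.open('pipefile', 'w') as f:     # filename is a dummy for a sink
    f.write('digraph { a -> b }')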
Example #17
                else:
                    # EOL received

                    alldata += line + '\n'

                    if (line == '50524f4752414d'):  # "PROGRAM"
                        print("Programming session started!", file=sys.stderr)
                        numconnects += 1
                        datasetname = datetime.now().strftime(
                            "%y%m%d-%H%M%S-") + str(numconnects)

                    elif (line == '454e44'):  # "END"
                        print("Programming session ended!", file=sys.stderr)
                        #print(datasetname)

                        p = pipes.Template()
                        p.append(pathcreatehexdump + ' - > $OUT', '-f')
                        # p.debug(True)

                        outfilename = datadir + '/' + datasetname + '.data'
                        f = p.open(outfilename, 'w')
                        try:
                            f.write(alldata)
                        finally:
                            f.close()

                        #print(alldata)

                        if (len(lastfilename) > 0):
                            # compare last 2 dumps
                            print('Starting compare ' + lastfilename + ' ' +
Example #18
def testSimplePipe2(self):
    with open(TESTFN, 'w') as f:
        f.write('hello world #2')
    t = pipes.Template()
    t.append(s_command + ' < $IN > $OUT', pipes.FILEIN_FILEOUT)
    t.copy(TESTFN, TESTFN2)
    self.assertEqual(open(TESTFN2).read(), 'HELLO WORLD #2')
def translate(input_file, output_file, directory, pair):
    pipe = pipes.Template()
    command = 'apertium -d ' + directory + ' ' + pair[0] + '-' + pair[1]
    pipe.append(command, '--')
    pipe.copy(input_file, output_file)
Example #20
def testEmptyPipeline2(self):
    # read through empty pipe
    d = 'empty pipeline test READ'
    with open(TESTFN, 'w') as f:
        f.write(d)
    t = pipes.Template()
    self.assertEqual(t.open(TESTFN, 'r').read(), d)
Example #21
def reset_pipe(pipefile):

    t = pipes.Template()
    t.open(pipefile, 'w')
    t.reset()
Example #22
def testEmptyPipeline3(self):
    # write through empty pipe
    d = 'empty pipeline test WRITE'
    t = pipes.Template()
    t.open(TESTFN, 'w').write(d)
    self.assertEqual(open(TESTFN).read(), d)
import serial
import pipes
import threading
from io_blueprint import IOBlueprint
from flask_socketio import SocketIO, emit

t = pipes.Template()

porteSocket = IOBlueprint('/porte')
# ser = serial.Serial('/dev/ttyUSB2', 115200)


def infoportes():
    with t.open('pipes/porte_0', 'r') as f:
        porte0 = f.read().split('--')
    with t.open('pipes/porte_1', 'r') as f:
        porte1 = f.read().split('--')
    with t.open('pipes/porte_2', 'r') as f:
        porte2 = f.read().split('--')
    with t.open('pipes/porte_3', 'r') as f:
        porte3 = f.read().split('--')
    print('{"porte": [' + str(porte0[0]) + ', ' + str(porte1[0]) + ', ' +
          str(porte2[0]) + ', ' + str(porte3[0]) + ']}')
    return '{"porte": [' + str(porte0[0]) + ', ' + str(porte1[0]) + ', ' + str(
        porte2[0]) + ', ' + str(porte3[0]) + ']}'


@porteSocket.on('porte_0')
def porte(reponse):
    with t.open('pipes/porte_0', 'w') as f:
        f.write(reponse['action'] + "--" + str(reponse['auto']))
Example #24
# Convert "arbitrary" image files to rgb files (SGI's image format).
# Input may be compressed.
# The uncompressed file type may be PBM, PGM, PPM, GIF, TIFF, or Sun raster.
# An exception is raised if the file is not of a recognized type.
# Returned filename is either the input filename or a temporary filename;
# in the latter case the caller must ensure that it is removed.
# Other temporary files used are removed by the function.
import os
import tempfile
import pipes
import imghdr
table = {}
t = pipes.Template()
t.append('fromppm $IN $OUT', 'ff')
table['ppm'] = t
t = pipes.Template()
t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
t.append('fromppm $IN $OUT', 'ff')
table['pnm'] = t
table['pgm'] = t
table['pbm'] = t
t = pipes.Template()
t.append('fromgif $IN $OUT', 'ff')
table['gif'] = t
t = pipes.Template()
t.append('tifftopnm', '--')
t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
t.append('fromppm $IN $OUT', 'ff')
table['tiff'] = t
t = pipes.Template()
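Example #24 is cut off above mid-definition. For orientation, a table of templates like this is typically driven by detecting the file type and running the matching pipeline; a hypothetical sketch (the driver function and its temp-file handling are assumptions, not part of the original):

def convert_to_rgb(filename):
    # hypothetical driver: detect the type, then run the matching template
    ftype = imghdr.what(filename)
    if ftype not in table:
        raise ValueError('unsupported image type: %r' % ftype)
    fd, temp = tempfile.mkstemp(suffix='.rgb')
    os.close(fd)
    table[ftype].copy(filename, temp)  # runs e.g. 'fromppm $IN $OUT'
    return temp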
def testWriteOpenSource(self):
    # check calling open('w') on a pipe ending with
    # a source raises ValueError
    t = pipes.Template()
    t.prepend('boguscmd', pipes.SOURCE)
    self.assertRaises(ValueError, t.open, 'bogusfile', 'w')
Example #26
    def on_printOKButton_clicked(self, widget):
        """
        if print_all is set then
            print all the pages
        else
            If print_page is set then
                print the page
            else
                if 1 tree item is selected then
                    print the selected text
                else
                    print all the selected pages.
        """

        if self.gui.debug:
            print(inspect.getframeinfo(inspect.currentframe())[2])
        self.gui.sync_text_buffer()
        self.saveSettings()
        print_page, print_selection, print_all, page_feed, bold_titles, print_command = self._get_settings()

        if print_all:
            this_iter = self.gui.get_root()
        else:
            this_iter = self.gui.get_first_selected_iter()

        if not this_iter:
            self.gui.msg(_("Nothing selected"))
            return

        last_selected = self.gui.get_last_selected_iter()
        if not last_selected:
            self.gui.msg(_("Nothing selected"))
            return

        t = pipes.Template()
        t.append(print_command, "-.")
        scratch = tempfile.mkstemp(text = True)[1]
        f = t.open(scratch, "w")

        single_page = 0
        if print_page or ( print_selection and self.gui.same_iter(this_iter, last_selected) ):
            single_page = 1
        while (1):
            if print_selection and single_page: 
                body, insertion_point, selection_bound = self.gui.get_selected_text()
            else:
                body = self.gui.get_node_value(this_iter)
            if body:
                f.write(body)
            if single_page:
                break
            if print_selection:
                if self.gui.same_iter(this_iter, last_selected):
                    break
            this_iter = self.gui.get_linear_next(this_iter)
            if not this_iter:
                break
            if page_feed:
                f.write('\f')  # form feed between pages
        f.close()
        os.unlink(scratch)
        self.destroy()
        return
def testRepr(self):
    t = pipes.Template()
    self.assertEqual(repr(t), "<Template instance, steps=[]>")
    t.append('tr a-z A-Z', pipes.STDIN_STDOUT)
    self.assertEqual(repr(t),
                     "<Template instance, steps=[('tr a-z A-Z', '--')]>")
Example #28
def getPaddle2Position():
    percent = 0
    f = pipes.Template().open('percent2.txt', 'r')
    for line in f:
        setPaddle2Position(float(line))
Example #29
# Convert "arbitrary" sound files to AIFF files (Apple and SGI's audio format).
# Input may be compressed.
# Uncompressed file type may be AIFF, WAV, VOC, 8SVX, NeXT/Sun, and others.
# An exception is raised if the file is not of a recognized type.
# Returned filename is either the input filename or a temporary filename;
# in the latter case the caller must ensure that it is removed.
# Other temporary files used are removed by the function.

import os
import tempfile
import pipes
import sndhdr

table = {}

t = pipes.Template()
t.append('sox -t au - -t aiff -r 8000 -', '--')
table['au'] = t

# XXX The following is actually sub-optimal.
# XXX The HCOM sampling rate can be 22k, 22k/2, 22k/3 or 22k/4.
# XXX We must force the output sampling rate else the SGI won't play
# XXX files sampled at 5.5k or 7.333k; however this means that files
# XXX sampled at 11k are unnecessarily expanded.
# XXX Similar comments apply to some other file types.
t = pipes.Template()
t.append('sox -t hcom - -t aiff -r 22050 -', '--')
table['hcom'] = t

t = pipes.Template()
t.append('sox -t voc - -t aiff -r 11025 -', '--')
def runcommand(command):
    t = pipes.Template()
    t.append(command, '--')
    # opening the write end on the fifo and closing it immediately runs
    # the command with its output redirected to the fifo
    f = t.open(fifo, 'w')
    f.close()
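A closing note: the pipes module was deprecated in Python 3.11 and removed in Python 3.13 (PEP 594). For the simple one-step '--' templates that dominate these examples, a rough subprocess equivalent looks like this (a sketch, not a drop-in replacement for the full Template API):

import subprocess

def copy_through(command, input_file, output_file):
    # run `command <input_file >output_file`, like Template.copy() with '--'
    with open(input_file, 'rb') as fin, open(output_file, 'wb') as fout:
        subprocess.run(command, shell=True, stdin=fin, stdout=fout, check=True)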