Example #1
    def fetch_biodata(self):

        self.param["tipe"] = "biodata"

        try:

            profile_param = urlencode(self.param)
            response = urlopen(self.data_url, profile_param).read()
            data = loads(response)

            for profil in data:
                print "Nama                    : %s" % profil["NAMA"]
                print "Tempat/Tanggal lahir    : %s, %s" % (profil["TEMPAT_LAHIR"], profil["TANGGAL_LAHIR"])
                print "Alamat                  : %s - %s" % (profil["ALAMAT"], profil["KOTA"])
                print "Jenis kelamin           : %s" % profil["JENIS_KELAMIN"]
                print "Golongan darah          : %s" % profil["GOLONGAN_DARAH"]
                print "Warga negara            : %s" % profil["WARGA_NEGARA"]
                print "Agama                   : %s" % profil["AGAMA"]
                print "Nim                     : %s" % profil["NIM"]
                print "Email                   : %s" % profil["EMAIL"]
                print "Telepon                 : %s" % profil["TELEPON"]
                print "Ayah                    : %s" % profil["AYAH"]
                print "Ibu                     : %s" % profil["IBU"]

        except URLError:
            print "Terjadi kesalaan pada jaringan"
        except KeyboardInterrupt:
            print "Aborted.."
            sys.exit(1)
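fetch_biodata is a method excerpt; to run, it needs the imports and instance attributes it references. A minimal sketch of that context, with param and data_url taken from the method itself (the class name and endpoint URL are placeholders, not the real service):

# Minimal context sketch for the fetch_* methods in this example (Python 2).
# The class name and URL below are placeholders, not the real endpoint.
import sys
from json import loads
from urllib import urlencode
from urllib2 import urlopen, URLError

class StudentPortal(object):
    def __init__(self, nim):
        self.data_url = "https://portal.example.ac.id/api/data"
        self.param = {"nim": nim}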
    def menu_opcoes(self):
        print ("************************* Calculadora *************************\n")
        print ("(1)Soma (2)Subtracao (3)Divisão (4)Multiplicação (5)Sair")
        operacao = input("\nQual operação deseja: ")

        if operacao == 1:
            primeiro_valor = input("\nInsira o primeiro valor: ")
            segundo_valor = input("Insira o segundo valor : ")

            print self.soma(primeiro_valor, segundo_valor)
            self.menu_opcoes()

        elif operacao == 2:
            primeiro_valor = input("\nInsira o primeiro valor: ")
            segundo_valor = input("Insira o segundo valor : ")

            print self.subtracao(primeiro_valor, segundo_valor)
            self.menu_opcoes()

        elif operacao == 3:
            primeiro_valor = input("\nInsira o primeiro valor: ")
            segundo_valor = input("Insira o segundo valor : ")

            print self.divisao(primeiro_valor, segundo_valor)
            self.menu_opcoes()

        elif operacao == 4:
            primeiro_valor = input("\nInsira o primeiro valor: ")
            segundo_valor = input("Insira o segundo valor : ")

            print self.multiplicacao(primeiro_valor, segundo_valor)
            self.menu_opcoes()

        elif operacao == 5:
            sys.exit()
        else:
            print "Opção inválida"
            self.menu_opcoes()
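menu_opcoes calls four arithmetic methods that this excerpt does not show. A minimal sketch of what they could look like, with the names and two-argument signatures taken from the call sites above (the bodies themselves are assumptions):

    # Assumed companion methods for menu_opcoes; in Python 2, input()
    # evaluates what the user types, so numeric entries arrive as numbers.
    def soma(self, a, b):
        return a + b

    def subtracao(self, a, b):
        return a - b

    def divisao(self, a, b):
        return a / float(b)  # float() avoids silent integer division in Python 2

    def multiplicacao(self, a, b):
        return a * b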
def main(argv, errorlogger = None, runstatslogger = None): 
    global parser
    (opts, args) = parser.parse_args(argv)

    if not valid_arguments(opts, args):
       print usage
       sys.exit(0)

    sample_name = opts.sample_name
    output_folder = opts.output_folder
    input_folder = opts.input_folder


    if opts.input_folder is not None:
       formats = "%s\tNumber of sequences in input file BEFORE QC (nucleotide)\t%s\n"
       compute_fasta_stats(formats, input_folder + PATHDELIM + sample_name + ".fasta", 'nucleotide', 995)

    formats= "%s\tNumber of sequences AFTER QC (nucleotide)\t%s\n"
    compute_fasta_stats(formats, output_folder  + PATHDELIM + sample_name + PATHDELIM + 'preprocessed' + PATHDELIM +  sample_name + ".fasta", 'nucleotide', 1000)

    formats= "%s\tNumber of translated ORFs BEFORE QC (amino)\t%s\n" 
    compute_fasta_stats(formats, output_folder  + PATHDELIM + sample_name + PATHDELIM + 'orf_prediction' + PATHDELIM +  sample_name + ".faa", 'amino', 1995)

    formats= "%s\tNumber of translated ORFs AFTER QC (amino)\t%s\n" 
    compute_fasta_stats(formats, output_folder  + PATHDELIM + sample_name + PATHDELIM + 'orf_prediction' + PATHDELIM +  sample_name + ".qced.faa", 'amino', 2000)
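compute_fasta_stats itself is not shown. From the call sites above it takes a two-slot format string, a FASTA path, a sequence type, and a priority code; a plausible sketch that just counts records and emits the stats line follows (only the signature is grounded in the calls, the body is an assumption):

import sys

# Hedged sketch of compute_fasta_stats, inferred from the four calls above;
# seq_type is accepted but unused in this minimal version.
def compute_fasta_stats(formats, fasta_file, seq_type, priority):
    count = 0
    with open(fasta_file) as fh:
        for line in fh:
            if line.startswith('>'):  # every FASTA record begins with a '>' header
                count += 1
    sys.stdout.write(formats % (str(priority), str(count)))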
Example #4
    def fetch_nilai_mentah(self):

        self.param["tipe"] = "nilai"

        try:

            nilai_param = urlencode(self.param)
            response = urlopen(self.data_url, nilai_param).read()
            data = loads(response)

            for score in data:
                print "+ Kelas            : %s" % score["kd_kelas"]
                print "  Mata Kuliah      : %s" % score["MK"]
                print "  Kode dosen       : %s" % score["kd_dosen"]
                print "  Jumlah SKS       : %s" % score["sks"]
                print "  Nilai            : - Dasar        : %s" % score["ndasar"]
                print "                     - Menengah     : %s" % score["ntengah"]
                print "                     - Mahir        : %s" % score["nmahir"]
                print "  Index            : %s" % score["nilai_indek"]
                print

        except URLError:
            print "Terjadi kesalahan pada jaringan"
        except KeyboardInterrupt:
            print "Aborted.."
            sys.exit(1)
    def __init__(self, dbname,  blastoutput, database_mapfile, refscore_file, opts):
        self.Size = 10000
        self.dbname = dbname
        self.blastoutput = blastoutput
        self.database_mapfile =database_mapfile
        self.refscore_file = refscore_file
        self.annot_map = {} 
        self.i=0
        self.opts = opts
        self.hits_counts = {}
        self.data = {}
        self.refscores = {}

        #print "trying to open blastoutput file " + blastoutput
        try:
           query_dictionary = {}
           create_query_dictionary(self.blastoutput, query_dictionary, self.opts.algorithm) 

           self.blastoutputfile = open(self.blastoutput,'r')
           create_refscores(refscore_file, self.refscores)

#           print "Going to read the dictionary\n"
           create_dictionary(database_mapfile, self.annot_map, query_dictionary)
           query_dictionary = {}
#           print "Doing reading  dictionary\n"
        except AttributeError:
           print "Cannot read the map file for database :" + dbname
           sys.exit(0)
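create_refscores populates self.refscores, which the parser later uses alongside the precomputed 'bsr' (bit score ratio) column. A name-to-self-bitscore table read from a two-column file is the likely shape; a sketch under that assumption:

# Hedged sketch: load "query<TAB>self_bitscore" pairs into the refscores dict.
def create_refscores(refscore_file, refscores):
    with open(refscore_file) as fh:
        for line in fh:
            fields = line.rstrip().split('\t')
            if len(fields) == 2:
                refscores[fields[0]] = float(fields[1])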
Example #6
    def createTree(self, id):
        if id not in self.parent_to_child:
            # leaf node
            if id == "1":
                print self.parent_to_child
                sys.exit(0)
            self.output += str(self.lca.translateIdToName(id))
            return

        children = self.parent_to_child[id][0]
        count = self.parent_to_child[id][1]

        i = 0
        self.output += "("
        for child in children:
            if i > 0:
                self.output += ","
            self.createTree(child)
            i += 1
            if (i % 10000) == 0:
                print "i = " + str(i)
        self.output += ")"
        self.output += str(self.lca.translateIdToName(id))
        if id == "1":
            self.output += ";"
def main(argv, errorlogger = None, runstatslogger = None): 
    global parser
    (opts, args) = parser.parse_args(argv)
    if not check_arguments(opts, args):
       print usage
       sys.exit(0)
    
    if errorlogger:
      errorlogger.write("#STEP\tPARSE_BLAST\n")

    # input file is BLASTed against itself to compute the refscore
    if opts.Lambda is None or opts.k is None:
      if opts.algorithm == 'LAST':
          opts.Lambda = 0.300471
          opts.k = 0.103946

      if opts.algorithm == 'BLAST':
          opts.Lambda = 0.267
          opts.k = 0.0410

    priority = 5000
    for dbname, blastoutput, mapfile in zip(opts.database_name, opts.input_blastout, opts.database_map):
        temp_refscore = opts.refscore_file
        count = process_blastoutput(dbname, blastoutput,  mapfile, temp_refscore, opts, errorlogger = errorlogger)
        if runstatslogger:  
           runstatslogger.write("%s\tTotal Protein Annotations %s (%s)\t%s\n" %( str(priority), dbname, opts.algorithm, str(count)))  
    def next(self):
        if self.i < self.size:
           try:
              fields = [x.strip() for x in self.lines[self.i].split('\t')]
              if self.shortenorfid:
                 self.data['query'] = ShortenORFId(fields[self.fieldmap['query']])
              else:
                 self.data['query'] = fields[self.fieldmap['query']]

              self.data['q_length'] = int(fields[self.fieldmap['q_length']])
              self.data['bitscore'] = float(fields[self.fieldmap['bitscore']])
              self.data['bsr'] = float(fields[self.fieldmap['bsr']])
              self.data['expect'] = float(fields[self.fieldmap['expect']])
              self.data['identity'] = float(fields[self.fieldmap['identity']])
              self.data['ec'] = fields[self.fieldmap['ec']]
              self.data['product'] = re.sub(r'=', ' ', fields[self.fieldmap['product']])

              self.i = self.i + 1
              return self.data
           except:
              print self.lines[self.i]
              traceback.print_exc(10)
              sys.exit(0)
        else:
           raise StopIteration()
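next() implements the Python 2 iterator protocol, which is what lets process_blastoutput further down loop with `for data in blastparser`. The companion __iter__ method is not shown; it is presumably just:

    def __iter__(self):
        # returning self makes the parser directly usable in a for loop
        return self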
  def setUp(self):
    try:
      self.client = blockscore.Client({'api_key': os.environ['BLOCKSCORE_API']})
    except KeyError:
      sys.stderr.write("To run tests, you must have a BLOCKSCORE_API environment variable with a test api key\n")
      sys.exit(2)

    self.test_identity = {
      'date_of_birth': '1980-01-01',
      'identification': {
        'ssn': '0000',
        'passport': '12315235'
      },
      'name': {
        'first': 'john',
        'middle': 'a',
        'last': 'doe'
      },
      'address': {
        'street1': '1 Infinite Loop',
        'street2': 'Floor 3',
        'city': 'Palo Alto',
        'state': 'ca',
        'postal_code': '94309',
        'country_code': 'us'
      },
      'note': 'test'
    }
def insert_orf_into_dict(line, contig_dict):
    rawfields = re.split("\t", line)
    fields = []
    for field in rawfields:
        fields.append(field.strip())

    if len(fields) != 9:
        return

    attributes = {}
    try:
        attributes["seqname"] = fields[0]  # this is a bit of a  duplication
        attributes["source"] = fields[1]
        attributes["feature"] = fields[2]
        attributes["start"] = int(fields[3])
        attributes["end"] = int(fields[4])
    except:
        print line
        print fields
        print attributes
        sys.exit(0)

    try:
        attributes["score"] = float(fields[5])
    except:
        attributes["score"] = fields[5]

    attributes["strand"] = fields[6]
    attributes["frame"] = fields[7]
    split_attributes(fields[8], attributes)

    if "id" in attributes:
        if not attributes["id"] in contig_dict:
            contig_dict[attributes["id"]] = {}
        contig_dict[attributes["id"]] = attributes.copy()
Example #11
    def __write_obj(self):
        try:
            fileObj = open(self.outputFile, 'w')
        except IOError:
            print ("Invalid file: " + self.outputFile)
            sys.exit()
        
        fileObj.write("# Created by obj2poly from "+ str(self.inputFile) + '\n')
        fileObj.write("# Object\n")

        # vertices
        for vert in self.listVertices:
            fileObj.write("v ")
            fileObj.write(str(vert).replace(',', '')[1:-1] + '\n')
        fileObj.write("# ")
        fileObj.write(str(len(self.listVertices)) + ' ')
        fileObj.write("dictVertices\n")

        # faces (OBJ face indices are 1-based, hence the +1)
        for face in self.listFaces:
            face = [i + 1 for i in face]
            fileObj.write("f ")
            fileObj.write(str(face).replace(',', '')[1:-1] + '\n')
        fileObj.write("# ")
        fileObj.write(str(len(self.listFaces)) + ' ')
        fileObj.write("dictFaces\n")
        fileObj.close()
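For orientation, the writer above emits a Wavefront-OBJ-style listing; with four vertices and two triangular faces the output would look roughly like this (values illustrative):

# Created by obj2poly from cube.obj
# Object
v 0.0 0.0 0.0
v 1.0 0.0 0.0
v 1.0 1.0 0.0
v 0.0 1.0 0.0
# 4 dictVertices
f 1 2 3
f 1 3 4
# 2 dictFaces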
Example #12
 def setUp(self):
     try:
         self.client = blockscore.Client({'api_key': os.environ['BLOCKSCORE_API']})
     except KeyError:
         sys.stderr.write("To run tests, you must have a BLOCKSCORE_API environment variable with a test api key\n")
         sys.exit(2)
     self.test_identity = [
         '1980-01-01',
         {'ssn': '0000'},
         {'first': 'john', 'last': 'doe'},
         {'street1': '1 Infinite Loop', 'city': 'Palo Alto', 'state': 'ca',
          'postal_code': '94309', 'country_code': 'us'},
     ]
def process_blastoutput(dbname, blastoutput,  mapfile, refscore_file, opts):
    blastparser =  BlastOutputParser(dbname, blastoutput, mapfile, refscore_file, opts)

    fields = ['q_length', 'bitscore', 'bsr', 'expect', 'aln_length', 'identity', 'ec' ]
    if opts.taxonomy:
       fields.append('taxonomy')
    fields.append('product')

    output_blastoutput_parsed = blastoutput + '.parsed.txt'
    outputfile = open(output_blastoutput_parsed, 'w') 

    fprintf(outputfile, "#%s",'query')
    for field in fields:
         fprintf(outputfile,"\t%s",field)
    fprintf(outputfile, "\n")

    for data in blastparser:
        if not data:
            continue
        try:
            fprintf(outputfile, "%s", data['query'])
        except:
            print data
            sys.exit()
        for field in fields:
            fprintf(outputfile, "\t%s", data[field])
        fprintf(outputfile, "\n")

    outputfile.close()
    return None
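fprintf is used throughout these snippets; one of the header blocks further down imports it from metapaths_utils. Every call site is consistent with a printf-style helper along these lines (a sketch, not the actual implementation):

# Hedged sketch of fprintf: printf-style formatting into a file handle.
def fprintf(fh, fmt, *args):
    fh.write(fmt % args)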
def main(argv, errorlogger = None, runstatslogger = None): 
    global parser
    (opts, args) = parser.parse_args(argv)
    if not check_arguments(opts, args):
       print usage
       sys.exit(0)
    
    if errorlogger:
      errorlogger.write("#STEP\tPARSE_BLAST\n")

    if opts.Lambda is None or opts.k is None:
      if opts.algorithm == 'LAST':
          opts.Lambda = 0.300471
          opts.k = 0.103946

      if opts.algorithm == 'BLAST':
          opts.Lambda = 0.267
          opts.k = 0.0410

    priority = 5000
    priority1 = 5500
    for dbname, blastoutput, mapfile in zip(opts.database_name, opts.input_blastout, opts.database_map):
        temp_refscore = opts.refscore_file
        if opts.parsed_output is None:
           opts.parsed_output = blastoutput + ".parsed.txt"

        count, unique_count = process_blastoutput(dbname, blastoutput,  mapfile, temp_refscore, opts, errorlogger = errorlogger)
        if runstatslogger:  
           runstatslogger.write("%s\tTotal Protein Annotations %s (%s)\t%s\n" %( str(priority), dbname, opts.algorithm, str(count)))  
           runstatslogger.write("%s\tNumber of ORFs with hits in %s (%s)\t%s\n" %( str(priority1), dbname, opts.algorithm, str(unique_count)))  
Example #15
    def __write_semantic_poly(self):
        try:
            filePoly = open(self.outputFile, 'w')
        except IOError:
            print ("Invalid file: " + self.outputFile)
            sys.exit()
        # model information
        filePoly.write('# ' + self.strModelID + '\n')
        filePoly.write('# ' + str(self.isSolid) + '\n')
        # vertices
        filePoly.write(str(len(self.listVertices)) + ' 3'+ ' 0' + ' 0'+'\n')
        for i, vert in enumerate(self.listVertices):
            filePoly.write(str(i) + ' ' + str(vert).replace(',', '')[1:-1] + '\n')

        # faces, one semantic record per face
        filePoly.write(str(len(self.listFaces)) + ' 0' + '\n')
        for i, face in enumerate(self.listFaces):
            filePoly.write('1 0' + ' # ' + self.listFaceSemantics[i][0] + ' # ' + self.listFaceSemantics[i][1] + '\n')
            filePoly.write(str(len(face)) + ' ')
            for vertex_index in face:
                filePoly.write(str(vertex_index) + ' ')
            filePoly.write('\n')
        filePoly.write('0\n')
        filePoly.write('0\n')
        filePoly.close()
    #end __write_semantic_poly
Example #16
def main():
    DBopen()
    
    optlist, args = getopt(sys.argv[1:], "adr")
    
    if optlist and optlist[0][0] == "-r":
        rebuildAliases()
        sys.exit(0)
    
    if len(optlist) != 1 or len(args) != 2:
        print "Wrong argument count!"
        sys.exit(-1)
    
    if optlist[0][0] == "-a":
        action = "add"
    elif optlist[0][0] == "-d":
        action = "del"
    
    domainName = args[0]
    realName = args[1]
    
    if action == "add":
        #addUser(domainName, realName)
        #addDomain(domainName)
        addAliases(domainName)
    elif action == "del":
        delAliases(domainName)
        delDomain(domainName)
        #delUser(domainName)
        
    DBclose()

    print "Things to do: set passwd"
    def __init__(self, dbname,  blastoutput):
        self.dbname = dbname
        self.blastoutput = blastoutput
        self.i=0
        self.SIZE = 10000
        self.data = {}
        self.fieldmap={}
        self.seq_beg_pattern = re.compile("#")
        self.lines = []

        try:
           self.blastoutputfile = open( blastoutput,'r')
           line = self.blastoutputfile.readline()
           if not self.seq_beg_pattern.search(line) :
              print "First line must have field header names and begin with \"#\""
              sys.exit(0)
           header = re.sub('#','',line)

           fields = [x.strip() for x in header.rstrip().split('\t')]
           for k, x in enumerate(fields):
               self.fieldmap[x] = k

        except IOError:
           print "Cannot read the blast output file :" + blastoutput
           sys.exit(0)
    def __init__(self, dbname,  blastoutput):
        self.dbname = dbname
        self.blastoutput = blastoutput
        self.i=1
        self.data = {}
        self.fieldmap={}
        self.seq_beg_pattern = re.compile("#")

        try:
           self.blastoutputfile = open( blastoutput,'r')
           self.lines=self.blastoutputfile.readlines()
           self.blastoutputfile.close()
           self.size = len(self.lines)
           if not self.seq_beg_pattern.search(self.lines[0]) :
              print "First line must have field header names and begin with \"#\""
              sys.exit(0)
           header = self.lines[0].replace('#','',1)
           fields = [x.strip() for x in header.rstrip().split('\t')]
           for k, x in enumerate(fields):
               self.fieldmap[x] = k
           print "Processing :" + dbname
           
        except IOError:
           print "Cannot read the blast output file :" + blastoutput
           sys.exit(0)
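Both constructors build self.fieldmap from the '#'-prefixed header of the parsed BLAST table; next() then locates each column by name. A quick illustration with a hypothetical header line (the column names are taken from next()):

# Illustrative only: header -> fieldmap, mirroring the constructor logic.
header = "query\tq_length\tbitscore\tbsr\texpect\tidentity\tec\tproduct"
fieldmap = {}
for k, x in enumerate([x.strip() for x in header.rstrip().split('\t')]):
    fieldmap[x] = k
print fieldmap['bitscore']   # -> 2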
Example #19
  def setUp(self):
    try:
      self.client = blockscore.Client({'api_key': os.environ['BLOCKSCORE_API']})
    except KeyError:
      sys.stderr.write("To run tests, you must have a BLOCKSCORE_API environment variable with a test api key\n")
      sys.exit(2)

    self.test_company = {
      "entity_name": "BlockScore",
      "tax_id": "123410000",
      "incorporation_date": "1980-08-25",
      "incorporation_state": "DE",
      "incorporation_country_code": "US",
      "incorporation_type": "corporation",
      "dbas": "BitRemite",
      "registration_number": "123123123",
      "email": "*****@*****.**",
      "url": "https://blockscore.com",
      "phone_number": "6505555555",
      "ip_address": "67.160.8.182",
      "address_street1": "1 Infinite Loop",
      "address_street2": None,
      "address_city": "Cupertino",
      "address_subdivision": "CA",
      "address_postal_code": "95014",
      "address_country_code": "US",
    }
Example #20
    def is_login_auth(self):

        self.param["nim"] = self.username
        self.param["pass"] = self.password
        self.param["tipe"] = "login"

        try:

            login_param = urlencode(self.param)
            response = urlopen(self.login_url, login_param).read()
            data = loads(response)

            if data["status"] == 1:
                return True
            else:
                print "Kombinasi username & password salah"
                return False
                sys.exit(1)

        except URLError:
            print "Terjadi kesalahan pada jaringan"
            return False
        except KeyboardInterrupt:
            print "Aborted.."
            return False
            sys.exit(1)
Example #21
    def fetch_tak(self):

        self.param["tipe"] = "tak"
        tak_accumulate = 0

        try:

            tak_param = urlencode(self.param)
            response = urlopen(self.data_url, tak_param).read()
            data = loads(response)

            for raw in data:
                print "+ Nama bagian   : " + raw["nama_bagian"]
                print "  Jenis kegiatan: " + raw["nama_jenisKegiatan"]
                print "  Poin          : " + raw["poin"]
                print "  Tahun         : " + raw["TAHUN"]
                print "  Semester      : " + raw["semester"]
                print "  Nim           : " + raw["nim"]
                print
                tak_accumulate += int(raw["poin"])
            print "[+] Total TAK: %d poin" % tak_accumulate

        except URLError:
            print "Terjadi kesalahan pada jaringan"
        except KeyboardInterrupt:
            print "Aborted.."
            sys.exit(1)
Example #22
def main(argv): 
    # parse and check arguments
    (opts, args) = parser.parse_args()
    argv = check_arguments(opts)
    
    taxadict = {} # dictionary of contigs to ACTUAL taxonomies
    read_taxonomy_list(opts.taxa_list, taxadict)
    
    # Load NCBI Tree and LCA Star object
    lcastar = LCAStar(opts.ncbi_tree_file)
    print 'Done initializing LCA Star'
    
    # Set appropriate parameters
    lcastar.setLCAStarParameters(min_depth = opts.min_depth, alpha = opts.alpha, min_reads = opts.min_reads )

    print ["Contig", "LCAStar", "Majority", "LCASquared"]

    for contig in taxadict.keys():
       taxon = lcastar.lca_star(taxadict[contig])
       print 'LCA Star ' + contig + ' ' + taxon
       taxon = lcastar.lca_majority(taxadict[contig])
       print 'LCA Majority ' + contig + ' ' + taxon
       taxon = lcastar.getTaxonomy([taxadict[contig]] )
       print 'LCA Square ' + contig + ' ' + taxon

    sys.exit(-1)
    def __init__(self, dbname,  blastoutput, database_mapfile, refscore_file, opts):
        self.Size = 10000
        self.dbname = dbname
        self.blastoutput = blastoutput
        self.database_mapfile =database_mapfile
        self.refscore_file = refscore_file
        self.annot_map = {} 
        self.i=0
        self.opts = opts
        self.hits_counts = {}
        self.data = {}
        self.refscores = {}

        try:
           self.blastoutputfile = open(blastoutput, 'r')
           create_refscores(refscore_file, self.refscores)
           create_dictionary(database_mapfile, self.annot_map)
        except AttributeError:
           print "Cannot read the map file for database :" + dbname
           sys.exit(0)
Example #24
def menu():
    global CIDR_FILTER
    global PROTOCOL_FILTER
    global PORT_FILTER

    try:
        opts, args = getopt.getopt(sys.argv[1:], "c:p:t:h", ["cidr-filter=","protocol-filter=","port-filter=","help" ])

    except getopt.GetoptError as err:
        print str(err) 
        usage()
        sys.exit(2)

    for o, a in opts:

        if o in ("-h", "--help"):
            usage()
        elif o in ("-c", "--cidr-filter"):
            CIDR_FILTER = a.lower()
        elif o in ("-p", "--protocol-filter"):
            PROTOCOL_FILTER = a.lower()
        elif o in ("-t", "--port-filter"):
            PORT_FILTER = a.lower()
        else:
            assert False, "unhandled option"
def main(argv): 
    (opts, args) = parser.parse_args()
    if not check_arguments(opts, args):
       print usage
       sys.exit(0)

    input_folder = opts.input_folder
    output_file = opts.output_file

    filePATTERN = re.compile(r'.*COG[0-9]*.*\.fa')
    cogSeqMatchesPATTERN = re.compile(r'[a-zA-Z]*_(.*)__[0-9]*__*(COG[0-9]*).*\.fa')
    cog_list = []
    for file in listdir(input_folder):
        if filePATTERN.match(file):
            hits = cogSeqMatchesPATTERN.search(file)
            if hits:
                cog_list.append((hits.group(1), hits.group(2)))
         

    try:
        outputfile = open(output_file, 'w')
    except IOError:
        print "Cannot open output file for MLTreeMap hits"
        sys.exit(0)

    fprintf(outputfile, "Sequences\tCOG\n")
    for seq, cog in cog_list:
        fprintf(outputfile, "%s\t%s\n", seq, cog)

    outputfile.close()
def main(argv): 
    (opts, args) = parser.parse_args()
    if not check_arguments(opts, args):
       print usage
       sys.exit(0)

    results_dictionary={}
    dbname_weight={}
    for dbname, blastoutput in zip(opts.database_name, opts.input_blastout):
        try:
           results_dictionary[dbname] = {}
           process_parsed_blastoutput(dbname, blastoutput, opts, results_dictionary[dbname])
        except:
           print "Error: " + dbname

    create_annotation(results_dictionary, opts.input_annotated_gff, opts.output_dir)

    for dbname in results_dictionary:
       if  dbname=='cog':
          create_table(results_dictionary[dbname], opts.input_cog_maps, 'cog', opts.output_dir)

       if  dbname=='kegg':
          create_table(results_dictionary[dbname], opts.input_kegg_maps, 'kegg', opts.output_dir)
Example #27
    def display_loop(self):
        print(mainMenu)
        key = input('Choose an option: ')
        k = int(key)
        if k == 1:
            self.list_items()
        elif k == 2:
            self.get_new_item()
        elif k == 3:
            mfr_pn = input('Enter Mfr-pn: ')
            self.fetch_item(mfr_pn)
        elif k == 4:
            if confirm_action('Do you wish to save local changes? This cannot be undone.'):
                self.item_db.save_changes()
        elif k == 5:
            self.import_db()
        elif k == 6:
            self.export_db()
        elif k == 7:
            self.purge_db()
        elif k == 8:
            sys.exit(0)

        if input('Press [q] to exit or any key to continue...') == 'q':
            sys.exit(0)
        else:
            self.display_loop()
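confirm_action is referenced but not defined in this excerpt. A minimal yes/no prompt matching the call site (the name and boolean return are inferred from the if statement; the body is an assumption):

# Hedged sketch of confirm_action: keep asking until the answer is y or n.
def confirm_action(prompt):
    while True:
        answer = input(prompt + ' [y/n]: ').strip().lower()
        if answer in ('y', 'yes'):
            return True
        if answer in ('n', 'no'):
            return False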
Example #28
    def convert(self, url):

        try:
            response = urllib2.urlopen(url)
        except urllib2.HTTPError, e:
            print "URL Error: " + str(e.code) 
            print "      (url [" + url + "] probably wrong)"
            sys.exit(2)
Example #29
def check_arguments(opts):
    if opts.ncbi_tree_file is None or opts.taxa_list is None:
       print usage
       sys.exit(-1)
Example #30
	def refresh_screen(self):
		self.screen.blit(self.raster, (0,0))
		for event in pygame.event.get():
			if event.type == KEYDOWN:
				if event.key == K_ESCAPE:
					sys.exit()
			elif event.type == QUIT:
				sys.exit()
def check_arguments(opts):
    if opts.ncbi_file is None or opts.enzymes_file is None or opts.pathways_file is None:
        print usage
        sys.exit(0)
Example #32
    for connector in connectors:
        name = connector[0].get_name()
        subparser = subparsers.add_parser(name, help='gelk %s -h' % name)
        # We need params for feed
        connector[0].add_params(subparser)

    args = parser.parse_args()

    app_init = datetime.now()

    backend_name = args.backend

    if not backend_name:
        parser.print_help()
        sys.exit(0)

    if 'debug' in args and args.debug:
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s %(message)s')
        logging.debug("Debug mode activated")
    else:
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s %(message)s')
    logging.getLogger("urllib3").setLevel(logging.WARNING)
    logging.getLogger("requests").setLevel(logging.WARNING)

    connector = get_connector_from_name(backend_name, connectors)
    backend = connector[0](**vars(args))
    ocean_backend = connector[1](backend, **vars(args))
    enrich_backend = connector[2](backend, **vars(args))
Example #33
import sys
from os import system
import os
import inspect

if len(sys.argv) != 3:
    print("Please give database names as arguments. USAGE: " +
          "python import_gka_basicinfo.py dubdubdub ilp")
    sys.exit()

fromdatabase = sys.argv[1]

todatabase = sys.argv[2]

basename = "gkabasic"
scriptdir = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe())))
inputsqlfile = scriptdir + "/" + basename + "_getdata.sql"
loadsqlfile = scriptdir + "/" + basename + "_loaddata.sql"

tables = [{
    'name':
    'assessments_survey',
    'insertquery':
    "insert into replacetablename(id, name,created_at,partner_id,status_id, admin0_id, survey_on_id) values(3, 'Ganitha Kalika Andolana', '2016-05-19', 'akshara','AC', 2, 'student');"
}, {
    'name':
    'assessments_questiongroup',
    'insertquery':
    "\COPY replacetablename(id,description, name, academic_year_id, start_date, end_date,status_id,double_entry, created_at, inst_type_id,survey_id, type_id, source_id) FROM '"
    + scriptdir + "/EkStep_Concepts.csv' with csv NULL 'null';"
}, {
Example #34
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys

try:
    from skbuild import setup
except ImportError:
    print('scikit-build is required to build from source.', file=sys.stderr)
    print('Please run:', file=sys.stderr)
    print('', file=sys.stderr)
    print('  python -m pip install scikit-build', file=sys.stderr)
    sys.exit(1)

setup(
    name='itk-trimmedpointsetregistration',
    version='0.1.0',
    author='Samuel Gerber',
    author_email='*****@*****.**',
    packages=['itk'],
    package_dir={'itk': 'itk'},
    download_url=
    r'https://github.com/samuelgerber/ITKTrimmedPointSetRegistration',
    description=r'This is a module for trimmed point set registration',
    long_description=
    'This module implements a decorator to the PointSetMetric that overrides the accumulation of the value and derivative computation to use a trimmed number of points.',
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python", "Programming Language :: C++",
        "Development Status :: 4 - Beta", "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Healthcare Industry",
def main(argv):
    (opts, args) = parser.parse_args()
    if not check_arguments(opts, args):
        print usage
        sys.exit(0)
    copy_faa_gff_orf_prediction(opts.source, opts.target)
def compute_min_species(pathways, p, lca):
    taxons = []
    for rxn in pathways[p]['rxns']:
        for enzyme in pathways[p]['rxns'][rxn]:
            taxons.append(pathways[p]['rxns'][rxn][enzyme])

    indtaxons = lca.get_independent_taxons(taxons)

    #create X variables
    taxon_variable = {}
    i = 0
    for x in indtaxons:
        taxon_variable[x] = 'X' + str(i)
        i += 1

    try:
        mpsinput = open('input.mps', 'w')
    except IOError:
        print "Cannot open 'input.mps' to write the problem"
        sys.exit(0)

    fprintf(mpsinput, "NAME%s\n", field('DISTRIBUTED', 3))

    valid_reactions = []
    for rxn in pathways[p]['rxns']:
        # consider only reactions that have at least one ORF
        if pathways[p]['rxns'][rxn]:
            valid_reactions.append(rxn)

    # name the reaction constraint rows RXN0, RXN1, ...
    reaction_names = {}
    i = 0
    for x in valid_reactions:
        reaction_names[x] = 'RXN' + str(i)
        i += 1

    # compute a hash from taxons to reactions
    taxons_reactions = {}
    for rxn in pathways[p]['rxns']:
        # consider only reactions that have at least one ORF
        if pathways[p]['rxns'][rxn]:
            for enzyme in pathways[p]['rxns'][rxn]:
                for S in lca.get_independent_parent(pathways[p]['rxns'][rxn][enzyme]):
                    if S not in taxons_reactions:
                        taxons_reactions[S] = {}
                    taxons_reactions[S][rxn] = True

    # write the rows
    fprintf(mpsinput, "ROWS\n")
    str1 = get_buffer()
    str1 = writeToLine(str1, "N", offSize(1))
    str1 = writeToLine(str1, "COST", offSize(2))
    fprintf(mpsinput, "%s\n", str1.rstrip())
    for rxn in valid_reactions:
        str1 = get_buffer()
        str1 = writeToLine(str1, "G", offSize(1))
        str1 = writeToLine(str1, reaction_names[rxn], offSize(2))
        fprintf(mpsinput, "%s\n", str1.rstrip())

    #write the columns
    fprintf(mpsinput, "COLUMNS\n")
    for X in indtaxons:
        str1 = get_buffer()
        str1 = writeToLine(str1, taxon_variable[X], offSize(2))
        str1 = writeToLine(str1, "COST", offSize(3))
        str1 = writeToLine(str1, "1", offSize(4))
        fprintf(mpsinput, "%s\n", str1.rstrip())
        for rxn in taxons_reactions[X]:
            str1 = get_buffer()
            str1 = writeToLine(str1, taxon_variable[X], offSize(2))
            str1 = writeToLine(str1, reaction_names[rxn], offSize(3))
            str1 = writeToLine(str1, "1", offSize(4))
            fprintf(mpsinput, "%s\n", str1.rstrip())

    # write the right-hand sides
    fprintf(mpsinput, "RHS\n")
    for rxn in reaction_names:
        str1 = get_buffer()
        str1 = writeToLine(str1, "RHS1", offSize(2))
        str1 = writeToLine(str1, reaction_names[rxn], offSize(3))
        str1 = writeToLine(str1, "1", offSize(4))
        fprintf(mpsinput, "%s\n", str1.rstrip())

    #write the bounds
    fprintf(mpsinput, "BOUNDS\n")
    for X in indtaxons:
        str1 = get_buffer()
        str1 = writeToLine(str1, "BV", offSize(1))
        str1 = writeToLine(str1, "BND1", offSize(2))
        str1 = writeToLine(str1, taxon_variable[X], offSize(3))
        fprintf(mpsinput, "%s\n", str1.rstrip())

    fprintf(mpsinput, "ENDATA\n")
    mpsinput.close()

    command = glpsol + " --mps input.mps -o output.sol >> /dev/null"

    os.system(command)

    try:
        glpout = open('output.sol', 'r')
    except IOError:
        print "Cannot open 'output.sol' to read the solution"
        sys.exit(0)

    solLines = glpout.readlines()
    glpout.close()
    value = None  # stays None if glpsol reported no objective line
    for s in solLines:
        hits = objective.search(s.strip())
        if hits:
            value = int(hits.group(1))
            break

    return value
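The function above encodes a minimum-species set cover as a 0/1 integer program: one binary X variable per independent taxon, a G (greater-or-equal) row per covered reaction, and a COST objective counting selected taxa. Stripped of the offSize column padding (whose exact widths are not shown here), the generated MPS file has this shape (illustrative, two taxa covering two reactions):

NAME          DISTRIBUTED
ROWS
 N  COST
 G  RXN0
 G  RXN1
COLUMNS
    X0    COST    1
    X0    RXN0    1
    X1    COST    1
    X1    RXN1    1
RHS
    RHS1  RXN0    1
    RHS1  RXN1    1
BOUNDS
 BV BND1  X0
 BV BND1  X1
ENDATA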
Example #37
__maintainer__ = "Kishori M Konwar"
__status__ = "Release"

try:
    import cPickle
    from cStringIO import StringIO
    import os
    from os import makedirs, sys, remove, path
    import re
    from optparse import OptionParser, OptionGroup
    from LCAStar import *
    #from libs.python_modules.sysutil import getstatusoutput
except ImportError:
    print """Could not load some user-defined module functions"""
    sys.exit(3)

objective = re.compile(r'Objective.*=\s*(\d*)')

usage = sys.argv[0] + " --ncbi-tree-file ncbi_taxfile --taxalist taxa_list_file -o output [--alpha majority --min_depth min_depth_cutoff --min_reads min_read]"

parser = OptionParser(usage)
parser.add_option("--ncbi-tree-file",
                  dest="ncbi_tree_file",
                  default=None,
                  help='name of the refseq names file [OPTIONAL]')

parser.add_option("--taxalist",
                  dest="taxa_list",
                  default=None,
__maintainer__ = "Kishori M Konwar"
__status__ = "Release"

try:
    import os
    from os import makedirs, sys, remove, path
    import re
    from optparse import OptionParser, OptionGroup

    from SpeciesComputation import *
    from metapaths_utils import fprintf, printf
    #from libs.python_modules.sysutil import getstatusoutput
except ImportError:
    print """Could not load some user-defined module functions"""
    sys.exit(3)

objective = re.compile(r'Objective.*=\s*(\d*)')

usage = sys.argv[0] + " --ncbi-file ncbi_taxfile --pathways-file pathways_list --enzymes-file enzymes_file"

glpsol = "/usr/local/bin/glpsol"

parser = OptionParser(usage)
parser.add_option("--ncbi-file",
                  dest="ncbi_file",
                  help='name of the refseq names file')

#parser.add_option("--data-file", dest="data_file",
#                  help='name of the test strings')
def main(argv, errorlogger=None, runstatslogger=None):
    global parser
    (opts, args) = parser.parse_args(argv)

    if not valid_arguments(opts, args):
        print usage
        sys.exit(0)

    min_length = opts.min_length
    outfile = open(opts.output_fasta + '.tmp', 'w')
    logfile = open(opts.log_file, 'w')
    lengthsfile = open(opts.lengths_file + '.tmp', 'w')

    if opts.map_file:
        mapfile = open(opts.map_file, 'w')
    else:
        mapfile = None

    sample_name = opts.input_fasta
    sample_name = re.sub(r'^.*/', '', sample_name)
    sample_name = re.sub(r'^.*\\', '', sample_name)
    sample_name = re.sub(r'\.fasta$', '', sample_name, flags=re.I)
    sample_name = re.sub(r'\.fna$', '', sample_name, flags=re.I)
    sample_name = re.sub(r'\.faa$', '', sample_name, flags=re.I)
    sample_name = re.sub(r'\.fas$', '', sample_name, flags=re.I)
    sample_name = re.sub(r'\.fa$', '', sample_name, flags=re.I)

    BEFORE = 'BEFORE'
    AFTER = 'AFTER'
    NUMSEQ = "#INFO\tNumber of sequences :"
    NUMSEQ_SHORTER = "@INFO\tNumber of sequences shorter than minimum length of sequences"
    AVG_LENGTH = "@INFO\tAverage length of sequences:"
    MIN_LENGTH = "@INFO\tMinimum length of sequences:"
    MAX_LENGTH = "@INFO\tMaximum length of sequences:"

    _MAX = 1000000000000
    stats = {
        MIN_LENGTH: {
            'BEFORE': _MAX,
            'AFTER': _MAX
        },
        MAX_LENGTH: {
            'BEFORE': 0,
            'AFTER': 0
        },
        NUMSEQ: {
            'BEFORE': 0,
            'AFTER': 0
        },
        NUMSEQ_SHORTER: {
            'BEFORE': 0,
            'AFTER': 0
        },
        AVG_LENGTH: {
            'BEFORE': 0,
            'AFTER': 0
        },
    }

    length_distribution = {}
    length_cumulative_distribution = {}

    for i in range(0, 31):
        length_distribution[i] = 0
        length_cumulative_distribution[i] = 0

    seq_count = 0
    allNames = dict()
    outputStr = ""
    outputLines = []
    fastareader = FastaReader(opts.input_fasta)
    """ process one fasta sequence at a time """
    lengths_str = ""
    for record in fastareader:
        seqname = record.name
        seq = record.sequence
        length = len(seq)

        index = int(len(seq) / 50)
        if index >= 30:
            index = 30

        length_distribution[index] += 1
        if length < stats[MIN_LENGTH][BEFORE]:
            stats[MIN_LENGTH][BEFORE] = length

        if length > stats[MAX_LENGTH][BEFORE]:
            stats[MAX_LENGTH][BEFORE] = length

        if length < min_length:
            stats[NUMSEQ_SHORTER][BEFORE] += 1

        stats[AVG_LENGTH][BEFORE] = stats[AVG_LENGTH][BEFORE] + length

        seqvalue = filter_sequence(seq)

        stats[NUMSEQ][BEFORE] += 1

        seqlen = len(seqvalue)
        if seqlen >= min_length:

            if len(lengths_str) > 100:
                fprintf(lengthsfile, "%s\n", lengths_str)
                lengths_str = str(seqlen)
            else:
                lengths_str += '\t' + str(seqlen)

            stats[NUMSEQ][AFTER] += 1
            stats[AVG_LENGTH][AFTER] = stats[AVG_LENGTH][AFTER] + seqlen
            if mapfile is None:
                fprintf(outfile, "%s\n", seqname)
            else:
                fprintf(outfile, ">%s\n", sample_name + '_' + str(seq_count))
                key = re.sub(r'^>', '', seqname)
                fprintf(
                    mapfile, "%s\n", sample_name + '_' + str(seq_count) +
                    '\t' + key + '\t' + str(seqlen))
                seq_count += 1

            fprintf(outfile, "%s\n", seqvalue)

            if seqlen < stats[MIN_LENGTH][AFTER]:
                stats[MIN_LENGTH][AFTER] = seqlen

            if seqlen > stats[MAX_LENGTH][AFTER]:
                stats[MAX_LENGTH][AFTER] = seqlen

    fprintf(lengthsfile, "%s\n", lengths_str)

    if stats[NUMSEQ][BEFORE] > 0:
        stats[AVG_LENGTH][
            BEFORE] = stats[AVG_LENGTH][BEFORE] / stats[NUMSEQ][BEFORE]
    else:
        stats[AVG_LENGTH][BEFORE] = 0
    if stats[NUMSEQ][AFTER] > 0:
        stats[AVG_LENGTH][
            AFTER] = stats[AVG_LENGTH][AFTER] / stats[NUMSEQ][AFTER]
    else:
        stats[AVG_LENGTH][AFTER] = 0

    lengthsfile.close()
    outfile.close()

    rename(opts.output_fasta + ".tmp", opts.output_fasta)
    rename(opts.lengths_file + ".tmp", opts.lengths_file)

    #inputfile.close()
    if mapfile is not None:
        mapfile.close()
    """ min length """
    if stats[MIN_LENGTH][BEFORE] == _MAX:
        stats[MIN_LENGTH][BEFORE] = 0
    if stats[MIN_LENGTH][AFTER] == _MAX:
        stats[MIN_LENGTH][AFTER] = 0

    fprintf(logfile, "@INFO\tBEFORE\tAFTER\n")
    fprintf(
        logfile, "%s\n", NUMSEQ + '\t' + str(stats[NUMSEQ][BEFORE]) + '\t' +
        str(stats[NUMSEQ][AFTER]))
    fprintf(
        logfile, "%s\n",
        NUMSEQ_SHORTER + '\t' + str(stats[NUMSEQ_SHORTER][BEFORE]) + '\t' +
        str(stats[NUMSEQ_SHORTER][AFTER]))
    fprintf(
        logfile, "%s\n", AVG_LENGTH + '\t' + str(stats[AVG_LENGTH][BEFORE]) +
        '\t' + str(stats[AVG_LENGTH][AFTER]))
    fprintf(
        logfile, "%s\n", MIN_LENGTH + '\t' + str(stats[MIN_LENGTH][BEFORE]) +
        '\t' + str(stats[MIN_LENGTH][AFTER]))
    fprintf(
        logfile, "%s\n", MAX_LENGTH + '\t' + str(stats[MAX_LENGTH][BEFORE]) +
        '\t' + str(stats[MAX_LENGTH][AFTER]))
    fprintf(logfile, "@INFO\tLOW\tHIGH\tFREQUENCY\tCUMULATIVE_FREQUENCY\n")
    #    fprintf(logfile, "#   ---\t-----\t--------\t---------\t----------\n");

    i = 30
    length_cumulative_distribution[i] = length_distribution[i]
    i -= 1
    while i >= 0:
        length_cumulative_distribution[i] = length_cumulative_distribution[i + 1] + length_distribution[i]
        i -= 1

    for i in range(0, 31):
        fprintf(logfile, "   %s\n", str(i*50) + '\t' + str((i+1)*50) + '\t' +\
                 str(length_distribution[i]) +'\t' + str(length_cumulative_distribution[i]) )

    logfile.close()

    if opts.seqtype == 'nucleotide':
        priority = 1000
    else:
        priority = 2000

    if runstatslogger is not None:
        if opts.seqtype == 'nucleotide':
            runstatslogger.write(
                "%s\tNumber of sequences in input file BEFORE QC (%s)\t%s\n" %
                (str(priority), opts.seqtype, str(stats[NUMSEQ][BEFORE])))
            runstatslogger.write(
                "%s\t-min length\t%s\n" %
                (str(priority + 1), str(stats[MIN_LENGTH][BEFORE])))
            runstatslogger.write(
                "%s\t-avg length\t%s\n" %
                (str(priority + 2), str(int(stats[AVG_LENGTH][BEFORE]))))
            runstatslogger.write(
                "%s\t-max length\t%s\n" %
                (str(priority + 3), str(stats[MAX_LENGTH][BEFORE])))
            runstatslogger.write(
                "%s\t-total base pairs (bp)\t%s\n" %
                (str(priority + 4),
                 str(int(stats[AVG_LENGTH][BEFORE] * stats[NUMSEQ][BEFORE]))))

            runstatslogger.write(
                "%s\tNumber of sequences AFTER QC (%s)\t%s\n" %
                (str(priority + 5), opts.seqtype, str(stats[NUMSEQ][AFTER])))
            runstatslogger.write(
                "%s\t-min length\t%s\n" %
                (str(priority + 6), str(stats[MIN_LENGTH][AFTER])))
            runstatslogger.write(
                "%s\t-avg length\t%s\n" %
                (str(priority + 7), str(int(stats[AVG_LENGTH][AFTER]))))
            runstatslogger.write(
                "%s\t-max length\t%s\n" %
                (str(priority + 8), str(stats[MAX_LENGTH][AFTER])))
            runstatslogger.write(
                "%s\t-total base pairs (bp)\t%s\n" %
                (str(priority + 9),
                 str(int(stats[AVG_LENGTH][AFTER] * stats[NUMSEQ][AFTER]))))
        else:
            runstatslogger.write(
                "%s\tNumber of translated ORFs BEFORE QC (%s)\t%s\n" %
                (str(priority), opts.seqtype, str(stats[NUMSEQ][BEFORE])))
            runstatslogger.write(
                "%s\t-min length\t%s\n" %
                (str(priority + 1), str(stats[MIN_LENGTH][BEFORE])))
            runstatslogger.write(
                "%s\t-avg length\t%s\n" %
                (str(priority + 2), str(int(stats[AVG_LENGTH][BEFORE]))))
            runstatslogger.write(
                "%s\t-max length\t%s\n" %
                (str(priority + 3), str(stats[MAX_LENGTH][BEFORE])))
            runstatslogger.write(
                "%s\t-total base pairs (bp)\t%s\n" %
                (str(priority + 4),
                 str(int(stats[AVG_LENGTH][BEFORE] * stats[NUMSEQ][BEFORE]))))
            runstatslogger.write(
                "%s\tNumber of translated ORFs AFTER QC (%s)\t%s\n" %
                (str(priority + 5), opts.seqtype, str(stats[NUMSEQ][AFTER])))
            runstatslogger.write(
                "%s\t-min length\t%s\n" %
                (str(priority + 6), str(stats[MIN_LENGTH][AFTER])))
            runstatslogger.write(
                "%s\t-avg length\t%s\n" %
                (str(priority + 7), str(int(stats[AVG_LENGTH][AFTER]))))
            runstatslogger.write(
                "%s\t-max length\t%s\n" %
                (str(priority + 8), str(stats[MAX_LENGTH][AFTER])))
            runstatslogger.write(
                "%s\t-total base pairs (bp)\t%s\n" %
                (str(priority + 9),
                 str(int(stats[AVG_LENGTH][AFTER] * stats[NUMSEQ][AFTER]))))
def option5():
    print("Exiting")
    sys.exit(0)
Example #41
from distutils.core import setup
from distutils.command.install import INSTALL_SCHEMES
from os import path
import os, shutil, re, sys
from glob import glob
for scheme in INSTALL_SCHEMES.values():
    scheme['data'] = scheme['purelib']

#  test
from imp import find_module
try:
    find_module('numpy')
except ImportError:
    sys.exit('### Error: python module numpy not found')

try:
    find_module('pyfits')
except ImportError:
    try:
        find_module('astropy')
    except ImportError:
        sys.exit('### Error: python module pyfits not found')

try:
    find_module('pyraf')
except ImportError:
    sys.exit('### Error: python module pyraf not found')

try:
    find_module('matplotlib')
except ImportError:
Example #42
"""idiomatic module structure"""

# imports
import sys

# constants
# exception classes
# interface functions
# classes
# internal functions & classes


def main():
    print("\nHello Python World\n")
    return 0


if __name__ == '__main__':
    status = main()
    sys.exit(status)
def main():
    app = QApplication(sys.argv)
    wnd = ChartLab(None, None)
    wnd.show()
    sys.exit(app.exec())
Example #44
def signal_handler(signal, frame):
    print('You pressed Ctrl+C!')
    stop_unreal()
    sys.exit(0)
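The handler only fires once it is registered for SIGINT; the usual wiring with the standard signal module looks like this (a sketch; signal.pause() is POSIX-only):

import signal

# Register the Ctrl+C handler, then sleep until a signal arrives.
signal.signal(signal.SIGINT, signal_handler)
signal.pause()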