Example #1
File: a.py Project: ricksion/myPyProject
def f(cardNO_1, num):
    """Luhn check: test whether num is a valid check digit for cardNO_1."""
    print("trying digit:", num)
    cardNO_1 = list(cardNO_1)
    cardNO_1.append(num)
    cardNO_1.reverse()
    print("trying digit:", num, cardNO_1)

    # sum of digits at odd positions (1st, 3rd, ... counted from the right)
    jiweisum = 0
    for i in range(len(cardNO_1)):
        if i % 2 == 0:
            jiweisum += int(cardNO_1[i])
    print("odd-position sum", jiweisum)

    # sum of doubled digits at even positions (2nd, 4th, ... from the right)
    oushsum = 0
    for i in range(len(cardNO_1)):
        if i % 2 == 1:
            tmp = int(cardNO_1[i]) * 2
            if tmp > 9:  # was tmp > 10, which missed 10 (e.g. 5*2); Luhn
                tmp = tmp - 9  # subtracts 9 whenever the doubled digit > 9
            oushsum += tmp
    print("even-position sum", oushsum)

    if (jiweisum + oushsum) % 10 == 0:
        print(num, "is the key")
        return "ok"
    else:
        print(num, "is not the key")
        # restore the list for the next attempt
        cardNO_1.reverse()
        cardNO_1.pop()
Example #2
def find_cycle(graph):
    # select any node to start cycle, may as well be the first
    cycle = []
    current_node = find_start_node(graph)
    # list(graph.values())[0]
    cycle.append(current_node)

    # do initial cycle
    cycle = get_path_from_node(current_node, graph, cycle)
    done = False

    while not done:
        c = cycle[:]
        for i in range(0, len(c)):
            if len(c[i].edges) > 0:
                cycle_prime = []
                cycle_prime.append(c[i])
                cycle_prime = get_path_from_node(c[i], graph, cycle_prime)
                cycle = cycle[:i] + cycle_prime + cycle[i + 1:]
                break

            # if we get here and there are no more nodes with unused edges, we're done
            if i == len(c) - 1:
                done = True

    return cycle
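
A minimal, self-contained sketch of the same Hierholzer-style splicing idea, for comparison. It assumes a plain adjacency dict of directed edges rather than the Node objects with an .edges list and the find_start_node / get_path_from_node helpers the excerpt above relies on:

def eulerian_cycle(adj):
    # adj: {node: [neighbors]}; every node is assumed to appear as a key,
    # and each directed edge is consumed (popped) exactly once as we walk
    graph = {u: list(vs) for u, vs in adj.items()}
    start = next(iter(graph))
    stack, cycle = [start], []
    while stack:
        u = stack[-1]
        if graph[u]:                  # an unused edge remains: keep walking
            stack.append(graph[u].pop())
        else:                         # dead end: back out, recording the cycle
            cycle.append(stack.pop())
    return cycle[::-1]

# eulerian_cycle({0: [1], 1: [2], 2: [0]}) -> [0, 1, 2, 0]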
Example #3
File: ReadXML.py Project: crobertob/RST
def get_individual_records(db, tables, table_headers, foreign_keys):
    cursor = db.cursor()
    var_string = ''
    table = ''
    refs = ''
    var_string += table_headers[0] + "."
    var_string += table_headers[1] + ", "
    var_string = var_string[:-2]
    for table_name in tables:
        table += table_name + ", "
    table = table[:-2]
    if table_headers[0].find("discrete", 0) >= 0:
        for i in range(len(foreign_keys)):
            refs += "discrete_"
            refs += foreign_keys[i][2] + "."
            refs += foreign_keys[i][0] + " = "
            refs += "discrete_"
            refs += foreign_keys[i][1]
            refs += ".id and "
    else:
        for i in range(len(foreign_keys)):
            refs += foreign_keys[i][2] + "."
            refs += foreign_keys[i][0] + " = "
            refs += foreign_keys[i][1]
            refs += ".id and "
    refs = refs[:-5]
    sql = 'SELECT {} FROM {} WHERE {}'.format(var_string, table, refs)
    logging.debug("SQL query: %s", sql)
    cursor.execute(sql)
    return cursor
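
For illustration, with hypothetical inputs tables = ["discrete_person", "discrete_city"], table_headers = ["discrete_person", "age"] and foreign_keys = [["city_id", "city", "person"]], the statement built above would be:

SELECT discrete_person.age FROM discrete_person, discrete_city WHERE discrete_person.city_id = discrete_city.id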
Example #4
File: ReadXML.py Project: crobertob/RST
def get_records(db, tables, headers, foreign_keys):
    cursor = db.cursor()
    var_string = ''
    table = ''
    refs = ''
    for i, table_headers in enumerate(headers):
        for j in range(len(table_headers)):
            var_string += tables[i] + "."
            var_string += headers[i][j] + ", "
    var_string = var_string[:-2]
    for i in range(len(tables)):
        table += tables[i] + ", "
    table = table[:-2]
    if tables[0].find("discrete", 0) >= 0:
        for i in range(len(foreign_keys)):
            refs += "discrete_"
            refs += foreign_keys[i][2] + "."
            refs += foreign_keys[i][0] + " = "
            refs += "discrete_"
            refs += foreign_keys[i][1]
            refs += ".id and "
    else:
        for i in range(len(foreign_keys)):
            refs += foreign_keys[i][2] + "."
            refs += foreign_keys[i][0] + " = "
            refs += foreign_keys[i][1]
            refs += ".id and "
    refs = refs[:-5]
    sql = 'SELECT {} FROM {} WHERE {}'.format(var_string, table, refs)
    cursor.execute(sql)
    return cursor
Example #5
File: ReadXML.py Project: crobertob/RST
def store_db_struct(db, tables, headers, types, user_input, attributes, discretize, 
                    scripts, partition_sizes, offsets, decision, foreign_keys, references):
    cursor = db.cursor()
    cursor.execute("DROP TABLE IF EXISTS _tables")
    cursor.execute("DROP TABLE IF EXISTS _headers")
    cursor.execute("DROP TABLE IF EXISTS _foreign_keys")
    create_db_struct(db)
    
    for i in range(len(tables)):
        cursor.execute("INSERT INTO _tables "
                        "(name) "
                        "VALUES (?)",
                        (tables[i],))
    
    for i, table_headers in enumerate(headers):
        for j in range(len(table_headers)):
            cursor.execute("INSERT INTO _headers "
                            "(table_id, name, type, user_input, attributes, discretize, script, partition_size, offset, decision) "
                            "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                            (i + 1, headers[i][j], types[i][j], user_input[i][j],
                              attributes[i][j], discretize[i][j], scripts[i][j],
                               partition_sizes[i][j], offsets[i][j], decision[i][j]))
    for i, foreign_key in enumerate(foreign_keys):
        for j in range(len(foreign_key)):
            cursor.execute("INSERT INTO _foreign_keys "
                            "(table_id, name, reference) "
                            "VALUES (?, ?, ?)",
                            (i + 1, foreign_key[j], references[i][j],))
    db.commit()
Example #6
import operator


def build_alphabet(spectrum, m):
    """
    :param spectrum: an experimental spectrum
    :param m: the multiplicity threshold
    :return: the masses (57..200 Da) from the convolution spectrum whose
        multiplicity is at least that of the m-th most frequent mass
        (i.e. the top m masses, with ties included).
    """

    convolutions = dict()

    for i in range(0, len(spectrum)):
        for j in range(i + 1, len(spectrum)):
            diff = spectrum[j] - spectrum[i]
            if 57 <= diff <= 200:
                if diff in convolutions:
                    convolutions[diff] += 1
                else:
                    convolutions[diff] = 1

    sorted_list = sorted(convolutions.items(), key=operator.itemgetter(1), reverse=True)

    score_to_beat = sorted_list[m - 1][1]  # assumes at least m distinct masses

    result = []

    for item in sorted_list:
        if item[1] >= score_to_beat:
            result.append(item[0])

    return result
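
A quick worked example (hypothetical numbers): for spectrum = [0, 57, 118, 179] the pairwise differences falling in [57, 200] are 57, 118, 179, 61, 122 and 61, so the multiplicities are {61: 2, 57: 1, 118: 1, 179: 1, 122: 1}. With m = 1 the score to beat is 2 and build_alphabet returns [61]; with m = 2 it is 1, so every mass survives.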
Example #7
File: cpm.py Project: 1Shalafi1/CPM
    def create_paths(self):
        # cp is presumably the copy module (import copy as cp)
        self.activity_list.sort(key=lambda activity: activity["Predecessor"])
        for i in range(len(self.activity_list)):
            # activities without a predecessor start their own path
            if self.activity_list[i]["Predecessor"] == '-':
                self.activity_list[i]["Total Time"] += self.activity_list[i]["Time"]
                self.paths.append([self.activity_list[i]])
                continue
            for path in self.paths:
                if int(self.activity_list[i]["Predecessor"]) == path[-1]["ID"]:
                    # fork the path if the next activity shares this predecessor
                    if i + 1 < len(self.activity_list) and self.activity_list[i]["Predecessor"] == self.activity_list[i + 1]["Predecessor"]:
                        self.paths.append(cp.copy(path))
                    self.activity_list[i]["Total Time"] += path[-1]["Total Time"] + self.activity_list[i]["Time"]
                    path.append(self.activity_list[i])
                    break
                for path1 in self.paths:
                    if int(self.activity_list[i]["Predecessor"]) == path1[-1]["ID"]:
                        self.activity_list[i]["Total Time"] += path1[-1]["Total Time"] + self.activity_list[i]["Time"]
                        path1.append(self.activity_list[i])
                        break
        for i in range(len(self.paths)):
            print("Path %d: " % (i + 1), self.paths[i])
        return self.paths
Example #8
File: ReadXML.py Project: crobertob/RST
def obtain_reduced_table(db, discrete_tables, tables, headers, foreign_keys,
                          relevant_attribute_list, dreduct, map_i, map_j):
    reduced_table = []
    table_header_names = []
    records = []
    results = []
    for attribute in dreduct:
        reduced_table.append([])
        i = map_i[attribute-1]
        j = map_j[attribute-1]
        table_header_names.append([discrete_tables[i],headers[i][j]])
    i = map_i[-1]
    j = map_j[-1]
    table_header_names.append([tables[i], headers[i][j]])
    logging.debug("Table-header pairs: %s", table_header_names)
    for k in range(len(table_header_names)):
        if k < len(table_header_names) - 1: 
            records.append(get_individual_records(db, discrete_tables,
                                                   table_header_names[k], foreign_keys))
        else:
            results_record = get_individual_records(db, tables, table_header_names[k], 
                                                  foreign_keys)
    #records = get_records(db, new_tables, new_headers, foreign_keys)
    for record in results_record:
        results.append(record[0])
    for i, record in enumerate(records):
        for j, value in enumerate(record):
            if relevant_attribute_list[j].count(dreduct[i]) > 0:
                reduced_table[i].append([value[0], results[j]])
                logging.debug("i: %s, j:%s Value: %s, Result: %s, Relevant: %s", i, j,
                          value[0], results[j], relevant_attribute_list[j])
    logging.debug("Reduced table: %s", reduced_table)                        
    return reduced_table
Example #9
File: ReadXML.py Project: crobertob/RST
def import_records_fromXML(db, tables, headers, types, foreign_keys, user_input):
    value_table = []
    value_row = []
    value = []
    filename = "records.xml"
    #filename = Console.get_string("Import from", "filename")
    if not filename:
        return
    try:
        tree = xml.etree.ElementTree.parse(filename)
    except (EnvironmentError,
            xml.parsers.expat.ExpatError) as err:
        print("ERROR:", err)
        return

    for i, table_header in enumerate(headers):
        reset_tables(db, [tables[i]])
        for element in tree.findall(tables[i]):
            try:
                for j in range(len(table_header)):
                    if user_input[i][j] == 1:
                        # fetch and cast the attribute directly rather than
                        # building the call as a string and eval()-ing it
                        raw = element.get(headers[i][j])
                        if types[i][j] == "int":
                            value.append(int(raw))
                        elif types[i][j] == "float":
                            value.append(float(raw))
                        else:
                            value.append(raw)
                value_row.append(value)
                value = []
            except ValueError as err:
                db.rollback()
                print("ERROR:", err)
                break
        value_table.append(value_row)
        value_row = []
    logging.debug("Imported data: %s", value_table)
    ref_table_id = 0
    main_table_id = 0
    if len(foreign_keys) > 0:
        for i, current_key in enumerate(foreign_keys):
            for p, table in enumerate(tables):
                # store the index of the table that contains the reference
                if table == foreign_keys[i][1]:
                    ref_table_id = p
                if table == foreign_keys[i][2]:
                    main_table_id = p
            for k, current_value_row in enumerate(value_table[ref_table_id]):
                try:
                    key_value = get_and_set_foreign(db, tables[ref_table_id],
                                                     headers[ref_table_id], 
                                                     current_key, current_value_row)
                    value_table[main_table_id][k].append(key_value)
                    insert_sqlite(db, tables[main_table_id], headers[main_table_id], 
                                  value_table[main_table_id][k])
                except ValueError as err:
                    db.rollback()
                    print("ERROR:", err)
                    break
    else:
        db.commit()
Example #10
    def __init__(self, point_map, original, generated):
        self.generated = generated
        self.original = original
        self.point_map = point_map
        self.width = len(point_map[0])
        self.height = len(point_map)
        self.cell_size = int(min(MAX_X / self.width, MAX_Y / self.height))
        self.offset = self.cell_size / 2
Example #11
def score_motifs(motifs):
    motif_score = 0

    for i in range(0, len(motifs)):
        for j in range(i + 1, len(motifs)):
            motif_score += gms.get_hamming_distance(motifs[i], motifs[j])

    return motif_score
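
A tiny self-contained illustration of the pairwise score (assuming gms.get_hamming_distance counts mismatched positions, its conventional meaning):

def get_hamming_distance(a, b):
    # number of positions at which a and b disagree
    return sum(x != y for x, y in zip(a, b))

motifs = ["ACG", "ACT", "CCG"]
# d(ACG,ACT)=1, d(ACG,CCG)=1, d(ACT,CCG)=2  ->  pairwise total 4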
Example #12
File: ReadXML.py Project: crobertob/RST
def refresh_discrete_tables(db, tables, headers, types, foreign_keys, discretize, 
                            offsets, decision, partition_sizes, user_input):
    value_table = []
    value = []

    discrete_tables = list_discrete_tables(tables)
    reset_tables(db, discrete_tables)
    records = get_records(db, tables, headers, foreign_keys)
    
    for _ in tables:
        value_table.append([])
    for record in records:
        for i, table_headers in enumerate(headers):
            try:
                for j in range(len(table_headers)):
                    if user_input[i][j] == 1:
                        if discretize[i][j] == 1:
                            # cast the raw value first, then divide and take the
                            # ceiling; casting the quotient to int before ceil()
                            # would silently truncate instead of rounding up
                            raw = record[i*len(table_headers) + j]
                            if types[i][j] == "int":
                                value.append(int(ceil(int(raw) /
                                                      partition_sizes[i][j])) + offsets[i][j])
                            elif types[i][j] == "float":
                                value.append(int(ceil(float(raw) /
                                                      partition_sizes[i][j])) + offsets[i][j])
                        else:
                            value.append(record[i*len(table_headers)+j])
                value_table[i].append(value)
                value = []
            except ValueError as err:
                db.rollback()
                print("ERROR:", err)
                break
    logging.debug("Imported data: %s", value_table)
    ref_table_id = 0
    main_table_id = 0
    if len(foreign_keys) > 0:
        for i, current_key in enumerate(foreign_keys):
            for p, table in enumerate(discrete_tables):
                # store the index of the table that contains the reference
                if table == "discrete_" + foreign_keys[i][1]:
                    ref_table_id = p
                if table == "discrete_" + foreign_keys[i][2]:
                    main_table_id = p
            for k, current_value_row in enumerate(value_table[ref_table_id]):
                try:
                    key_value = get_and_set_foreign(db, discrete_tables[ref_table_id],
                                                      headers[ref_table_id], 
                                                      current_key, current_value_row)
                    value_table[main_table_id][k].append(key_value)
                    insert_sqlite(db, discrete_tables[main_table_id], headers[main_table_id], 
                                   value_table[main_table_id][k])
                except ValueError as err:
                    db.rollback()
                    print("ERROR:", err)
                    break
    else:
        db.commit()
Example #13
    def _read_entity(self, data):
        import builtins
        upack = {
          5 : { 1 : '>b', 2 : '>h', 4 : '>i', 8 : '>q' },  # int
          6 : { 1 : '>B', 2 : '>H', 4 : '>I', 8 : '>Q' }   # uint
        }

        result = None

        tlf = data[self._dataoffset]
        type = (tlf & 112) >> 4  # shadows the built-in type()
        more = tlf & 128
        len = tlf & 15           # shadows the built-in len(); hence builtins.len below
        self._dataoffset += 1

        if more > 0:
            tlf = data[self._dataoffset]
            len = (len << 4) + (tlf & 15)
            self._dataoffset += 1

        len -= 1

        if len == 0:     # skip empty optional value
            return result

        if self._dataoffset + len >= builtins.len(data):
            raise Exception("Try to read {} bytes, but only have {}".format(len, builtins.len(data) - self._dataoffset))

        if type == 0:    # octet string
            result = data[self._dataoffset:self._dataoffset+len]

        elif type == 5 or type == 6:  # int or uint
            d = data[self._dataoffset:self._dataoffset+len]

            ulen = len
            if ulen not in upack[type]:  # extend to next greater unpack unit
                while ulen not in upack[type]:
                    d = b'\x00' + d
                    ulen += 1

            result = struct.unpack(upack[type][ulen], d)[0]

        elif type == 7:  # list
            result = []
            self._dataoffset += 1
            for i in range(0, len + 1):
                result.append(self._read_entity(data))
            return result

        else:
            logger.warning('Skipping unknown field {}'.format(hex(tlf)))

        self._dataoffset += len

        return result
Example #14
File: ReadXML.py Project: crobertob/RST
def get_relative_discernibility(m, target):
    '''construct discernibility matrix (collection) relative to current row'''
    collection = [[] for _ in m]
    for idx in range(len(m)):
        v = m[idx]
        for i, row in enumerate(m):
            if i <= idx:
                continue
            collection[i].append(set([(j + 1) for j in range(len(v) - 1)
                                      if v[j] != row[j] and v[target] != row[target]]))
    del collection[0]
    return collection
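
A small worked example (hypothetical matrix, decision in the last column): for m = [[1, 0, 1], [1, 1, 0], [0, 1, 0]] and target = 2, rows 0 and 1 differ in the decision and in attribute 2, rows 0 and 2 differ in the decision and in attributes 1 and 2, and rows 1 and 2 agree on the decision. So get_relative_discernibility(m, 2) returns [[{2}], [{1, 2}, set()]].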
Example #15
File: ReadXML.py Project: crobertob/RST
def discretize_record(headers, types, discretize, offsets, decision,
                       partition_sizes, user_input, new_record):
    
    idx = 0
    discrete_record = []
    value = []
    
    for i, table_headers in enumerate(headers):
        for j in range(len(table_headers)):
            if user_input[i][j] == 1:
                if discretize[i][j] == 1:
                    if decision[i][j] == 0:
                        # cast the raw value first, then divide and take the
                        # ceiling (see refresh_discrete_tables above for the same fix)
                        if types[i][j] == "int":
                            value.append(int(ceil(int(new_record[i][idx]) /
                                                  partition_sizes[i][j])) + offsets[i][j])
                            idx += 1
                        elif types[i][j] == "float":
                            value.append(int(ceil(float(new_record[i][idx]) /
                                                  partition_sizes[i][j])) + offsets[i][j])
                            idx += 1
                else:
                    value.append(new_record[i][idx])
                    idx += 1
        discrete_record.append(value)
        value = []
        idx = 0
    logging.debug("Discrete record: %s", discrete_record)
    return discrete_record
Example #16
def leaderboard_sequence(spectrum, n, alphabet):
    spectrum = sorted(spectrum)
    parent_mass = max(spectrum)
    leader_board = [[]]
    leader_peptide = []
    # keep the running best score across iterations; resetting it inside the
    # loop would let a later, lower-scoring peptide displace the leader
    leader_score = 0

    while len(leader_board) > 0:
        leader_board = expand(leader_board, alphabet)
        temp = leader_board[:]  # copy so peptides can be removed while looping
        for peptide in temp:
            mass = sum(peptide)
            if mass == parent_mass:
                s = cyc_score(peptide, spectrum)
                if s > leader_score:
                    leader_peptide = peptide
                    leader_score = s

            elif mass > parent_mass:
                leader_board.remove(peptide)

        leader_board = trim(leader_board, spectrum, n)

    return leader_peptide
Example #17
def get_profile_score(kmer, profile):
    profile_score = 1

    for i in range(0, len(kmer)):
        profile_score *= profile[kmer[i]][i]

    return profile_score
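
Hypothetical usage, with the profile stored as a dict of per-position probabilities:

profile = {'A': [0.5, 0.1], 'C': [0.2, 0.6], 'G': [0.2, 0.2], 'T': [0.1, 0.1]}
print(get_profile_score("AC", profile))  # 0.5 * 0.6 = 0.3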
Example #18
File: ReadXML.py Project: crobertob/RST
def store_discrete_record(db, discrete_tables, headers, foreign_keys,
                         discrete_record):
    
    logging.debug("Discrete record to be added: %s", discrete_record)
    ref_table_id = 0
    main_table_id = 0
    if len(foreign_keys) > 0:
        for i, current_key in enumerate(foreign_keys):
            for p, table in enumerate(discrete_tables):
                # store the index of the table that contains the reference
                if table == "discrete_" + foreign_keys[i][1]:
                    ref_table_id = p
                if table == "discrete_" + foreign_keys[i][2]:
                    main_table_id = p
            try:
                key_value = get_and_set_foreign(db, discrete_tables[ref_table_id],
                                                  headers[ref_table_id], 
                                                  current_key, discrete_record[ref_table_id])
                discrete_record[main_table_id].append(key_value)
                insert_sqlite(db, discrete_tables[main_table_id], headers[main_table_id], 
                               discrete_record[main_table_id])
            except ValueError as err:
                db.rollback()
                print("ERROR:", err)
                break
    else:
        db.commit()
Example #19
def score_motifs(motifs):
    motif_score = 0

    for i in range(1, len(motifs)):
        motif_score += get_hamming_distance(motifs[0], motifs[i])

    return motif_score
Example #20
File: ReadXML.py Project: crobertob/RST
def read_record_from_scripts(headers, user_input, modules, decision):
    idx_script = 0
    idx_decision = 0
    record = []
    value = []
    
    for i, table_headers in enumerate(headers):
        for j in range(len(table_headers)):
            if user_input[i][j] == 1:
                if decision[i][j] == 0:
                    try:
                        value.append(modules[idx_script].run())
                        idx_script += 1
                        idx_decision += 1
                    except ValueError as err:
                        print("ERROR:", err)
                        break
                else:
                    decision_script = idx_script
                    decision_i = i
                    decision_j = idx_decision
                    real_j = j
                    idx_script += 1
                    idx_decision += 1
        idx_decision = 0
        record.append(value)
        value = []
    logging.debug("Record added from scripts: %s", record)
    logging.debug("i: %s, j: %s", decision_i, decision_j)
    logging.debug("Decision script: %s", modules[decision_script])
    return record, decision_script, decision_i, decision_j, real_j
Example #21
def reconstruct(kmers):
    reconstructed = ""
    for i in range(0, len(kmers) - 1):
        reconstructed += (kmers[i][0])

    reconstructed += (kmers[-1])

    return reconstructed
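
For instance (hypothetical k-mers): reconstruct(["ACG", "CGT", "GTT"]) takes the first character of "ACG" and "CGT" and then the whole of "GTT", giving "ACGTT".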
Example #22
File: ReadXML.py Project: crobertob/RST
def insert_sqlite(db, tablename, headers, values):
    cursor = db.cursor()
    columns = ', '.join(headers)
    columns = columns[4:]  # drop the leading auto-increment column, presumably "id, "
    placeholders = ', '.join('?' * len(values))
    sql = 'INSERT INTO {} ({}) VALUES ({})'.format(tablename, columns, placeholders)
    logging.debug("SQL: %s", sql)
    cursor.execute(sql, values)
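
With hypothetical inputs tablename = "person", headers = ["id", "age", "city_id"] and values = (31, 2), the generated statement would be:

INSERT INTO person (age, city_id) VALUES (?, ?)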
Example #23
import operator


def trim(leader_board, spectrum, n):
    if len(leader_board) > n:
        leader_scores = []
        for peptide in leader_board:
            leader_scores.append((lin_score(peptide, spectrum), peptide))

        leader_scores = sorted(leader_scores, key=operator.itemgetter(0), reverse=True)
        nth_score = leader_scores[n - 1][0]

        temp = []
        for i in range(0, len(leader_scores)):
            if leader_scores[i][0] >= nth_score:
                temp.append(leader_scores[i][1])

        leader_board = temp

    return leader_board
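
Sketch of the trimming behavior (hypothetical scores): with n = 2 and scored peptides [(5, pepA), (4, pepB), (4, pepC), (1, pepD)], the n-th best score is 4, so pepA, pepB and pepC all survive (ties with the n-th score are kept) while pepD is cut.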
Example #24
import random


def initialize_random_motifs(dna, k):
    result = []
    l = len(dna[0])
    for row in dna:
        # draw a fresh random start for each row: randomized motif search
        # samples each motif independently
        i = random.randint(0, l - k)
        result.append(row[i:i + k])

    return result
Example #25
File: ReadXML.py Project: crobertob/RST
def matrix_segment_absorption(main_row, i, k, min_matrix):
    for j, row in enumerate(min_matrix, start = i):
        for l in range(len(row)):
            if (i == j and k >= l) or main_row[k] == set() or row[l] == set() or main_row[k].issuperset(row[l]):
                continue
            if main_row[k].issubset(row[l]):
                row[l] = main_row[k]
    return min_matrix
Example #26
    def test_each_kmer(self):
        result = []
        for kmer in gms.each_kmer("ACGTTT", 3):
            result.append(kmer)

        self.assertEqual(len(result), 4)
        self.assertEqual(result[0], "ACG")
        self.assertEqual(result[3], "TTT")
Example #27
def linear_spectrum(peptide):
    """
    :param peptide: a peptide given as a list of integer amino-acid masses
    :return: its linear spectrum as a sorted list of int
    """

    prefix_mass = [0]

    for i in range(0, len(peptide)):
        prefix_mass.append(prefix_mass[i] + peptide[i])

    linear_spectrum = []
    for i in range(0, len(peptide)):
        for j in range(i + 1, len(peptide) + 1):
            linear_spectrum.append(prefix_mass[j] - prefix_mass[i])

    return sorted(linear_spectrum)
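
For example (hypothetical masses), linear_spectrum([57, 71]) builds prefix_mass = [0, 57, 128] and returns sorted([57, 128, 71]) = [57, 71, 128].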
Example #28
def read_problem(data):
    first_line = data[0]

    try:
        circles = None
        lines = None

        if first_line.startswith("/ Meta information:"):
            data.pop(0)
            circles, lines = parse_meta_information(data)

        filtered_input = filter_input(data)
        width = int(filtered_input.pop(0).strip())
        height = int(filtered_input.pop(0).strip())

        map = []  # note: shadows the built-in map()
        for x in range(0, height):
            line = filtered_input[x].strip()

            row = []
            for char in line:
                if char == '_':
                    row.append(False)
                elif char == '#':
                    row.append(True)
                else:
                    error("Invalid character: %s" % char)

            if not len(row) == width:
                error("Invalid row length %s expected %s" % (len(row), width))
                error(row)
                error(line)
                sys.exit(-1)
            map.append(row)

        if not len(map) == height:
            error("Invalid number of lines %s expected %s" % (len(map), height))
            error(row)
            error(line)
            sys.exit(-1)

        return circles, lines, map
    except ValueError:
        error("Invalid input!")
        sys.exit(0)
Example #29
File: ReadXML.py Project: crobertob/RST
def create_tables(db, tables, headers, attributes, foreign_keys, references):
    '''This function is only executed the first time the DB is created'''
    cursor = db.cursor()
    drop_tables(db, tables)
    for i, table_headers in enumerate(headers):
        elements = ""
        for j in range(len(table_headers)):
            elements += headers[i][j] + " "
            elements += attributes[i][j] + ", "
        if i < len(foreign_keys):
            for k in range(len(foreign_keys[i])):            
                elements += ' FOREIGN KEY ('
                elements += foreign_keys[i][k]
                elements += ') REFERENCES '
                elements += references[i][k] + ", "
        elements = elements[:-2]
        sql = 'CREATE TABLE IF NOT EXISTS {} ({})'.format(tables[i], elements) 
        cursor.execute(sql)
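
With hypothetical inputs tables = ["person"], headers = [["id", "age"]], attributes = [["INTEGER PRIMARY KEY", "INTEGER"]] and no foreign keys, the generated statement would be:

CREATE TABLE IF NOT EXISTS person (id INTEGER PRIMARY KEY, age INTEGER)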
Example #30
File: ReadXML.py Project: crobertob/RST
def deletion_no_empty_sets(A, i, k, min_matrix):
    for j, row in enumerate(min_matrix, start = i):
        for l in range(len(row)):
            if (i == j and k >= l) or row[l] == set():
                continue
            if row[l].difference(A) == set():
                return False
    return True
Example #31
    def _read_entity(self, data):
        upack = {
            5: {
                1: '>b',
                2: '>h',
                4: '>i',
                8: '>q'
            },  # int
            6: {
                1: '>B',
                2: '>H',
                4: '>I',
                8: '>Q'
            }  # uint
        }

        result = None
        packetstart = self._dataoffset

        tlf = data[self._dataoffset]
        type = (tlf & 112) >> 4
        more = tlf & 128
        len = tlf & 15
        self._dataoffset += 1

        if more > 0:
            tlf = data[self._dataoffset]
            len = (len << 4) + (tlf & 15)
            self._dataoffset += 1

        len -= 1

        if len == 0:  # skip empty optional value
            return result

        if self._dataoffset + len >= builtins.len(data):
            self._parse_error('Tried to read {} bytes, but only have {}',
                              [len, builtins.len(data) - self._dataoffset],
                              data, self._dataoffset, packetstart)

        elif type == 0:  # octet string
            result = data[self._dataoffset:self._dataoffset + len]

        elif type == 5 or type == 6:  # int or uint
            d = data[self._dataoffset:self._dataoffset + len]

            ulen = len
            if ulen not in upack[type]:  # extend to next greater unpack unit
                while ulen not in upack[type]:
                    d = b'\x00' + d
                    ulen += 1

            result = struct.unpack(upack[type][ulen], d)[0]

        elif type == 7:  # list
            result = []
            self._dataoffset += 1
            for i in range(0, len + 1):
                result.append(self._read_entity(data))
            return result

        else:
            self._parse_error('Skipping unknown field {}', [hex(tlf)], data,
                              self._dataoffset, packetstart)

        self._dataoffset += len

        return result
Example #32
def extract_type(my_sample, play_feature_dic_new, feature_orders):
    # print (feature_weights)
    my_vote = {"Achiever": 0, "Explorer": 0, "Careless": 0, "other": 0}

    my_lst_key = ["Id_Name", "Achiever", "Explorer", "Careless", "other"]
    my_vote_id = {
        "Id_Name": my_sample[0],
        "Achiever": [],
        "Explorer": [],
        "Careless": [],
        "other": []
    }
    my_vote_id_con = {
        "Id_Name": my_sample[0],
        "Achiever": 0,
        "Achiever_Conf": 0,
        "Explorer": 0,
        "Explorer_Conf": 0,
        "Careless": 0,
        "Careless_Conf": 0,
        "other": 0,
        "other_Conf": 0
    }
    for key, item in play_feature_dic_new.items():
        if my_sample[key] == 'medium':
            # a 'medium' value has no effect and casts no vote
            pass
        else:
            # rank the feature by its weight (higher weight = earlier rank)
            my_ordertest = sorted(feature_orders.values(), reverse=True)
            feature_order = my_ordertest.index(feature_orders[key])
            feature_weight = feature_orders[key]
            for key1 in item.keys():
                if (item[key1] == my_sample[key]):
                    my_vote[key1] += feature_weight * (1 / (feature_order + 1))
                # my_vote[key1]+=feature_weight
                else:
                    my_vote[key1] -= feature_weight * (1 / (feature_order + 1))
                    #my_vote[key1]-=feature_weight

            #print(my_vote)
    total_weight = sum(feature_orders.values())
    for cokey in my_vote.keys():
        my_confidence = round((my_vote[cokey] / total_weight) * 100)
        my_vote_id_con.update(my_vote)
        my_vote_id_con[cokey + '_Conf'] = my_confidence

        #my_vote_id[cokey].append(my_vote[cokey])
        #my_vote_id[cokey].append(my_confidence)

    if len(set(my_vote.values())) == 2:
        print('they are similar')
        print(my_vote)
    #return(max(my_vote, key=my_vote.get))          # returns only labels
    top_label = sorted(my_vote, key=my_vote.get, reverse=True)[0]
    return (top_label, my_vote_id_con[top_label + '_Conf'], my_vote_id_con)
Example #33
        sum += temp

    return sum


strings = open("x_yurchik", "r").read().split("\n")
x = [float(x) for x in strings]

strings = open("y_yurchik", "r").read().split("\n")
y = [float(y) for y in strings]
strings = open("points2_yurchik").read().split("\n")
points = [float(points) for points in strings]

table = coef(x, y)
for list in table:
    print(len(list), list)
values = []
for point in points:
    values.append(Eval(table, x, point))

for value in values:
    print("%.2f" % value)

plt.subplot(211)
plt.plot(x, y)
plt.subplot(212)
#plt.axis([x[0], x[-1], -100, y[-1]])

plt.plot(points, values)
plt.show()
Example #34
    def length(self):
        return len(self.accuracies)
Example #35
    def pad_it(word):
        # n is taken from the enclosing scope
        return word + '*' * (n - len(word))
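
For instance, with n = 5 in the enclosing scope, pad_it("ab") would return "ab***".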
Example #36
from builtins import len

from selenium import webdriver
from BotFunction import Main_Program

# Instagram_Info - Edit this for personal info
username = "******"
password = "******"
hash_tags = "HASH_TAGS,SEPARATED,BY,COMMAS"

web_driver = webdriver.Chrome('/web/driver/path')

hash_tags_list = hash_tags.split(',')
hash_tags_number = len(hash_tags_list)


def main():
    Main_Program.main_program(username, password, hash_tags_number,
                              hash_tags_list, web_driver)


if __name__ == '__main__':
    main()
Example #37
def len(obj): # @ReservedAssignment
    return builtins.len(obj)
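
This wrapper restores the built-in len under a local name for modules where len has been shadowed; Examples #13 and #31 show the same workaround via builtins.len.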
Example #38
def xml_parser(data, project_id, scan_id):
    """
    :param data: parsed OWASP Dependency-Check XML report
    :param project_id: identifier of the project the scan belongs to
    :param scan_id: identifier of this scan
    :return: HttpResponse with status 201 once results are stored
    """
    fileName = 'Na'
    filePath = 'Na'
    evidenceCollected = 'Na'
    name = 'Na'
    cvssScore = 'Na'
    cvssAccessVector = 'Na'
    cvssAccessComplexity = 'Na'
    cvssAuthenticationr = 'Na'
    cvssConfidentialImpact = 'Na'
    cvssIntegrityImpact = 'Na'
    cvssAvailabilityImpact = 'Na'
    severity = 'Na'
    cwe = 'Na'
    description = 'Na'
    references = 'Na'
    vulnerableSoftware = 'Na'
    vul_col = 'Na'

    pt = data.xpath('namespace-uri(.)')
    # root = data.getroot()
    inst = []
    for scan in data:
        for dependencies in scan:
            for dependency in dependencies:
                if dependency.tag == '{%s}fileName' % pt:
                    fileName = dependency.text
                if dependency.tag == '{%s}filePath' % pt:
                    filePath = dependency.text
                if dependency.tag == '{%s}evidenceCollected' % pt:
                    evidenceCollected = dependency.text
                for vuln in dependency:
                    if vuln.tag == '{%s}vulnerability' % pt:
                        if pt == 'https://jeremylong.github.io/DependencyCheck/dependency-check.2.0.xsd':
                            for vulner in vuln:
                                if vulner.tag == '{%s}name' % pt:
                                    name = vulner.text
                                if vulner.tag == '{%s}description' % pt:
                                    description = vulner.text
                                if vulner.tag == '{%s}references' % pt:
                                    references = vulner.text
                                if vulner.tag == '{%s}vulnerableSoftware' % pt:
                                    vulnerableSoftware = vulner.text
                                for vuln_dat in vulner:
                                    if vuln_dat.tag == '{%s}cwe' % pt:
                                        cwe = vuln_dat.text
                                    if vuln_dat.tag == '{%s}severity' % pt:
                                        severity_dat = vuln_dat.text
                                        if severity_dat == 'HIGH':
                                            severity = 'High'
                                        elif severity_dat == 'MEDIUM':
                                            severity = 'Medium'
                                        elif severity_dat == 'LOW':
                                            severity = 'Low'

                        elif pt in ('https://jeremylong.github.io/DependencyCheck/dependency-check.2.2.xsd',
                                    'https://jeremylong.github.io/DependencyCheck/dependency-check.2.3.xsd'):
                            # the 2.2 and 2.3 schemas carry the same fields and
                            # are parsed identically
                            for dc22 in vuln:

                                if dc22.tag == '{%s}name' % pt:
                                    name = dc22.text

                                if dc22.tag == '{%s}description' % pt:
                                    description = dc22.text

                                if dc22.tag == '{%s}vulnerableSoftware' % pt:
                                    vulnerableSoftware = dc22.text

                                for vuln_dat in dc22:
                                    for d in vuln_dat:
                                        if d.tag == '{%s}url' % pt:
                                            references = d.text

                                    if vuln_dat.tag == '{%s}cwe' % pt:
                                        cwe = vuln_dat.text
                                    if vuln_dat.tag == '{%s}severity' % pt:
                                        severity_dat = vuln_dat.text
                                        if severity_dat == 'HIGH':
                                            severity = 'High'
                                        elif severity_dat == 'MEDIUM':
                                            severity = 'Medium'
                                        elif severity_dat == 'LOW':
                                            severity = 'Low'

                        else:
                            for vulner in vuln:
                                if vulner.tag == '{%s}name' % pt:
                                    name = vulner.text
                                if vulner.tag == '{%s}cvssScore' % pt:
                                    cvssScore = vulner.text
                                if vulner.tag == '{%s}cvssAccessVector' % pt:
                                    cvssAccessVector = vulner.text
                                if vulner.tag == '{%s}cvssAccessComplexity' % pt:
                                    cvssAccessComplexity = vulner.text
                                if vulner.tag == '{%s}cvssAuthenticationr' % pt:
                                    cvssAuthenticationr = vulner.text
                                if vulner.tag == '{%s}cvssConfidentialImpact' % pt:
                                    cvssConfidentialImpact = vulner.text
                                if vulner.tag == '{%s}cvssIntegrityImpact' % pt:
                                    cvssIntegrityImpact = vulner.text
                                if vulner.tag == '{%s}cvssAvailabilityImpact' % pt:
                                    cvssAvailabilityImpact = vulner.text
                                if vulner.tag == '{%s}severity' % pt:
                                    severity = vulner.text
                                if vulner.tag == '{%s}cwe' % pt:
                                    cwe = vulner.text
                                if vulner.tag == '{%s}description' % pt:
                                    description = vulner.text
                                if vulner.tag == '{%s}references' % pt:
                                    references = vulner.text
                                if vulner.tag == '{%s}vulnerableSoftware' % pt:
                                    vulnerableSoftware = vulner.text

                        date_time = datetime.now()
                        vul_id = uuid.uuid4()

                        if severity == "High":
                            vul_col = "danger"

                        elif severity == 'Medium':
                            vul_col = "warning"

                        elif severity == 'Low':
                            vul_col = "info"

                        dup_data = name + fileName + severity
                        duplicate_hash = hashlib.sha256(dup_data.encode('utf-8')).hexdigest()

                        match_dup = dependencycheck_scan_results_db.objects.filter(
                            dup_hash=duplicate_hash).values('dup_hash')
                        length_match = len(match_dup)

                        if length_match == 1:
                            duplicate_vuln = 'Yes'
                        elif length_match == 0:
                            duplicate_vuln = 'No'
                        else:
                            duplicate_vuln = 'None'

                        false_p = dependencycheck_scan_results_db.objects.filter(
                            false_positive_hash=duplicate_hash)
                        fp_length_match = len(false_p)

                        if fp_length_match == 1:
                            false_positive = 'Yes'
                        else:
                            false_positive = 'No'

                        if cwe == 'Na':
                            cwe = name

                        save_all = dependencycheck_scan_results_db(
                            # date_time=date_time,
                            vuln_id=vul_id,
                            scan_id=scan_id,
                            project_id=project_id,
                            fileName=fileName,
                            filePath=filePath,
                            evidenceCollected=evidenceCollected,
                            name=name,
                            cvssScore=cvssScore,
                            cvssAccessVector=cvssAccessVector,
                            cvssAccessComplexity=cvssAccessComplexity,
                            cvssAuthenticationr=cvssAuthenticationr,
                            cvssConfidentialImpact=cvssConfidentialImpact,
                            cvssIntegrityImpact=cvssIntegrityImpact,
                            cvssAvailabilityImpact=cvssAvailabilityImpact,
                            severity=severity,
                            cwe=cwe,
                            description=description,
                            references=references,
                            vulnerableSoftware=vulnerableSoftware,
                            vul_col=vul_col,
                            vuln_status='Open',
                            dup_hash=duplicate_hash,
                            vuln_duplicate=duplicate_vuln,
                            false_positive=false_positive
                        )
                        save_all.save()
        all_dependency_data = dependencycheck_scan_results_db.objects.filter(scan_id=scan_id, false_positive='No')

        total_vul = len(all_dependency_data)
        total_high = len(all_dependency_data.filter(severity="High"))
        total_medium = len(all_dependency_data.filter(severity="Medium"))
        total_low = len(all_dependency_data.filter(severity="Low"))
        total_duplicate = len(all_dependency_data.filter(vuln_duplicate='Yes'))

        dependencycheck_scan_db.objects.filter(scan_id=scan_id).update(
            total_vuln=total_vul,
            SEVERITY_HIGH=total_high,
            SEVERITY_MEDIUM=total_medium,
            SEVERITY_LOW=total_low,
            total_dup=total_duplicate
        )

    subject = 'Archery Tool Scan Status - DependencyCheck Report Uploaded'
    message = 'DependencyCheck Scanner has completed the scan ' \
              '  %s <br> Total: %s <br>High: %s <br>' \
              'Medium: %s <br>Low %s' % (name, total_vul, total_high, total_medium, total_low)

    email_sch_notify(subject=subject, message=message)

    return HttpResponse(status=201)
Example #39
        # If the current model has a better score than one we've already trained then save it
        if acc > best:
            best = acc
            with open(Constants.pickle_dump, "wb") as f:
                pickle.dump(linear, f)

x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(
    training_data, result_only, test_size=Constants.training_sample_size)

with open(Constants.pickle_dump, "rb") as pickle_in:
    linear = pickle.load(pickle_in)

print(
    'The %s different coefficients (m):' %
    (len(Constants.dataset_member_names) - 1), linear.coef_)
print('Y Intercept:', linear.intercept_)

predictions = linear.predict(x_test)

for i in range(len(predictions)):
    rounded_prediction = int(round(predictions[i]))
    #rounded_prediction = 0 if rounded_prediction < 0 else rounded_prediction

    print(
        "Prediction: {0}, Actual result: {1}, Data used to make prediction: {2}"
        .format(
            str(rounded_prediction).zfill(2),
            str(y_test[i]).zfill(2), x_test[i]))

p = "studytime"
Example #40
    def __str__(self):
        # len() returns an int, which must be converted before concatenation
        return "<Cube of " + str(len(self.clusters)) + " clusters>"
Example #41
def Culstring_Players(My_data):

    # creating dictionary of play style with their characteristics
    play_feature_dic = {
        'items_visited_total': {
            'Achiever': 'low',
            'Explorer': 'high',
            'Careless': 'low',
            'other': 'low'
        },
        'questions_right_ratio': {
            'Achiever': 'high',
            'Explorer': 'high',
            'Careless': 'low',
            'other': 'low'
        },
        'questions_visited_total': {
            'Achiever': 'high',
            'Explorer': 'low',
            'Careless': 'low',
            'other': 'low'
        },
        'questions_wrong_ratio': {
            'Achiever': 'low',
            'Explorer': 'low',
            'Careless': 'high',
            'other': 'high'
        },
        'time_read_time_total': {
            'Achiever': 'high',
            'Explorer': 'low',
            'Careless': 'low',
            'other': 'low'
        },
        'time_nav_time_total': {
            'Achiever': 'high',
            'Explorer': 'low',
            'Careless': 'low',
            'other': 'low'
        },
        'time_map/time_total': {
            'Achiever': 'high',
            'Explorer': 'high',
            'Careless': 'low',
            'other': 'low'
        }
    }

    play_style_dic = {
        'Achiever': {
            'items_visited_total': 'low',
            'questions_right_ratio': 'high',
            'questions_visited_total': 'high',
            'questions_wrong_ratio': 'low',
            'time_read_time_total': 'high',
            'time_nav_time_total': 'low',
            'time_map/time_total': 'high'
        },
        'Explorer': {
            'items_visited_total': 'high',
            'questions_right_ratio': 'high',
            'questions_visited_total': 'low',
            'questions_wrong_ratio': 'low',
            'time_read_time_total': 'low',
            'time_nav_time_total': 'high',
            'time_map/time_total': 'high'
        },
        'Careless': {
            'items_visited_total': 'low',
            'questions_right_ratio': 'low',
            'questions_visited_total': 'low',
            'questions_wrong_ratio': 'high',
            'time_read_time_total': 'low',
            'time_nav_time_total': 'low',
            'time_map/time_total': 'low'
        },
        'other': {
            'items_visited_total': 'low',
            'questions_right_ratio': 'low',
            'questions_visited_total': 'low',
            'questions_wrong_ratio': 'high',
            'time_read_time_total': 'low',
            'time_nav_time_total': 'high',
            'time_map/time_total': 'low'
        }
    }

    #df=pd.DataFrame(data)
    #Defining Lables
    labels_Player_Style = ["Achiever", "Explorer", "CareLess", "Lost"]

    #  list_features=['items_visited_total','time_map/time_total','questions_visited_total','time_read_time_total','questions_right_ratio','time_nav_time_total'] # main order info gain
    list_features = [
        'items_visited_total', 'questions_right_ratio',
        'questions_visited_total', 'questions_wrong_ratio',
        'time_read_time_total', 'time_nav_time_total', 'time_map/time_total'
    ]  # inf gain
    #  list_features=['items_visited_total','time_map/time_total','questions_visited_total','questions_wrong_ratio','time_read_time_total','time_nav_time_total','questions_right_ratio'] # inf gain

    BnData = My_data.iloc[:, 1:8]  # selecting categorical data from dataset
    df = pd.DataFrame(BnData)
    # Assigning player types
    My_data['Player_Type'] = 'other'  # assigning 'other' as the default type
    counter = 0
    ThresholdValue = 0.1
    while counter <= len(list_features):
        counter += 1
        print(My_data)
        ########### 3 Features to compare #############
        if counter == 1:
            My_data['Player_Type'] = np.where(
                (df[list_features[0]] == 'low') &
                (df[list_features[1]] == 'high'),
                'Achiever',
                np.where((df[list_features[0]] == 'high') &
                         (df[list_features[1]] == 'high'), 'Explorer',
                         np.where((df[list_features[0]] == 'low') &
                                  (df[list_features[1]] == 'low'), 'Careless',
                                  np.where((df[list_features[0]] == 'low') &
                                           (df[list_features[1]] == 'high'),
                                           'other', My_data['Player_Type']))))

            print(My_data)
            if My_data[My_data['Player_Type'] ==
                       "other"].shape[0] / My_data.shape[0] < ThresholdValue:
                break

        elif counter == 2:
            My_data['Player_Type'] = np.where(
                (df[list_features[0]] == 'medium') &
                (df[list_features[2]] == 'high') &
                (My_data['Player_Type'] == 'other'), 'Achiever',
                np.where((df[list_features[0]] == 'medium') &
                         (df[list_features[2]] == 'low') &
                         (My_data['Player_Type'] == 'other'), 'Explorer',
                         My_data['Player_Type']))
            print(My_data)
            if My_data[My_data['Player_Type'] ==
                       "other"].shape[0] / My_data.shape[0] < ThresholdValue:
                break

        elif counter == 3:
            My_data['Player_Type'] = np.where(
                (df[list_features[2]] == 'medium') &
                (df[list_features[3]] == 'high') &
                (My_data['Player_Type'] == 'other'), 'Careless',
                np.where((df[list_features[2]] == 'medium') &
                         (df[list_features[3]] == 'low') &
                         (My_data['Player_Type'] == 'other'), 'Explorer',
                         My_data['Player_Type']))
            print(My_data)
            if My_data[My_data['Player_Type'] ==
                       "other"].shape[0] / My_data.shape[0] < ThresholdValue:
                break
        elif counter == 4:
            My_data['Player_Type'] = np.where(
                (df[list_features[3]] == 'medium') &
                (df[list_features[4]] == 'high') &
                (My_data['Player_Type'] == 'other'), 'Achiever',
                np.where((df[list_features[3]] == 'medium') &
                         (df[list_features[4]] == 'low') &
                         (My_data['Player_Type'] == 'other'), 'Explorer',
                         My_data['Player_Type']))
            print(My_data)
            if My_data[My_data['Player_Type'] ==
                       "other"].shape[0] / My_data.shape[0] < ThresholdValue:
                break
        elif counter == 5:
            My_data['Player_Type'] = np.where(
                (df[list_features[4]] == 'medium') &
                (df[list_features[5]] == 'high') &
                (My_data['Player_Type'] == 'other'), 'Explorer',
                np.where((df[list_features[4]] == 'medium') &
                         (df[list_features[5]] == 'low') &
                         (My_data['Player_Type'] == 'other'), 'Achiever',
                         My_data['Player_Type']))
            print(My_data)
            if My_data[My_data['Player_Type'] ==
                       "other"].shape[0] / My_data.shape[0] < ThresholdValue:
                break
        elif counter == 6:
            My_data['Player_Type'] = np.where(
                (df[list_features[5]] == 'medium') &
                (df[list_features[6]] == 'high') &
                (My_data['Player_Type'] == 'other'), 'Explorer',
                np.where((df[list_features[5]] == 'medium') &
                         (df[list_features[6]] == 'low') &
                         (My_data['Player_Type'] == 'other'), 'Careless',
                         My_data['Player_Type']))

            print(My_data)
            if My_data[My_data['Player_Type'] ==
                       "other"].shape[0] / My_data.shape[0] < ThresholdValue:
                break

    print('Player style assignment is done')
    return My_data
Example #42
    def clean_password2(self):
        value = super(CustomSetPasswordForm, self).clean_password2()
        if value:
            if len(value) > 128:
                self._errors['password1'] = self.error_class(
                    ['Password is too long (max: 128 characters)'])
        return value  # clean_* methods are expected to return the cleaned value
Example #43
    def reconstruct(self):
        print("\tStep 4 of 4: Image reconstruction")

        # create an array as the canvas of the final image
        groundtruthImage = np.zeros((self.imageHeight, self.imageWidth))
        linedImage = np.array(self.imageData.convert('RGB'))

        # builtins.len and __len__ are used directly here, presumably because
        # len is shadowed elsewhere in this module
        for key in sorted(
                self.offsetDictionary,
                key=lambda key: builtins.len(self.offsetDictionary[key]),
                reverse=True):
            if self.offsetDictionary[key].__len__() < self.Nf * 2:
                break
            print('\t', key, self.offsetDictionary[key].__len__())
            for i in range(self.offsetDictionary[key].__len__()):
                # The original image (grayscale)
                for j in range(
                        self.offsetDictionary[key][i][1],
                        self.offsetDictionary[key][i][1] +
                        self.blockDimension):
                    for k in range(
                            self.offsetDictionary[key][i][0],
                            self.offsetDictionary[key][i][0] +
                            self.blockDimension):
                        groundtruthImage[j][k] = 255

        # creating a line edge from the original image (for visual purposes)
        for xCoordinate in range(2, self.imageHeight - 2):
            for yCordinate in range(2, self.imageWidth - 2):
                if groundtruthImage[xCoordinate, yCordinate] == 255 and \
                        (groundtruthImage[xCoordinate + 1, yCordinate] == 0 or groundtruthImage[
                                xCoordinate - 1, yCordinate] == 0 or
                                 groundtruthImage[xCoordinate, yCordinate + 1] == 0 or groundtruthImage[
                            xCoordinate, yCordinate - 1] == 0 or
                                 groundtruthImage[xCoordinate - 1, yCordinate + 1] == 0 or groundtruthImage[
                                xCoordinate + 1, yCordinate + 1] == 0 or
                                 groundtruthImage[xCoordinate - 1, yCordinate - 1] == 0 or groundtruthImage[
                                xCoordinate + 1, yCordinate - 1] == 0):

                    # creating the corner edge lines: upper-left, upper-right, lower-left, lower-right respectively
                    if groundtruthImage[xCoordinate - 1, yCordinate] == 0 and \
                                    groundtruthImage[xCoordinate, yCordinate - 1] == 0 and \
                                    groundtruthImage[xCoordinate - 1, yCordinate - 1] == 0:
                        linedImage[xCoordinate - 2:xCoordinate, yCordinate,
                                   1] = 255
                        linedImage[xCoordinate, yCordinate - 2:yCordinate,
                                   1] = 255
                        linedImage[xCoordinate - 2:xCoordinate,
                                   yCordinate - 2:yCordinate, 1] = 255
                    elif groundtruthImage[xCoordinate + 1, yCordinate] == 0 and \
                                    groundtruthImage[xCoordinate, yCordinate - 1] == 0 and \
                                    groundtruthImage[xCoordinate + 1, yCordinate - 1] == 0:
                        linedImage[xCoordinate + 1:xCoordinate + 3, yCordinate,
                                   1] = 255
                        linedImage[xCoordinate, yCordinate - 2:yCordinate,
                                   1] = 255
                        linedImage[xCoordinate + 1:xCoordinate + 3,
                                   yCordinate - 2:yCordinate, 1] = 255
                    elif groundtruthImage[xCoordinate - 1, yCordinate] == 0 and \
                                    groundtruthImage[xCoordinate, yCordinate + 1] == 0 and \
                                    groundtruthImage[xCoordinate - 1, yCordinate + 1] == 0:
                        linedImage[xCoordinate - 2:xCoordinate, yCordinate,
                                   1] = 255
                        linedImage[xCoordinate, yCordinate + 1:yCordinate + 3,
                                   1] = 255
                        linedImage[xCoordinate - 2:xCoordinate,
                                   yCordinate + 1:yCordinate + 3, 1] = 255
                    elif groundtruthImage[xCoordinate + 1, yCordinate] == 0 and \
                                    groundtruthImage[xCoordinate, yCordinate + 1] == 0 and \
                                    groundtruthImage[xCoordinate + 1, yCordinate + 1] == 0:
                        linedImage[xCoordinate + 1:xCoordinate + 3, yCordinate,
                                   1] = 255
                        linedImage[xCoordinate, yCordinate + 1:yCordinate + 3,
                                   1] = 255
                        linedImage[xCoordinate + 1:xCoordinate + 3,
                                   yCordinate + 1:yCordinate + 3, 1] = 255

                    # creating the straight lines: up, down, left, right respectively
                    elif groundtruthImage[xCoordinate, yCordinate + 1] == 0:
                        linedImage[xCoordinate, yCordinate + 1:yCordinate + 3,
                                   1] = 255
                    elif groundtruthImage[xCoordinate, yCordinate - 1] == 0:
                        linedImage[xCoordinate, yCordinate - 2:yCordinate,
                                   1] = 255
                    elif groundtruthImage[xCoordinate - 1, yCordinate] == 0:
                        linedImage[xCoordinate - 2:xCoordinate, yCordinate,
                                   1] = 255
                    elif groundtruthImage[xCoordinate + 1, yCordinate] == 0:
                        linedImage[xCoordinate + 1:xCoordinate + 3, yCordinate,
                                   1] = 255

        timeStamp = time.strftime("%Y%m%d_%H%M%S")
        scipy.misc.imsave(
            self.imageOutputDirectory + timeStamp + "_" + self.imagePath,
            groundtruthImage)
        scipy.misc.imsave(
            self.imageOutputDirectory + timeStamp + "_lined_" + self.imagePath,
            linedImage)

        return self.imageOutputDirectory + timeStamp + "_lined_" + self.imagePath
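Note: scipy.misc.imsave was deprecated in SciPy 1.0 and removed in later
releases. On current installs the two saves above can be done with imageio
(an assumed substitute, not what this project originally shipped with):

import imageio

# equivalent of scipy.misc.imsave(path, array); the path here is illustrative
imageio.imwrite('output/result.png', groundtruthImage.astype('uint8'))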
Example #44
0
File: types.py Project: ojacobson/attic
import builtins as b  # assumed alias; the import is not shown in this snippet of types.py


def vector_length(value):
    return b.len(value)
Example #45
0
    def _refresh(self):
        if self.connected:
            start = time.time()
            retry = 5
            data = None
            while retry > 0:
                try:
                    data = self._read(512)
                    if builtins.len(data) == 0:
                        self.logger.error(
                            'Reading data from device returned 0 bytes!')
                    else:
                        end_pos = len(data)
                        while end_pos > 0:
                            end_pos = data.rfind(self._v1_end)
                            start_pos = data.rfind(self._v1_start, 0, end_pos)
                            if start_pos != -1 and end_pos == -1:
                                data = data[:start_pos]
                            elif start_pos != -1 and end_pos != -1:
                                chunk = data[start_pos:end_pos +
                                             len(self._v1_end) + 3]
                                self.logger.debug(
                                    'Found chunk at {} - {} ({} bytes):{}'.
                                    format(
                                        start_pos, end_pos,
                                        end_pos - start_pos,
                                        ''.join(' {:02x}'.format(x)
                                                for x in chunk)))
                                chunk_crc_str = '{:02X}{:02X}'.format(
                                    chunk[-2], chunk[-1])
                                chunk_crc_calc = self._crc16(chunk[:-2])
                                chunk_crc_calc_str = '{:02X}{:02X}'.format(
                                    (chunk_crc_calc >> 8) & 0xff,
                                    chunk_crc_calc & 0xff)
                                if chunk_crc_str != chunk_crc_calc_str:
                                    self.logger.warning(
                                        'CRC checksum mismatch: Expected {}, but was {}'
                                        .format(chunk_crc_str,
                                                chunk_crc_calc_str))
                                    data = data[:start_pos]
                                else:
                                    end_pos = 0

                    retry = 0

                except Exception as e:
                    self.logger.error(
                        'Reading data from {0} failed: {1} - reconnecting!'.
                        format(self._target, e))

                    self.disconnect()
                    time.sleep(1)
                    self.connect()

                    retry = retry - 1
                    if retry == 0:
                        self.logger.warning(
                            'Trying to read data in next cycle due to connection errors!'
                        )

            if data is not None:
                retry = 0
                values = self._parse(self._prepare(data))

                for obis in values:
                    self.logger.debug('Entry {}'.format(values[obis]))

                    if obis in self._items:
                        for prop in self._items[obis]:
                            for item in self._items[obis][prop]:
                                item(values[obis][prop], 'Sml')
            else:
                values = {}

            cycletime = time.time() - start
            self.logger.debug("cycle takes {0} seconds".format(cycletime))

            return values
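The _crc16 helper is not shown here. SML frames carry a CRC-16 checksum in the
HDLC FCS-16 style (CRC-16/X-25: reflected polynomial 0x8408, initial value
0xFFFF, complemented result); a minimal sketch under that assumption, not the
plugin's actual code:

def crc16_x25(data):
    # bitwise CRC-16/X-25 over an iterable of byte values
    crc = 0xFFFF
    for byte in data:
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ 0x8408 if crc & 1 else crc >> 1
    return ~crc & 0xFFFF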
Example #46
0
File: array.py Project: wojtask/CormenPy
    def __init__(self, elements, start=1):
        self.elements = list(elements)
        self.start = start
        self.length = len(elements)
Example #47
0
def fire_bullets(ai_settings, screen, ship, bullets):
    if len(bullets) < ai_settings.bullets_allowed:
        new_bullet = Bullet(ai_settings, screen, ship)
        bullets.add(new_bullet)
Example #48
0
    radius = 0.7
    stepSize = 5 / 180 * math.pi

    # assumption that start and end are in degrees
    for i in startpos:
        assert 0 <= i <= 358.0 * math.pi / 180
    for j in endpos:
        assert 0 <= j <= 358.0 * math.pi / 180

    updates = [startpos]
    deltas = tuple(map(lambda m, n: m - n, endpos, startpos))
    deltas = [i * stepSize / norm(deltas) for i in deltas]
    print(deltas)
    while True:
        acc = True
        for i in range(len(endpos)):
            acc = acc and abs(updates[-1][i] - endpos[i]) < 0.05
        if acc:
            break
        updates.append(tuple(map(lambda m, n: m + n, updates[-1], deltas)))

    # TODO: valid configuration implementation without tree argument
    # assert valid_configuration(startpos[0], startpos[1], startpos[2], startpos[3], startpos[4], startpos[5])
    # assert valid_configuration(endpos[0], endpos[1], endpos[2], endpos[3], endpos[4], endpos[5])

    finalUpdates = updates
    for i in updates:
        if not temp_valid_configuration(i[0], i[1], i[2], i[3], i[4], i[5]):
            for k in updates[updates.index(i) + 1:]:
                if temp_valid_configuration(k[0], k[1], k[2], k[3], k[4],
                                            k[5]):
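The fragment cuts off mid-loop, but the interpolation idea is simple: step from
startpos to endpos along a straight line in joint space, then test each waypoint
for validity. A self-contained sketch of the interpolation part (all names
illustrative, not from the original):

import math

def interpolate_joints(start, end, step_size=5 / 180 * math.pi, tol=0.05):
    # walk from start toward end in fixed-size steps along the straight line
    deltas = [e - s for s, e in zip(start, end)]
    dist = math.sqrt(sum(d * d for d in deltas))
    if dist == 0:
        return [tuple(start)]
    step = [d * step_size / dist for d in deltas]
    points = [tuple(start)]
    for _ in range(10000):  # guard: a coarse step may never settle inside tol
        if all(abs(p - e) < tol for p, e in zip(points[-1], end)):
            break
        points.append(tuple(p + d for p, d in zip(points[-1], step)))
    return points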
Example #49
0
    def printSubTable(self):
        for row in self.subTable:
            print(len(row), " - ", " ".join(format(x, "8.5f") for x in row))
Example #50
0
            elif "mysql" in file_content['dependencies']:
                back_list.append(y)
            elif "pg" in file_content['dependencies']:
                back_list.append(y)
            else:
                other_list.append(y)
                # print('OTHERS')
        except Exception:
            other_list.append(y)
            continue
    print(' ')
    return json_designer(front_list, back_list, other_list, user_name)


df1 = pd.read_json('Json files/(2019-01-01)---(2019-06-30).json')
column_names = df1['owner_name']

count = 0
part = 0
print('Processing starts')
initial_start = 0
for each_name in column_names:
    start_time = time.time()
    count += 1
    try:
        repos = collect_repo(each_name)
        if len(repos) < 1000:
            json_string = sort_repo(each_name, repos)
            json_file = json.loads(json_string)

        if (len(json_file['front_list']) > 0) and (len(json_file['back_list']) > 0):
Example #51
0
    def get_accuracy_rate(self):
        return sum(self.accuracies.values()) / len(self.accuracies) * 100
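For instance, with per-run accuracies stored as fractions, the mean is returned
as a percentage:

accuracies = {'run1': 0.75, 'run2': 1.0, 'run3': 0.5}
print(sum(accuracies.values()) / len(accuracies) * 100)  # 75.0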
Example #52
0
            print(front_repo['repo_name'] + ' and ' + back_repo['repo_name'] +
                  ' compared')
            if similar(front_repo['repo_name'], back_repo['repo_name']) > 0.5:
                matched_repos.append(front_repo['repo_name'] + '///' +
                                     back_repo['repo_name'])
    return matched_repos


with open(
        '/Json files/' +
        'Separated repos((2019-01-01)---(2019-06-30)(1)).json',
        'r') as filehandle:
    file_content = json.load(filehandle)

for user in file_content:
    if len(user['front_list']) != 0 and len(user['back_list']) != 0:
        preferred_users.append(user)
print('All preferred users collected.')

count = 0
for each_user in preferred_users:
    matches = front_back(each_user['front_list'], each_user['back_list'])
    if len(matches) == 0:
        print('No matches for user: ' + each_user['user_name'])
    else:
        count += 1
        found_repos.append(matches)

# print(similar('happy-ionic-frontend', 'happy-express-backend'))
print('############################################################')
print(found_repos)
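similar() is not defined in this fragment; a common way to implement this kind
of name-similarity score (an assumption, not necessarily this project's helper)
is difflib:

import difflib

def similar(a, b):
    # ratio in [0, 1]; the script treats > 0.5 as a front/back repo match
    return difflib.SequenceMatcher(None, a, b).ratio()

print(similar('abc', 'abd'))  # 0.6666666666666666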
Example #53
0
import numpy as np
import numpy.linalg as alg
import scipy as spy
import builtins

import matplotlib.pylab as pl
import time

# sizeList = [2,3,4,5,6,7,8,9,10,50,100,200,300,500,707,900,1000,1100]
sizeList = [100]  #[15,17,20,25]
timeList = [10]
useCVX = True

np.random.seed(0)
timingVals = np.zeros([builtins.len(sizeList), builtins.len(timeList)])
for sizeTemp in range(builtins.len(sizeList)):
    for timeTemp in range(builtins.len(timeList)):

        size = sizeList[sizeTemp]
        timesteps = timeList[timeTemp]
        print("Solving for size", size, ", timesteps ", timesteps)

        samplesPerStep = 10  #int(np.log2(size))
        timeShift = timesteps / 3  #Number of steps till new covariance matrix appears
        eps = 1e-3

        # Optimization parameters
        alpha = 0.1  # Lasso parameter
        beta = 0.6  # Weight between basis nodes
        FroError = []
Example #54
0
#!/usr/bin/python3

# generate config and genesis files for test
import json
import os
import string
import subprocess

import conf
import conf_type
import sys
from builtins import len, int, dict, open, set

nodeCount = len(conf.nodes)
# write the location (path) of the pkgen command here
cliCmd = "./aergocli"


def low_bool(b):
    return "true" if b else "false"


def generate_genesis(conf, template):
    genesis_filename = "genesis_%s.json" % conf.testname

    bps = list()
    for n in conf.nodes:
        if n.role == conf_type.Role.producer:
Example #55
0
import builtins


def count_uniq(iterable):
    '''Return the unique element count of ``iterable``.'''
    return builtins.len(builtins.set(iterable))
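For example:

print(count_uniq('abracadabra'))  # 5 -- the distinct characters a, b, c, d, r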
Example #56
0
                else:
                    minefield.minefield[individual.get_position_y()][individual.get_position_x()] = '0'
            except IndexError:
                distance_dict[str(population_copy.index(individual))] = find_distance(minefield.get_exit_x(),
                                                                                      minefield.get_exit_y(),
                                                                                      individual.get_position_x(),
                                                                                      individual.get_position_y())
                population.remove(individual)
                rows -= 1
                row -= 1

            row += 1

        column += 1

        print(len(population))
        print(len(distance_dict))

        if reached_end:
            break

        if len(distance_dict) >= individuals_per_generation:
            break

        print()
        print()
        print()
        print()

        minefield.print()
        minefield.remove_zeros()
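find_distance is not shown in this fragment; a plausible implementation (an
assumption) is the straight-line distance from an individual to the exit:

import math

def find_distance(exit_x, exit_y, pos_x, pos_y):
    # Euclidean distance between the minefield exit and an individual's position
    return math.hypot(exit_x - pos_x, exit_y - pos_y)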
Example #57
0
import builtins


def len(channel):
    '''Return the number of buffered items in ``channel``.'''
    return builtins.len(channel.buffer)
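Continuing from the definition above with a stand-in Channel class
(hypothetical, for illustration only):

class Channel:
    def __init__(self, buffer):
        self.buffer = buffer

print(len(Channel([1, 2, 3])))  # 3 -- the shadowing len() delegates to builtins.len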
Example #58
0
import builtins


def len(s):
    '''Replacement for the built-in :func:`len() <python:len>` function.'''
    return builtins.len(s)
Example #59
0
        _VSCODE_columnNames = list(_VSCODE_df)

    # Compute the index column. It may have been renamed
    _VSCODE_indexColumn = _VSCODE_df.index.name if _VSCODE_df.index.name else "index"
    _VSCODE_columnTypes = _VSCODE_builtins.list(_VSCODE_df.dtypes)
    del _VSCODE_df

    # Make sure the index column exists
    if _VSCODE_indexColumn not in _VSCODE_columnNames:
        _VSCODE_columnNames.insert(0, _VSCODE_indexColumn)
        _VSCODE_columnTypes.insert(0, "int64")

    # Then loop and generate our output json
    _VSCODE_columns = []
    for _VSCODE_n in _VSCODE_builtins.range(
            0, _VSCODE_builtins.len(_VSCODE_columnNames)):
        _VSCODE_column_type = _VSCODE_columnTypes[_VSCODE_n]
        _VSCODE_column_name = str(_VSCODE_columnNames[_VSCODE_n])
        _VSCODE_colobj = {}
        _VSCODE_colobj["key"] = _VSCODE_column_name
        _VSCODE_colobj["name"] = _VSCODE_column_name
        _VSCODE_colobj["type"] = str(_VSCODE_column_type)
        _VSCODE_columns.append(_VSCODE_colobj)
        del _VSCODE_column_name
        del _VSCODE_column_type

    del _VSCODE_columnNames
    del _VSCODE_columnTypes

    # Save this in our target
    _VSCODE_targetVariable["columns"] = _VSCODE_columns
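Stripped of the _VSCODE_ prefixes and the index-column handling, the
column-description loop reduces to (a simplified sketch):

import pandas as pd

df = pd.DataFrame({'a': [1], 'b': ['x']})
columns = [{'key': str(c), 'name': str(c), 'type': str(t)}
           for c, t in zip(list(df), df.dtypes)]
print(columns)  # [{'key': 'a', 'name': 'a', 'type': 'int64'},
                #  {'key': 'b', 'name': 'b', 'type': 'object'}]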
Example #60
0
def process_images(path, rgb_line_ending, depth_line_ending, manual):
    """ Finds images at the specified [path] and runs the edge detection code to
    generate predictions on the RGB, D, and RGD versions of each image.
    Requires: Files for the same object MUST share a base name plus a
    distinguishing line ending (suffix), and the line endings for the RGB and
    depth frames MUST BE DISTINCT.
    Ex. path="home/images", rgb_line_ending=".png", depth_line_ending="depth.png"
    Each object MUST HAVE BOTH AN RGB AND A DEPTH IMAGE.
    """
    # TODO: add a toggle to store manual/user input also

    formats = ["rgb", "d", "rgd"]
    # store image names of incorrect preds
    incorrect_preds = [[], [], []]
    manual_incorrect_preds = [[], [], []]
    # store accuracy percentages
    # [rgb, d, rgd] holds sum of pred evals. divide by num preds later
    percentages = [0, 0, 0]
    manual_percentages = [0, 0, 0]

    # stores list of unique image names with line ending removed
    image_names = []
    # checks for image files in immediate directory
    for entry in os.scandir(path):
        if entry.is_file():
            image_name = ""
            if entry.name.endswith(rgb_line_ending):
                # remove line ending
                image_name = entry.name[0:-len(rgb_line_ending)]
                image_names.append(image_name)
            elif entry.name.endswith(depth_line_ending):
                # remove line ending
                image_name = entry.name[0:-len(depth_line_ending)]
                image_names.append(image_name)

    # get unique vals
    image_names = set(image_names)
    # print(image_names)

    # loop over image names
    for image_name in image_names:
        # Generate all the actual images
        # rgb image stored in bgr order because OpenCV is just like that
        bgr = cv2.imread(path + image_name + rgb_line_ending)
        depth = process_depth(path + image_name, depth_line_ending)

        # If you haven't already, do any cropping/scaling necessary (BIGBIRD)

        # resizing to fit on screen when displaying output image
        whratio = bgr.shape[1] / bgr.shape[0]
        # need to scale down labels also - new width/old width
        scale = int(whratio * 400) / bgr.shape[1]
        # resize takes parameter order (img, width, height)
        bgr = cv2.resize(bgr, (int(whratio * 400), 400))
        depth = cv2.resize(depth, (int(whratio * 400), 400))

        b, g, r = cv2.split(bgr)
        dgr = cv2.merge((depth, g, r))

        depth3channel = cv2.merge((depth, depth, depth))

        images = [bgr, depth3channel, dgr]  # iterate over for comparison
        width, height = bgr.shape[1], bgr.shape[0]
        for i in range(3):
            pred_coords = grab_points(0, 0, width, height, images[i])
            # TODO: search image_name.txt file for labels and eval pred
            pred_grasp_valid = eval_pred(images[i], pred_coords,
                                         path + image_name + "_grasps.txt",
                                         scale)
            cv2.imshow("preds", images[i])
            cv2.waitKey(0)
            if pred_grasp_valid:
                print(image_name + " " + str(i) + " was a valid grasp")
                percentages[i] += 1
            else:
                print(image_name + " " + str(i) + " was NOT a valid grasp")
                incorrect_preds[i].append(image_name)

            if manual:
                # user input to confirm whether grasp was correct
                userinput = input("Is the grasp valid? (y/n) \n")
                while userinput != "y" and userinput != "n":
                    print("Invalid input. Please enter y/n")
                    userinput = input("Is the grasp valid? (y/n) \n")

                if userinput == "y":
                    manual_percentages[i] += 1
                else:
                    manual_incorrect_preds[i].append(image_name)

    print("Checked predictions on ", len(image_names), " images.")
    print("Automated rgb, d, rgd percentages: ",
          np.array(percentages) / len(image_names))
    # print incorrectly classified image names, and the channel format
    print("Automatically-identified incorrect preds ", incorrect_preds)

    if manual:
        print("Manual rgb, d, rgd percentages: ",
              np.array(manual_percentages) / len(image_names))
        print("Manually-identified incorrect preds ", manual_incorrect_preds)