Example #1
    def report_for_file(self, file_name):
        print 'report for: ' + file_name
        windows = self._windows
        methods = self._methods
        report = Report(file_name, windows, methods)

        raw_txt = Reader.readFromFile(file_name)
        #print raw_txt

        words = Reader.extractWords(raw_txt, "russian")

        keywords = Reader.meter(words)
        self._keywords = keywords
        self._terms = words

        #initialize the report with terms and their tf weights
        for term in self._terms:
            report.add_term_tf(term, keywords[term])

        for window in windows:
            for method in methods:
                print method, window
                (array, graph) = self.get_rw_for(method, window)
                report._graph = graph  #TODO: make graph a property, revisit this logic
                for v in array:
                    term = v.term_value
                    report.add_term_rw_stats(term, method, window, v.term_weight_rw)

        self._reports[file_name] = report
Example #2
def sign_up(new_username,new_password,gender,Countryin,Targetcountry):
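    # Reject empty fields, duplicate usernames, and commas, which would corrupt the comma-separated credentials file.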
    previousCredentials = read.read_file('./util/credentials.txt')
    new_credentials = '%s,%s,%s,%s,%s' % (new_username, new_password, gender, Countryin, Targetcountry)
    if (new_username == '' or new_password == '' or new_username in previousCredentials
            or new_username.find(',') != -1 or new_password.find(',') != -1):
        return 'Invalid signup credentials'
    else:
        read.write_file('./util/credentials.txt', '\n' + new_credentials, 'a')
        return 'Successfully signed up'
Example #3
def read_data():
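    # Load the 2014-2015 log, then derive the diabetes-compensation, age and gender datasets from it.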
    global patients_log
    Reader.readLog(file='Input/Log-completo.csv', start=2014, finish=2015, cv_frequency=0)
    patients_log = Reader.patients

    global patients_dm, patients_dm_periods, patients_age, patients_gender
    patients_dm, patients_dm_periods, patients_age, patients_gender = DMCompensation_filter.set_data()

    patients_official_list()
Example #4
def file_report():
    raw_txt = Reader.readFromFile('data/test1')
    words = Reader.extractWords(raw_txt)
    keywords = Reader.meter(words)
    window = 3

    graph_rw = TermGraph()

    stopwords = nltk.corpus.stopwords.words('english')
    words_clean = []
    for word in words:
        if word not in stopwords:
            words_clean.append(word)
    words = words_clean
    length = len(words)
    # print length, len(keywords), window
    # print words

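    # Slide over the cleaned text two words at a time; each step links three
    # consecutive words into a triangle of co-occurrence edges.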
    for i in xrange(0, length - 2, 2):
        print '----', i, '----'
        print words[i:i+window]
        v1 = Vertex(words[i], keywords[words[i]])
        v2 = Vertex(words[i+1], keywords[words[i+1]])
        v3 = Vertex(words[i+2], keywords[words[i+2]])
        e1 = Edge(v1, v2)
        e2 = Edge(v1, v3)
        e3 = Edge(v2, v3)
        graph_rw.add_edge(e1)
        graph_rw.add_edge(e2)
        graph_rw.add_edge(e3)


    graph_rw.recalc_edges()

    mode = 'rw'
    graph_rw.recalc_vert_weights(mode)

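    # Repeat the weight recalculation to let the random-walk scores converge.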
    for i in xrange(0,100):
        graph_rw.recalc_vert_weights(mode)


    array = graph_rw._verticles.values()
    array.sort(comparator)

    #report
    file_path = './test_results/' + mode + '-win3.csv'
    toCsv = ''
    for v in array:
        num_joins = len(graph_rw._verticlesJoins[v.term_value])
        toCsv += str(v.term_value) + ',' + str(v.term_weight_rw) + ',' + str(v.term_weight_tf) + '\n'
        print v, v.term_weight_rw, num_joins, v.term_weight_tf
        print graph_rw._verticlesJoins[v.term_value]
    out = open(file_path, 'w')
    out.write(toCsv)
    out.close()
Example #5
def task1_2(ap=0.5):
    reader = Reader(PATH_DATASET, ['car'], "perFrame")
    gt = reader.get_annotations()
    det = reader.get_det("yolo")

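    # Group the ground-truth boxes by frame, in frame order, so voc_eval can index them per frame.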
    grouped = defaultdict(list)
    for box in gt:
        grouped[box.frame].append(box)
    ordered_dict = OrderedDict(sorted(grouped.items()))
    rec, prec, ap = voc_eval(det, ordered_dict, ap, is_confidence=True)
    print(ap)

    return
Example #6
def file_report():
    raw_txt = Reader.readFromFile('data/test1')
    words = Reader.extractWords(raw_txt)
    keywords = Reader.meter(words)
    window = 3

    graph_rw = TermGraph()

    stopwords = nltk.corpus.stopwords.words('english')
    words_clean = []
    for word in words:
        if word not in stopwords:
            words_clean.append(word)
    words = words_clean
    length = len(words)
    # print length, len(keywords), window
    # print words

    for i in xrange(0, length - 2, 2):
        print '----', i, '----'
        print words[i:i + window]
        v1 = Vertex(words[i], keywords[words[i]])
        v2 = Vertex(words[i + 1], keywords[words[i + 1]])
        v3 = Vertex(words[i + 2], keywords[words[i + 2]])
        e1 = Edge(v1, v2)
        e2 = Edge(v1, v3)
        e3 = Edge(v2, v3)
        graph_rw.add_edge(e1)
        graph_rw.add_edge(e2)
        graph_rw.add_edge(e3)

    graph_rw.recalc_edges()

    mode = 'rw'
    graph_rw.recalc_vert_weights(mode)

    for i in xrange(0, 100):
        graph_rw.recalc_vert_weights(mode)

    array = graph_rw._verticles.values()
    array.sort(comparator)

    #report
    file_path = './test_results/' + mode + '-win3.csv'
    toCsv = ''
    for v in array:
        num_joins = len(graph_rw._verticlesJoins[v.term_value])
        toCsv += str(v.term_value) + ',' + str(v.term_weight_rw) + ',' + str(
            v.term_weight_tf) + '\n'
        print v, v.term_weight_rw, num_joins, v.term_weight_tf
        print graph_rw._verticlesJoins[v.term_value]
    out = open(file_path, 'w')
    out.write(toCsv)
    out.close()
Example #7
    def run(self, flag):
        nx_graphs, _ = Reader.multi_readG(self.path)

        if flag == "LN":
            r_t = Reader.true_cluster(self.path).tolist()
            print(clustering(r_t))
            cluster_true = [r[0] - 1 for r in r_t]
            k_list = [k for k in range(2, 11)]
        else:
            cluster_true = []
            k_list = [2, 3, 6, 8]
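            # The first 12 nodes belong to cluster 0, the remaining 17 to cluster 1.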
            for i in range(29):
                if i < 12:
                    cluster_true.append(0)
                else:
                    cluster_true.append(1)

        w_dict = Reader.weight(self.path)
        print(nx_graphs[0])
        MK_G = Node2Vec_LayerSelect.Graph(nx_graphs, self.p, self.q, 0.5)
        MK_G.preprocess_transition_probs(w_dict, 2)
        MK_walks = MK_G.simulate_walks(self.num_walks, self.walk_length)

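        # Flatten the simulated walks into token sequences for the Word2Vec learner.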
        MK_words = []
        for walk in MK_walks:
            MK_words.extend([str(step) for step in walk])

        M_L = Word2Vec.Learn(MK_words)
        M_matrix, M_mapping = M_L.train()

        result = {}
        for k in k_list:
            cluster_trained = KMeans(
                n_clusters=k, random_state=0).fit_predict(M_matrix).tolist()

            length = min(len(cluster_true), len(cluster_trained))

            r = normalized_mutual_info_score(cluster_true[0:length],
                                             cluster_trained[0:length])
            f = f1_score(cluster_true[0:length],
                         cluster_trained[0:length],
                         average='micro')
            print(cluster_trained)
            print(cluster_true)

            result[k] = (r, f)
            #pickle.dump(cluster_trained, open(self.path+str(k)+'.pickle', '+wb'))

        print(result)
Example #8
def signup():
    previousCredentials = Reader.read_file('credentials.txt')
    if request.method == "GET":
        return render_template('signup.html')
    elif request.method == "POST":
        new_user = request.form['nuser']
        new_pswd = request.form['npswd']
        new_credentials = '%s,%s' % (new_user, new_pswd)
        if new_user == '' or new_pswd == '' or new_user in previousCredentials or new_user.find(',') != -1 or new_pswd.find(',') != -1:
            return render_template('form.html', error='Invalid signup credentials')
        else:
            Reader.write_file('credentials.txt', new_credentials, 'a')
            return render_template('form.html', error='Successfully signed up')
    else:
        return 'yo'
Example #9
class BackTestor:
  def __init__(self, order_path):
    self.r = Reader()
    self.t = Trader()
    self.LoadOrder(order_path)

  def LoadOrder(self, order_path):
    self.r.load_order_file(order_path)
    for i in range(self.r.get_ordersize()):
      o = self.r.read_border(i)
      if o.price > 0:
        self.t.RegisterOneTrade(o.contract, o.size if o.side == 1 else -o.size, o.price)

  def Plot(self):
    self.t.PlotStratPnl()
Example #10
def write_entity_test(verbose):
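    # Parse a SQL schema into a Table model and generate an entity class file from it.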
    relative_file_path = "../../resources/sql/accountSchema.sql"

    file_contents = Reader.open_file(relative_file_path)
    parser = Reader.SQL_Parser(file_contents)
    table_name = parser.parse_table()
    table_attributes = parser.parse_attributes()

    t = Table.Table(table_name, file_contents, table_attributes)

    w = Writer.Entity_Writer(t)

    w.write_entity("some_file")

    return 0
Example #11
def open_file_test(verbose):
    relative_file_path = "../../resources/sql/accountSchema.sql"

    file_contents = Reader.open_file(relative_file_path)
    if (len(file_contents) != 0):
        return 1
    return 0
Example #12
 def clear(self):
     self.reader = Reader()
     self.order_file_size = 0
     self.shot_file_size = 0
     self.pos = {}
     self.net_pnl = {}
     self.gross_pnl = {}
     self.ticker_strat_map = {}
     self.avgcost = {}
     self.gross_time_allpnl_map = {}
     self.net_time_allpnl_map = {}
     self.strat_data_map = {}
     self.gross_strat_pnl_map = {}
     self.net_strat_pnl_map = {}
     self.pnl_contract = set([])
     self.Caler = CALER(contract_config_path)
Example #13
def run_epoch(session, model, data, eval_op, epoch_num):
    """Runs the model on the given data."""
    epoch_size = ((len(data) // model.batch_size) - 1) // model.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = session.run(model.initial_state)
    for step, (x, y) in enumerate(
            Reader.ptb_iterator(data, model.batch_size, model.num_steps)):
        fetches = [model.cost, model.final_state, model.logits, eval_op]
        feed_dict = {}
        feed_dict[model.input_data] = x
        feed_dict[model.targets] = y
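        # Feed the previous step's recurrent state back in, layer by layer.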
        for i, (c, h) in enumerate(model.initial_state):
            feed_dict[c] = state[i].c
            feed_dict[h] = state[i].h
        cost, state, logits, _ = session.run(fetches, feed_dict)
        costs += cost
        iters += model.num_steps
        # Rani: show the actual prediction
        # decodedWordId = int(np.argmax(logits))
        # print(" ".join([inverseDictionary[int(x1)] for x1 in np.nditer(x)]) + \
        #       " got:" + inverseDictionary[decodedWordId] + " expected:" + inverseDictionary[int(y)])

        #if verbose and step % (epoch_size // 10) == 10:
        if step % 10 == 0:
            print("epoch: %d\t%.3f perplexity: %.3f speed: %.0f wps" %
                  (epoch_num, step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * model.batch_size / (time.time() - start_time)))
    print("costs is %d and iters is %d" % (costs, iters))
    return np.exp(costs / iters)
Example #14
def main():
    # modeOfSubstitution = 0 -> delete None values
    # modeOfSubstitution = 1 -> substitute the mean
    # modeOfSubstitution = 2 -> substitute the mode
    modeOfSubstitution = 2
    numPartitions = 6
    containID = 1

    reader = Reader.Reader('breastCancer.data', 'breastCancerInfo.data',
                           modeOfSubstitution, containID)

    #Different methods for generating the set
    trainSet, testSet = reader.leaveOneOut()

    #partitions=reader.crossValidation(numPartitions)
    #testSet,trainSet=generateSets(partitions)

    if testSet[0] == [-1]:
        print "Numero de particiones erroneo"
    else:
        attributesUsed = initListOfAttributes(reader.canUse)
        #fullTreeID3=treeGenerationID3(trainSet,attributesUsed)
        fullTreeID45 = treeGenerationID45(trainSet, attributesUsed)
        print fullTreeID45.childs['9'].connectionName
        solveForLeave1Out(fullTreeID45, testSet, attributesUsed)
        #solveForCrossValidation(fullTreeID3,testSet,attributesUsed)
        #printTree(fullTreeID3)
        print "Generacion del arbol completada"
Example #15
def worker(chunk, sect_dir, opcode_dir, dll_dir):
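    # chunk is (worker_id, file_list): extract opcode, DLL and section features from each asm file and pickle them.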
    widgets = [
        'Worker {}: '.format(chunk[0]), ' ',
        Percentage(), ' ',
        Bar(), ' ',
        ETA()
    ]
    pbar = ProgressBar(widgets=widgets, maxval=len(chunk[1])).start()
    for idx, item in enumerate(chunk[1]):
        # Filenames
        fn = item.split(os.sep)[-1].split(".")[0]

        # Read asm code
        asm = r.readAsmCode(item)

        # Extract features
        opcodeDict, DLLDict, sectionDict = extractAssemblyFeatures(asm)

        # Save features
        pickle.dump(opcodeDict, open(os.path.join(opcode_dir, fn), "wb"))
        pickle.dump(DLLDict, open(os.path.join(dll_dir, fn), "wb"))
        pickle.dump(sectionDict, open(os.path.join(sect_dir, fn), "wb"))

        # Update progressbar
        pbar.update(idx + 1)
    pbar.finish()
Example #16
def run():
    target = 50047984

    data = []
    for number in Reader.read("input"):
        data.append(int(number))

    sums = set()
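    # sums holds the sums of every contiguous run ending at the previous element.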
    for i in range(len(data)):
        if (target - data[i]) in sums:
            group, total = [], 0
            for j in range(i):
                group.append(data[i - j])
                total += data[i - j]
                if total == target:
                    return min(group) + max(group)

        else:
            newSums = set()
            for number in sums:
                newSums.add(number + data[i])
            newSums.add(data[i])
            sums = newSums

    return "Invalid"
Example #17
def run():
    seen = set()
    highest = 0
    lowest = 1000000

    for data in Reader.read("input"):

        lower, upper = 0, 127
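        # 7 F/B characters binary-partition the row range 0-127; 3 L/R characters then pick the column 0-7.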
        for i in range(7):
            h = (upper - lower) // 2
            if data[i] == "F":
                upper = lower + h
            elif data[i] == "B":
                lower = upper - h

        left, right = 0, 7
        for i in range(7, 10):
            h = (right - left) // 2
            if data[i] == "L":
                right = left + h
            elif data[i] == "R":
                left = right - h

        seat = (lower * 8) + left
        highest = max(highest, seat)
        lowest = min(lowest, seat)
        seen.add(seat)

    for i in range(lowest + 1, highest):
        if i not in seen:
            return i
Example #18
    def train_from_grid(self,
                        outputSamples="training/samples.data",
                        outputResponses="training/responses.data"):
        dimCase = self.stockDim
        self.samples = np.empty((0, self.stockDim**2))
        self.responses = []
        for i in range(6, 10):
            reader = Reader.Reader()
            reader.load_image("data/sudokus/sudoku" + str(i) + ".png")
            reader.clean_image()
            reader.rectify_perspective()
            reader.cut_image_from_clean()
            for img in reader.cases:
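                # Keep the contour with the largest perimeter in each cell (presumably the digit).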
                contours, hierarchy = cv2.findContours(copy.deepcopy(img),
                                                       cv2.RETR_LIST,
                                                       cv2.CHAIN_APPROX_SIMPLE)
                maxArea = -1
                biggest = None
                for ctn in contours:
                    area = cv2.arcLength(ctn, True)
                    if area > maxArea and area > 25:
                        maxArea = area
                        biggest = ctn
                x, y, w, h = cv2.boundingRect(biggest)
                img = img[y:y + h, x:x + w]

                img = cv2.resize(img, (self.stockDim, self.stockDim))

                img = img.reshape((1, img.size))
                img = np.float32(img)

                self.responses.append(i)
                self.samples = np.append(self.samples, img, 0)
Example #19
    def run(self):
        print("[log]Generator start...")
        while (not globcfg.event.is_set()):
            print(
                "[log]currentRunThread: Reader= {readCount}, Writer= {writeCount}"
                .format(readCount=globcfg.currentRunThreadCount['Reader'],
                        writeCount=globcfg.currentRunThreadCount['Writer']))
            generate_time = getRandomInterval(globcfg.lamGen)
            globcfg.generateTime_lock.acquire()
            globcfg.generate_time_globalCopy = generate_time
            globcfg.generateTime_lock.release()
            globcfg.event.wait(generate_time)
            choice = random.randint(0, 1)
            # generate a new thread

            if (choice):
                print("[log]Generate thread {number} : {name}".format(
                    number=globcfg.threadNumber, name="Reader"))
                self.gui.change_state("R", globcfg.threadNumber,
                                      self.gui.nowhere, self.gui.scheduling)
                globcfg.waitingList.append(
                    Reader.Reader(self.book, self.lock, globcfg.threadNumber,
                                  self.gui))  #new Reader
            else:
                print("[log]Generate thread {number} : {name}".format(
                    number=globcfg.threadNumber, name="Writer"))
                self.gui.change_state("W", globcfg.threadNumber,
                                      self.gui.nowhere, self.gui.scheduling)
                globcfg.waitingList.append(
                    Writer.Writer(self.book, self.lock, globcfg.threadNumber,
                                  self.gui))  #new Writer
            globcfg.threadNumber += 1
Example #20
 def call(self, *arg, **kw):
     if debugCall: print "Call:", arg, kw
     id = thread.get_ident()
     if id not in self.readlocks:
         if debugThreads: print "Call: Making readlock for", id
         self.readlocks[id] = threading.Condition()
     if debugThreads: print "Call: Locking readlock for", id
     self.readlocks[id].acquire()
     if debugThreads: print "Call: Locked readlock for", id
     if self.err is not None:
         if debugThreads: print "Call: Releasing readlock for", id
         self.readlocks[id].release()
         if debugThreads: print "Call: Released readlock for", id
         raise self.err
     try:
         self.write(('RPCCall', id, arg, kw))
         while self.err is None and id not in self.res:
             if debugThreads: print "Call: Waiting on readlock for", id
             self.readlocks[id].wait()
             if debugThreads: print "Call: Waiting on readlock for", id, "done"
         res = self.res[id]
         del self.res[id]
         err = self.err
     finally:
         if debugThreads: print "Call: Releasing readlock for", id
         self.readlocks[id].release()
         if debugThreads: print "Call: Released readlock for", id
     if err:
         raise err
     res = Reader.extend(res, self.extension.parse)
     if debugResult: print "Result:", res
     return res
Example #21
def RemoveFromList(ListSize):
    MsgToReturn = ""
    if ListSize > 1:
        IndexToRemove = 0
        while True:
            ItemToRemove = r.intReader("POSICION DE ELEMENTO A REMOVER: ",
                                       "VALOR NO VALIDO")
            for x in range(ListSize):
                if (ItemToRemove == ListIndexes[x]
                        and ItemToRemove > 100 and ItemToRemove <= 200):
                    if (x + 1) < ListSize and ItemToRemove == ListIndexes[x + 1]:
                        IndexToRemove = x + 1
                        break
                    else:
                        IndexToRemove = x
                        break
            if IndexToRemove > 0:
                break
            else:
                print("ID DE POSICION FUERA DE RANGO")
        # deletes items requested on IndexToRemove at lists
        ListStorage.pop(IndexToRemove)
        ListIndexes.pop(IndexToRemove)
    else:
        MsgToReturn = "     UNDERFLOW ERROR"
    # Returns string
    return MsgToReturn
Example #22
def main():
    """This is the main function for the program, input the selected method you want to run"""
    programchoose = input(
        "Type 1 for single day, variable, hmo analysis, 2 for average across HMO analysis and 3 for average across timeslot analysis"
    )
    if (programchoose == 1):
        ch.chooseafile()
    elif (programchoose == 2):
        dc.findDaysInCommon()
    elif (programchoose == 3):
        rd.main()
    else:
        return 0

    return
Example #23
class SavingReader(Reader.Reader):
    def __init__(self, context, url, *args, **kw):
        self.__filename = kw['filename']
        del kw['filename']
        apply(Reader.Reader.__init__, (self, context, '') + args, kw)
        context.rmreader(self)
        self.url = url
        self.restart(url)

    def handle_meta(self, errcode, errmsg, headers):
        if not self.handle_meta_prelim(errcode, errmsg, headers):
            return
        # now save:
        self.stop()
        try:
            self.save_file = open(self.__filename, "wb")
        except IOError, msg:
            self.context.error_dialog(IOError, msg)
            return
        #
        # add to history without destroying any title already known:
        #
        history = grailutil.get_grailapp().global_history
        title, when = history.lookup_url(self.url)
        history.remember_url(self.url, title or '')
        #
        Reader.TransferDisplay(self.last_context, self.__filename, self)
Example #24
def GlobalLoop(pathparking, pathdemand):

    [stamps, parking] = R.datareader(pathparking, pathdemand)
    robots = SU.setuprobots(2, stamps[0])
    customers = SU.Get_customers(pathdemand)

    # print(stamps)
    # print(customers)

    for tf in stamps:
        # print("tf")
        typeaction = GI.CheckTypeAction(customers, tf)
        asignedspot = SS.asignswapspot(parking)
        target = GI.GetCustomerId(customers, tf)

        # print(typeaction)
        if typeaction:
            parking[asignedspot] = target
            print("depose")
            place = SF.Findplace(parking, stamps, customers, tf)
            neworder = MO.Task(asignedspot, place, tf, target)
            GO.giveorder(robots, neworder)
            parking[asignedspot] = "none"
            parking[place] = target

        elif typeaction == False:
            print("retrieve")
            place = GI.Retrievelocation(parking, target)
            print(place)
            print("assigned spot is", asignedspot)
            print("place is", place)
            EC.extractcar(customers, asignedspot, parking, robots, place, tf, stamps, target)
            parking[asignedspot] = target
            parking[place] = "none"
Example #25
def grep(inputFn, outputFn, pattern):
    rows = r.readAsmCode(inputFn)
    out = open(outputFn, "w")
    for row in rows:
        if pattern in row:
            out.write(row)
            out.write("\n")
    out.close()
Example #26
def eval_ast(ast, env_):
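    # Symbols resolve through the environment, lists are evaluated element-wise, literals pass through.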
    _type = Reader.getType(ast)
    if _type == "symbol":
        return env_.get(ast)
    if _type == "list":  # call eval on all using list comprehension
        return [EVAL(element, env_) for element in ast]
    else:
        return ast
Example #27
def signup():
    previousCredentials = Reader.read_file('credentials.txt')
    if request.method == "GET":
        return render_template('signup.html')
    elif request.method == "POST":
        new_user = request.form['nuser']
        new_pswd = request.form['npswd']
        new_credentials = '%s,%s' % (new_user, new_pswd)
        if new_user == '' or new_pswd == '' or new_user in previousCredentials or new_user.find(
                ',') != -1 or new_pswd.find(',') != -1:
            return render_template('form.html',
                                   error='Invalid signup credentials')
        else:
            Reader.write_file('credentials.txt', new_credentials, 'a')
            return render_template('form.html', error='Successfully signed up')
    else:
        return 'yo'
Example #28
def merge_g(path):
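    # Flatten a multi-layer graph into one nx.Graph holding the union of all nodes and edges.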
    nx_graphs, _ = Reader.multi_readG(path)
    m_g = nx.Graph()
    for g in nx_graphs:
        m_g.add_nodes_from(g.nodes())
        m_g.add_edges_from(g.edges())

    pickle.dump(m_g, open(path + '\\' + 'merged_graph.txtnx_graph.pickle', 'wb'))
Example #29
def validate(username, password):
    if username == '' or password == '':
        return False
    creds = read.getCsvDict('./util/credentials.txt')
    return username in creds.keys() and creds[username][0] == password
Example #30
    def load_data(self):
        train_edges = []
        path_pk = "baselines.pkl"
        if os.path.exists(path_pk):
            print("The pkl file has existed!")
            with open(path_pk, 'rb') as f:
                (nx_graph, merge_graph, pos_edge_list, neg_edge_list,
                 nodes_attr) = pk.load(f)
        else:
            path = "Sampling_graph/Datasets_With_Attributes/" + os.path.basename(
                self.path) + ".graph"
            nx_graph, merge_graph, pos_edge_list, neg_edge_list, nodes_attr = Reader.data_load(
                path)
            with open(path_pk, 'wb') as f:
                pk.dump((nx_graph, merge_graph, pos_edge_list, neg_edge_list,
                         nodes_attr), f)
        # Modifying the node labels in the network requires sorting them first
        test_edges, test_labels = get_selected_edges(pos_edge_list,
                                                     neg_edge_list)
        nodes = sorted(list(merge_graph.nodes()))
        if nodes[0] > 0:
            train_edges.extend([[i, e[0] - 1, e[1] - 1, 1]
                                for i in range(len(nx_graph))
                                for e in nx_graph[i].edges()])
            train_merge = nx.relabel_nodes(merge_graph, lambda x: int(x) - 1)
            train_nxgraph = [
                nx.relabel_nodes(g, lambda x: int(x) - 1) for g in nx_graph
            ]
            test_edges = [[e[0] - 1, e[1] - 1] for e in test_edges]
            nodes = list(train_merge.nodes())
        else:
            train_edges.extend([[i, e[0], e[1], 1]
                                for i in range(len(nx_graph))
                                for e in nx_graph[i].edges()])
            train_nxgraph = copy.deepcopy(nx_graph)
            train_merge = copy.deepcopy(merge_graph)

        # Some node ids are not consecutive; the statements below renumber them to be consecutive
        if isinstance(test_edges[0], list):
            restru_test_edges = [(e[0], e[1]) for e in test_edges]
        else:
            restru_test_edges = [(nodes.index(e[0]), nodes.index(e[1]))
                                 for e in test_edges]
        str_graph = nx.relabel_nodes(train_merge, lambda x: str(x))

        # The code below builds the graph type defined by opennet, so existing single-layer network algorithms can be used for comparison
        G = opgraph.Graph()
        DG = str_graph.to_directed()
        G.read_g(DG)
        nx_para_graph = []
        for g in train_nxgraph:
            str_graph = nx.relabel_nodes(g, lambda x: str(x))
            G = opgraph.Graph()
            DG = str_graph.to_directed()
            G.read_g(DG)
            nx_para_graph.append(G)

        return train_nxgraph, restru_test_edges, train_merge, test_edges, train_edges, test_labels
Example #31
 def read_data(self, file_name):
     """
     Store the data read from the file into the x and y arrays.
     :return:
     """
     data = Reader.read_data(file_name)
     for i in range(self.n):
         self.x[i] = np.array(data[i][:15])
         self.y[i] = np.array(data[i][15:])
Example #32
def run():
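    # Seed the list with 0; the target is 3 above the highest input value.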
    data = [0]
    for number in Reader.read("input"):
        data.append(int(number))

    data = sorted(data)
    target = data[-1] + 3

    return trace(len(data), target, data)
Example #33
def getCallNames(fn):
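    # Count occurrences of each __stdcall routine name in the disassembly.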
    text = r.readAsmCode(fn)
    callDict = {}

    for row in text:
        if "(" in row:
            callName = row.split("__stdcall")[1].strip().split("(")[0]
            callDict[callName] = callDict.get(callName, 0) + 1
    return callDict
Example #34
    def __init__(self):
        '''controls and connects everything'''
        self.Instructions_memory = Reader.CardReader()
        print("Ingresa nombre del codigo (sin el .code)")
        file_name = input()
        data = [""] * 8  #creating data

        self.Instructions_memory.change_file(file_name)

        self.ALU = ALU.ALU()
        self.ram = RAM()
        self.registerA = Registers(data)
        self.registerB = Registers(data)
        self.registerC = Registers(data)
        self.registerD = Registers(data)

        self.instruction_register = Registers(
            data, 8)  #shows the selected data that was located in RAM

        self.output_register = Registers(data)

        data_bios = Reader.YamlReader().yaml_loader()
        ### variables to assign to CPU parts
        ramdata = data_bios.get('RAM_NUMBERS')
        ramdata = ramdata.split(' ')
        clockdata = data_bios.get('clock')
        self.clock_time = int(clockdata)
        visualizationdata = data_bios.get('visualization')
        ##assigns all RAM data to each RAM space
        for i in range(16):
            adress = bin(i)[2:].zfill(4)
            self.ram.write_enable(adress, ramdata[i])
        self.visualization_code = visualizationdata['code']
        self.visualization_ram = visualizationdata['RAM']
        self.visualization_registers = visualizationdata['Registers']
        self.visualization_clock = visualizationdata['clock']
        self.visualization_alu = visualizationdata['ALU']
Example #35
def eval_ast(ast, env_):
    if callable(ast):  # if it's a function just return it
        return ast
    _type = Reader.getType(ast)
    if _type == "symbol":
        return env_.get(ast)
    if _type == "list":  # call eval on all using list comprehension
        return [EVAL(element, env_) for element in ast]
    else:
        return ast  # literal value
Example #36
def getFunctionNames(fn):
    text = r.readAsmCode(fn)
    funcDict = {}

    for row in text:
        if "PRESS" in row:
            funcName = row[row.index("FUNCTION"):row.index("PRESS")].split(
                ".")[0].split(" ")[-1]
            funcDict[funcName] = funcDict.get(funcName, 0) + 1
    return funcDict
Example #37
def parse_text_plain(*args, **kw):
    headers = args[0].context.get_headers()
    ctype = headers.get('content-type')
    if ctype:
        ctype, opts = grailutil.conv_mimetype(ctype)
        if opts.get('format'):
            how = str.lower(opts['format'])
            if how == "flowed":
                from filetypes import FlowingText
                return FlowingText.FlowingTextParser(args, kw)
    return Reader.TextParser(args, kw)
Example #38
def main(args):

    if '.h5' in args.mask_path:
        args.mask_path = args.mask_path.replace('.h5', '')

    reader = nd.Reader("", args.mask_path, args.image_path)

    LaunchInstanceSegmentation(reader, args.image_type, args.fov,
                               args.range_of_frames[0],
                               args.range_of_frames[1], args.threshold,
                               args.min_seed_dist, args.path_to_weights)
Example #39
def generatePassports():
    current = {}
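    # Records are separated by blank lines; each line carries space-separated key:value fields.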
    for data in Reader.read("input"):
        if data == "":
            yield current
            current = {}
        else:
            for pair in data.split(" "):
                key, value = pair.split(":")
                current[key] = value
    yield current
Example #40
 def dispatchThread(msg, callindex):
     msg = Reader.extend(msg, self.extension.parse)
     if debugDispatch or profileDispatch: print "Dispatch " + str(callindex[0]) + ":", msg
     try:
         res = self.dispatch(*msg[1], **msg[2])
     except:
         exc_type, exc_value = sys.exc_info()[:2]
         e = exc_value or exc_type
         if Grimoire.Utils.isInstance(e, *debugExceptions) and not Grimoire.Utils.isInstance(e, *dontDebugExceptions):
             import traceback
             traceback.print_exc()
         res = Types.RaiseException()
     self.write(('RPCCallReturn', msg[0], res))
Example #41
    def readerThread(self):
        unknown = Exception('Unknown exception in reader thread')
        reads = 0
        while 1:
            reads += 1
            try:
                try:
                    if profileRead:
                        print "Read:", reads
                        p = hotshot.Profile(profileRead + str(reads))
                        try:
                            msg = p.runcall(Reader.read, self.buffer)
                        finally:
                            p.close()
                    else:
                        msg = Reader.read(self.buffer)
                    self.err = None
                    if debugRead: print "Read", reads, ":", msg
                except:
                    self.err = sys.exc_value
                    raise sys.exc_type, sys.exc_value, sys.exc_traceback
                try:
                    if msg[0] == 'RPCCall':
                        th = threading.Thread(
                            target = self.dispatchThread,
                            args = (msg[1:],),
                            name = 'Dispatch handler for %s' % threading.currentThread().getName())
                        if debugThreads: print "Starting dispatch thread %s" % th
                        th.start()
                    elif msg[0] == 'RPCCallReturn':
                        self.res[msg[1]] = msg[2]
                        if debugThreads: print "Reader thread: Locking readlock", msg[1]
                        self.readlocks[msg[1]].acquire()
                        if debugThreads: print "Reader thread: Notifying readlock", msg[1]
                        self.readlocks[msg[1]].notify()
                        if debugThreads: print "Reader thread: Unlocking readlock", msg[1]
                        self.readlocks[msg[1]].release()
                        if debugThreads: print "Reader thread: Unlocked readlock", msg[1]
                    else:
                        raise IOError('Unknown rpc message type', msg)
                except:
                    self.err = sys.exc_value

            finally:
                if self.err:
                    if debugThreads: print "Reader thread: Notifying all readlocks"
                    for readlock in self.readlocks.values():
                        readlock.acquire()
                        readlock.notify()
                        readlock.release()
                    if debugThreads: print "Reader thread: Notifying all readlocks done"
Example #42
def main():
    #host = "localhost"
    gui = GUI()
    port = int(textport)
    addr = (host, port)
    buf = 1024
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    sock.connect(addr)
    #sock.send("C1200");
    r = Reader(sock, gui)
    print "are u sure?"
    r.start()
    print "did u block?"
    gui.root.mainloop()

    #write as well as read from socket.

    #while(1):
    #    k = raw_input()
    #    sock.send(k)

    sys.exit()
Example #43
def changeSettings(old_user, new_username, new_password, gender, Countryin, Targetcountry):
    previousCredentials = read.getCsvDict('./util/credentials.txt')
    newCredentials = ''
    if new_username == '':
        newCredentials += old_user + ','
    else:
        newCredentials += new_username + ','
    if new_password == '':
        newCredentials += previousCredentials[old_user][0] + ','
    else:
        newCredentials += new_password + ','
    if gender == 'blank':
        newCredentials += previousCredentials[old_user][1] + ','
    else:
        newCredentials += gender + ','
    if Countryin == '':
        newCredentials += previousCredentials[old_user][2] + ','
    else:
        newCredentials += Countryin + ','
    if Targetcountry == '':
        newCredentials += previousCredentials[old_user][3]
    else:
        newCredentials += Targetcountry

    del previousCredentials[old_user]
    newCredentials = newCredentials.split(',')
    previousCredentials[newCredentials[0]] = newCredentials[1:]
    Credentials = ''
    for i in previousCredentials.keys():
        Credentials += i + ','
        for u in previousCredentials[i]:
            Credentials += u + ','
        Credentials = Credentials[:-1] + '\n'

    read.write_file('./util/credentials.txt', Credentials, 'w')
Example #44
def log_in():
    request_data = request.form  # immutable dict of the user's form inputs
    if request.method == "GET":
        # Tell the user to go through the main page if they navigated here directly
        return render_template('form.html', error='Please enter from the form on the main page')
    elif request.method == "POST":
        # Verify the username and password against the stored credentials
        credentials = Reader.getCsvDict('credentials.txt')
        if request_data['user'] in credentials and request_data['user'] != '':
            if request_data['pswd'] == credentials[request_data['user']][0]:
                return 'success'
            else:
                return render_template('form.html', error='Invalid username or password')
        else:
            return render_template('form.html', error='Invalid username or password')
    else:
        return 'yo'
Example #45
 def open(self):
     Assert(self.state == WAIT)
     resturl, method, params, data = self.args
     if data:
         Assert(method=="POST")
     else:
         Assert(method in ("GET", "POST"))
     if type(resturl) == type(()):
         host, selector = resturl    # For proxy interface
     else:
         host, selector = splithost(resturl)
     if not host:
         raise IOError, "no host specified in URL"
     i = string.find(host, '@')
     if i >= 0:
         user_passwd, host = host[:i], host[i+1:]
     else:
         user_passwd = None
     if user_passwd:
         import base64
         auth = string.strip(base64.encodestring(user_passwd))
     else:
         auth = None
     self.h = MyHTTP(host)
     self.h.putrequest(method, selector)
     self.h.putheader('User-agent', GRAILVERSION)
     if auth:
         self.h.putheader('Authorization', 'Basic %s' % auth)
     if not params.has_key('host'):
         self.h.putheader('Host', host)
     if not params.has_key('accept-encoding'):
         encodings = Reader.get_content_encodings()
         if encodings:
             encodings.sort()
             self.h.putheader(
                 'Accept-Encoding', string.join(encodings, ", "))
     for key, value in params.items():
         if key[:1] != '.':
             self.h.putheader(key, value)
     self.h.putheader('Accept', '*/*')
     self.h.endheaders()
     if data:
         self.h.send(data)
     self.readahead = ""
     self.state = META
     self.line1seen = 0
     if self.reader_callback:
         self.reader_callback()
Example #46
def generate_chains(fname, key_size):
    chains = {}
    text = reader.read_file(fname)

    words = text.split()
    i = 0
    while i < len(words) - key_size:
        key = ' '.join(words[i:i + key_size])
        value = words[i + key_size]
        if key in chains:
            chains[key].append(value)
        else:
            chains[key] = [value]
        i += 1
    return chains
Example #47
 def similiarity_of_texts(self, txt1, txt2):
     #print 'report for: ' + txt1
     #print 'report for: ' + txt2
     window = self._windows[0]
     method = self._methods[0]
     raw_txt_1 = Reader.readFromFile(txt1)
     raw_txt_2 = Reader.readFromFile(txt2)
     words_1 = Reader.extractWords(raw_txt_1, "russian")
     words_2 = Reader.extractWords(raw_txt_2, "russian")
     keywords_1 = Reader.meter(words_1)
     keywords_2 = Reader.meter(words_2)
     (gr_1) = self.get_graph_for(words_1, keywords_1, method, window)
     (gr_2) = self.get_graph_for(words_2, keywords_2, method, window)
     measure_terms = TermGraph.compare_graphs_terms(gr_1, gr_2)
     measure_edges = TermGraph.compare_graphs_edges(gr_1, gr_2)
     return (measure_terms, measure_edges)
Example #48
    def get_category_top_tf(self, categories, cat_test, mode='rw_oc', window=3):
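        # Train (and cache) a Naive Bayes classifier over top-tf terms, then score files from the cat_test split.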
        if not self.classifier_tf_cache:
            gr_array = {}
            for category in categories:
                gr_array[category] = []

            for category in categories:
                files = Reader.batchReadReuters('training', [category])

                for file_name in files:
                    raw_txt = Reader.readFromFile('/home/dales3d/nltk_data/corpora/reuters/' + file_name)
                    words = Reader.extractWords(raw_txt)
                    keywords = Reader.meter(words)
                    if file_name not in self.graph_cache.keys():
                        gr = self.get_graph_for(words, keywords, mode, window)
                        self.graph_cache[file_name] = gr
                    else:
                        #print 'tf from cache: ', file_name
                        gr = self.graph_cache[file_name]
                    gr_array[category].append(gr)

            top_words_tf = set()
            gr_ver_arr = []
            for k in gr_array.keys():
                gr_ver_arr += gr_array[k]
            for gr in gr_ver_arr:
                top_tf = gr.getTopVerts("tf", 100)
                for w_tf in top_tf:
                    top_words_tf.add(w_tf)

            tf_dict = {}
            for v in top_words_tf:
                if v.term_value in tf_dict:
                    tf_dict[v.term_value] += v.term_weight_tf
                    #if v.term_weight_tf > tf_dict[v.term_value]:
                    #    tf_dict[v.term_value] += v.term_weight_tf
                else:
                    tf_dict[v.term_value] = v.term_weight_tf

            sort_tf = sorted(tf_dict.items(), key=lambda x: x[1], reverse=True)
            sort_tf = sort_tf[:1000]
            self.tf_features_cache = sort_tf

            training_set = []
            for category in categories:
                for gr in gr_array[category]:
                    top_tf = gr.getTopVerts("tf", 100)
                    features = {}
                    file_top_words = set()
                    for w_tf in top_tf:
                        file_top_words.add(w_tf.term_value)
                    for term in sort_tf:
                        features[term[0]] = (term[0] in file_top_words)
                    training_set.append((features, category))

            self.classifier_tf_cache = nltk.NaiveBayesClassifier.train(training_set)
        #from cache
        sort_tf = self.tf_features_cache
        #test
        #cat_test = "jobs"
        files = Reader.batchReadReuters('test', [cat_test])
        #gr_tests
        results = {}
        for category in categories:
            results[category] = 0
        for f in files:
            cats = reuters.categories(f)
            raw_txt = Reader.readFromFile('/home/dales3d/nltk_data/corpora/reuters/' + f)
            words = Reader.extractWords(raw_txt)
            keywords = Reader.meter(words)
            if f not in self.graph_cache.keys():
                gr = self.get_graph_for(words, keywords, mode, window)
                self.graph_cache[f] = gr
            else:
                gr = self.graph_cache[f]
            top_tf = gr.getTopVerts("tf", 1000)
            features = {}
            file_top_words = set()
            for w_tf in top_tf:
                file_top_words.add(w_tf.term_value)
            for term in sort_tf:
                features[term[0]] = (term[0] in file_top_words)
            result = self.classifier_tf_cache.classify(features)
            if result in cats:
                results[cat_test] += 1
            else:
                results[result] += 1
        print results
        total = 0
        for cat_key in results.keys():
            total += results[cat_key]
        print float(results[cat_test]) / total * 100
Example #49
    scheight_mm = float(w.winfo_screenmmheight())
    scwidth_mm = float(w.winfo_screenmmwidth())
    vert_pixels_per_in = scheight / (scheight_mm / 25)
    horiz_pixels_per_in = scwidth / (scwidth_mm / 25)
    result = (72.0 / horiz_pixels_per_in), (72.0 / vert_pixels_per_in)
##     print "scaling adjustments:", result
    return result


def PrintDialog(context, url, title):
    try:
        infp = context.app.open_url_simple(url)
    except IOError, msg:
        context.error_dialog(IOError, msg)
        return
    content_encoding, transfer_encoding = Reader.get_encodings(infp.info())
    try:
        ctype = infp.info()['content-type']
    except KeyError:
        ctype, encoding = context.app.guess_type(url)
        if not content_encoding:
            content_encoding = encoding
    if not ctype:
        MaybePrintDialog(context, url, title, infp)
        return
    if not Reader.support_encodings(content_encoding, transfer_encoding):
        # create an alert of some sort....
        return
    ctype, ctype_params = grailutil.conv_mimetype(ctype)
    mod = context.app.find_type_extension("printing.filetypes", ctype)
    if ctype != "application/postscript" and not mod.parse:
Example #50
from random import random
import Reader


Dict = Reader.getCsvDict('data/wordlist.csv')
Subjects = Dict['nouns']
Verbs = Dict['verbs']
Adjectives = Dict['adjectives']
Adverbs = Dict['adverbs']
Objects = Subjects



"""
lines=Reader.getCsvList('wordlist.csv')

Subjects=lines[0]
Subjects.pop(0)
Verbs=lines[1]
Verbs.pop(0)
Adjectives=lines[2]
Adjectives.pop(0)
Adverbs=lines[3]
Adverbs.pop(0)
Objects=Subjects
"""



#Subjects=["Stanley","Lev","Spencer","Josh","Michael","Topher","JonAlf","Chris","Abdullah","Zane","Justin"]
#Verbs=["walks","runs","strolls","saunters","travels","drives","presents","explains","projects"]
Example #51
Dates = [str(c) for c in Dates]
DateTime = []
for date in Dates:
	for time in searchTimes:
		print int(date[0:4]),int(date[4:6]),int(date[6:8])
		DateTime.append(pandas.datetime(int(date[0:4]),int(date[4:6]),int(date[6:8]),int(time[0][0:2]),0,0))

print DateTime

clusters = range(668)               #import this from files and user input
times = [12,24,48,72]
analyses = ['sd','cusum']
filters = ['gaussian','uniform']

userin1 = raw_input("Input the file location of the data: ")
data = rd.readFile(userin1)
userin2 = raw_input("Where do you want to put the new data? ")

count = 0
for t1 in times:
	for t2 in times:
		for f1 in filters:
			for f2 in filters:
				with open(userin2 + '/Cluster0011_'+f1+'_'+f2+'_'+str(t1)+'_'+str(t2)+'.csv','wb') as csvfile:
					sd = [str(DateTime[x]) for x in data[11][t1][f1][analyses[0]]] #sd list
					cu = [str(DateTime[x]) for x in data[11][t1][f1][analyses[1]]] #cusum list
					
					count += len(sd)+len(cu)
					csvwriter = csv.writer(csvfile,delimiter=',')
					csvwriter.writerow(['SD']+sd)
					csvwriter.writerow(['Cusum']+cu)
Example #52
def set_only_dm_data(clusters_file, clusters, dm_file, center_param, min_age,
                     max_age, start, finish, min_number_of_total_tests):
    global patients
    Reader.read_clusters(clusters_file, clusters)
    patients = Reader.read_dm_compensation_t90(dm_file, center_param, min_age, max_age, start, finish,
                                               min_number_of_total_tests)
Example #53
 def wrap_parser(self, parser):
     # handle the content-encoding and content-transfer-encoding headers
     headers = self.infp.info()
     content_encoding, transfer_encoding = Reader.get_encodings(headers)
     return Reader.wrap_parser(parser, self.ctype,
                               content_encoding, transfer_encoding)
Example #54
 def sendData(self,command):
     sock.send(command)
     r = Reader(sock)
     r.start()
Example #55
    def get_category_graph(self, categories, cat_test, mode='rw_oc', window=3):
        gr_array = {}

        #print '--indexing--'
        for category in categories:
            files = Reader.batchReadReuters('training', [category])
            #print 'category: ', category
            big_cat_raw_txt = ''
            #print '1) read files: start'
            for file_name in files:
                big_cat_raw_txt += Reader.readFromFile('/home/dales3d/nltk_data/corpora/reuters/' + file_name)
            #print '1) read files: finished'
            #print '2) preprocess text: start'
            words = Reader.extractWords(big_cat_raw_txt)
            keywords = Reader.meter(words)
            #print '2) preprocess text: finished'
            #print '3) term weighting: start'
            if category in self.graph_cache.keys():
                gr = self.graph_cache[category]
            else:
                gr = self.get_graph_for(words, keywords, mode, window)
                self.graph_cache[category] = gr
            #print '3) term weighting: finished'
            gr_array[category] = gr

        files = Reader.batchReadReuters('test', [cat_test])
        #gr_tests
        results = {}
        results[''] = 0
        for category in categories:
            results[category] = 0
        for f in files:
            #print '---', f, '---'
            cats = reuters.categories(f)
            raw_txt = Reader.readFromFile('/home/dales3d/nltk_data/corpora/reuters/' + f)
            words = Reader.extractWords(raw_txt)
            keywords = Reader.meter(words)
            if f not in self.graph_cache.keys():
                gr = self.get_graph_for(words, keywords, mode, window)
                self.graph_cache[f] = gr
            else:
                gr = self.graph_cache[f]

            sim = {}
            max_res = 0.0
            max_cat = ''
            for category in categories:
                gr_1 = gr_array[category]
                gr_2 = gr
                #(mes_l, mes_r) = TermGraph.compare_graphs_terms_with_weight(gr_1, gr_2)
                measure_terms = TermGraph.compare_graphs_terms_with_weight(gr_1, gr_2)
                measure_edges = TermGraph.compare_graphs_edges(gr_1, gr_2)
                res = measure_terms * (1 + measure_edges)
                if max_res < res:
                    max_cat = category
                    max_res = res
                sim[category] = res

            if max_cat in cats:
                results[cat_test] += 1
                #print max_res
            else:
                results[max_cat] += 1
                #print max_res
                print 'not ok'
        print results
        total = 0
        for cat_key in results.keys():
            total += results[cat_key]
        print float(results[cat_test]) / total * 100
Example #56
 def run(self):
     print "Supervisor, checking in! pid:", os.getpid()
     # If image directory does not exist yet, create it!
     config = Utility.getProjectConfig()
     imgdir = config.get('directories','imagedirectory')
     print imgdir
     if not os.path.isdir(imgdir):
         try:
             os.makedirs(imgdir)
         except OSError as exception:
             if exception.errno != errno.EEXIST:
                 raise
     guiProcess = Process(target = self.startGUI)
     guiProcess.start()
     # Establish whether we have stable internet
     stableInternetCounter = 0
     stableInternet = False  
     for i in range(Utility.STABLE_INTERNET_COUNT):
         if Utility.checkInternetConnection():
             stableInternetCounter += 1
     if (stableInternetCounter >= Utility.STABLE_INTERNET_COUNT):
         self.statusQueue.put(Utility.QMSG_IDLE)
         stableInternet = True
     else:
         self.statusQueue.put(Utility.QMSG_INTERNET_NO)
         stableInternet = False
     # Initialize with all images currently on camera
     self.statusQueue.put(Utility.QMSG_SCAN)
     initialScanFail = True
     try:
         Reader.camera_filenames_to_file(Utility.OLD_PICS_FILE_NAME)
         self.statusQueue.put(Utility.QMSG_SCAN_DONE)
         initialScanFail = False
     except:
         self.statusQueue.put(Utility.QMSG_SCAN_FAIL)
         initialScanFail = True
     time.sleep(Utility.POLL_TIME)
     handlerProcess = None
     handlerDelayed = False
     readerProcess = None
     while True:
         if not self.guiQueue.empty():
             job = self.guiQueue.get()
             if job == Utility.QMSG_START:
                 if initialScanFail:
                     try: # OH GOD PLEASE REFACTOR THIS DUPLICATED CODE !!!!!!!!!!!!!!
                         Reader.camera_filenames_to_file(Utility.OLD_PICS_FILE_NAME)
                         self.statusQueue.put(Utility.QMSG_SCAN_DONE)
                         initialScanFail = False
                     except:
                         self.statusQueue.put(Utility.QMSG_SCAN_FAIL)
                         initialScanFail = True
                     continue # cannot complete job as normal if no baseline scan
                     # REFACTOR THIS CODE, DON'T FORGET! try Supervisor.initialScan()
                 print "Supervisor handles Upload job here"
                 readerProcess = Process(target = self.startReader)
                 self.readerQueue.put(Utility.QMSG_SCAN)
                 self.statusQueue.put(Utility.QMSG_SCAN)
                 readerProcess.start()
                 # wait for reader to finish scanning
                 readerProcess.join()
                 scanMsg = Utility.readMessageQueue(self.readerQueue)
                 if scanMsg == Utility.QMSG_SCAN_FAIL:
                     self.statusQueue.put(Utility.QMSG_SCAN_FAIL)
                     scanMsg = 0 
                     continue # failed, tell GUI but ignore the rest of this job
                 elif scanMsg == Utility.QMSG_SCAN_DONE:
                     self.statusQueue.put(Utility.QMSG_SCAN_DONE)
                     scanMsg = 0
                 else:
                     print "Something went wrong with the ReaderMsgQueue!"
                 if stableInternet: # only start Handler if stable connection
                     handlerProcess = Process(target = self.startHandler)
                     self.handlerQueue.put(Utility.QMSG_HANDLE)
                     handlerProcess.start()
                     handlerDelayed = False
                 else:
                     time.sleep(Utility.POLL_TIME)
                     self.statusQueue.put(Utility.QMSG_INTERNET_NO)
                     handlerDelayed = True
             elif job == Utility.QMSG_SETTINGS:
                 print "Supervisor handles Settings job here if needed"
             else:
                 raise Exception('Supervisor.run:  unexpected object in queue')
         # endif self.guiQueue.empty()
         
         # Start upload if delayed and internet is now stable
         if handlerDelayed and stableInternet:
             handlerProcess = Process(target = self.startHandler)
             self.handlerQueue.put(Utility.QMSG_HANDLE)
             handlerProcess.start()
             handlerDelayed = False   
         
         time.sleep(Utility.POLL_TIME) # wait for handlerProcess to actually start
         if not self.handlerQueue.empty():
             handlerMsg = Utility.readMessageQueue(self.handlerQueue) 
             if handlerMsg == Utility.QMSG_UPLOAD:
                 self.statusQueue.put(Utility.QMSG_UPLOAD) 
             elif handlerMsg == Utility.QMSG_UPLOAD_DONE:
                 self.statusQueue.put(Utility.QMSG_UPLOAD_DONE)
             elif handlerMsg == Utility.QMSG_HANDLE_NONE:
                 self.statusQueue.put(Utility.QMSG_HANDLE_NONE)
             else:
                 self.statusQueue.put("Unknown Message from handlerQueue")
         
         # Check current internet connection, allowing for some fluctuation in results
         if Utility.checkInternetConnection():
             if stableInternetCounter < Utility.STABLE_INTERNET_COUNT:
                 stableInternetCounter += 1
             else:
                 stableInternet = True
                 self.statusQueue.put(Utility.QMSG_INTERNET_YES)
             #print 'DEBUG: checkInternetConnection() == True'
         else:
             if (stableInternetCounter > 0): # only count down to 0
                 stableInternetCounter -= 1 
             if stableInternetCounter < Utility.STABLE_INTERNET_COUNT/2:
                 stableInternet = False
                 self.statusQueue.put(Utility.QMSG_INTERNET_NO)
             #print 'DEBUG: checkInternetConnection() == False'
         print 'DEBUG: stableInternet:', stableInternet, 'stableInternetCounter:', stableInternetCounter    
     
         time.sleep(Utility.POLL_TIME)
Example #57
                            #print 'word: ', word.encode('utf8')
                            if not stream.has_key(word):
                                stream[word] = 0
                            stream[word] += 1
                            new_word = w
                            word = ''
                        else:
                            break
                    else:
                        #print 'find: ', new_word.encode('utf8')
                        word = new_word
                        break
            break
        return stream


if __name__ == '__main__':
    token = Tokenizer(Dictionary('lexdb.dat'))
    reader = Reader('.')
    for (filename, fullpath, filesize, create_time) in reader.txt_files():
        print filename
        stream = token.parse(fullpath)
        print len(stream)
        for key in stream.keys():
            print '%s: %s' % (key, stream[key])
        print '-----------------------'