Example #1
def noLabelsExist():
    clusters = state.get('clusters')
    mustLabelModels = state.get('mustLabelModels')
    if len(mustLabelModels[NO_LABEL_STRING]):
        return True

    return (NO_LABEL_STRING in clusters) and len(clusters[NO_LABEL_STRING])
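Throughout these examples, keyed calls such as state.get('clusters') and state.set('labels', ...) assume a small module-level key-value store, while the bytelynx/kademlia snippets use an argument-free state.get() that returns a singleton application object. A minimal dict-backed sketch of the keyed flavor, for orientation only (hypothetical, not the actual state module of any of these projects):

# state.py - hypothetical dict-backed sketch of the keyed get/set API
# used by many of the examples below.
_store = {}


def get(key, default=None):
    # Return the value stored under `key`, or `default` if it was never set.
    return _store.get(key, default)


def set(key, value):
    # Create or overwrite the value stored under `key`.
    _store[key] = value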
Example #2
def on_dh_B(contact, data):
    """
    Handler for the Diffie-Hellman B param.
    Ensures the state is correct and mixes.
    Creates the next level of crypto (aes-dht).
    """
    import state
    dh_crypto = contact.channels['bytelynx'].crypto

    # Extract the key
    dh_crypto.B = data['dh_B']
    shared = contact.channels['bytelynx'].crypto.key
    sha_func = SHAModes[KEY_SIZE * 8]
    s_hash = sha_func(shared.to_bytes(KEY_SIZE, 'little')).digest()

    # Create our AES crypto
    aes_crypto = contact.create_channel('aes-dht').crypto
    aes_crypto.set_key(s_hash)
    # print("AES key: %s" % aes_crypto.key)

    # Try to send our A, if needed
    try:
        state.get().net.send_data(contact, 'dh.mix', {'dh_B': dh_crypto.A})
    # Happens if we have already sent an A
    # Send a DHT hello
    except StateError:
        state.get().net.send_data(contact, 'dht.ping', {})
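The key-derivation step above boils down to serializing the integer Diffie-Hellman shared secret and hashing it into a fixed-size AES key. A standalone sketch using hashlib, assuming KEY_SIZE is 32 bytes and that SHAModes[256] corresponds to SHA-256 (both are assumptions; the real constants live in the project):

import hashlib

KEY_SIZE = 32  # assumed key length in bytes


def derive_aes_key(shared_secret: int) -> bytes:
    # Mirrors sha_func(shared.to_bytes(KEY_SIZE, 'little')).digest() above,
    # with SHA-256 standing in for SHAModes[KEY_SIZE * 8].
    return hashlib.sha256(shared_secret.to_bytes(KEY_SIZE, 'little')).digest()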
Example #3
def popModelsAndCheckQuery(modelStrings, query, lockR, lockW, validModels):
    tempDirPath = state.get('tempDirPath')
    tempFilePath = join(tempDirPath, uuid.uuid4().hex + '.las')
    totalNumOfModels = state.get('numOfInputModels')
    labelledModelIds = state.get('labelledModelIds')
    maxModelsAtOnce = MODELS_PER_PROC

    while True:
        currModels = list()
        lockR.acquire()
        if (not len(modelStrings)):
            lockR.release()
            return

        numOfModels = min(maxModelsAtOnce, len(modelStrings))
        # print(numOfModels)
        for idx in range(numOfModels):
            currModels.append(modelStrings.pop())
        # print(len(modelStrings))
        lockR.release()

        modelObjs = list(map(utils.computeModelObjFromModelStr, currModels))
        validCurrModels = getValidModels(modelObjs, query, tempFilePath)
        nonLabelledModels = list(
            filter(lambda m: m.modelId not in labelledModelIds,
                   validCurrModels))

        lockW.acquire()
        validModels.extend(nonLabelledModels)
        utils.printProgressBar(totalNumOfModels, numOfIterations=numOfModels)
        lockW.release()
Example #4
    def helpPopup():
        infoText = (
            'The following predicates may be used:\n\n' +
            ' * comp(V, compC), where compC is a constant component name\n')

        # This is because "are_connected" is in the default background knowledge
        # so it can be used not matter if it goes in the mode bias or not
        if "are_connected" not in state.get('relevantPatterns'):
            infoText += ' * ' + PATTERNS_SIGANTURES["are_connected"] + '\n'

        for pattern in state.get('relevantPatterns'):
            infoText += ' * ' + PATTERNS_SIGANTURES[pattern] + '\n'

        helpDialog = Toplevel()
        helpDialog.geometry("400x300")
        helpDialog.title("Help")

        helpFrame = Frame(helpDialog)
        helpFrame.pack(expand=True, fill=BOTH)
        helpFrame.grid_propagate(False)
        helpFrame.grid_rowconfigure(0, weight=1)
        helpFrame.grid_columnconfigure(0, weight=1)

        helpMsg = Message(helpFrame, text=infoText, width=390)
        helpMsg.pack()

        helpBtn = Button(helpDialog, text="Ok", command=helpDialog.destroy)
        helpBtn.pack(side=BOTTOM)

        noteText = ('\nNote: V, V0, V1, etc. denote component variables.\n')
        noteMsg = Message(helpDialog, text=noteText, width=390)
        noteMsg.pack(side=BOTTOM)
Example #5
def parseInputFile():
    print('* Parsing input file...\n')
    modelStrings = utils.getModelsStrings(state.get('inputFilePath'))
    state.set('numOfInputModels', len(modelStrings))

    # Randomize the order of the models, regardless of whether we select a
    # subsample of MAX_RELEVANT_MODELS or keep all of them. This pays off after
    # clustering: the models within each cluster will already be in random
    # order, so we can simply pop the first/last one instead of generating a
    # random index per cluster.
    randomSampleSize = min(len(modelStrings), MAX_RELEVANT_MODELS)
    modelStrings = random.sample(modelStrings, randomSampleSize)

    models = computeAllModelObjects(modelStrings)
    state.set('sampleModelIds', utils.getModelIds(models))

    if not state.get('prenamedComponents'):
        compNames = set(
            list(map(lambda comp: comp.name, state.get('componentTypes'))))
        state.set('componentNames', compNames)

    print('* Setting up model selection algorithm...')
    compositionVectors = computeModelsCompositionVectors(models)
    labels = clusterModels(compositionVectors)

    return computeClusterToModelMapping(models, labels)
Example #6
def showExpectedLabelsDistribution():
    clusters = state.get('clusters')
    userLabelCounters = state.get('userLabelCounters')
    labels = state.get('labels')

    userLabels = list(filter(lambda l: userLabelCounters[l] > 0, labels))
    clusterLabels = list(clusters.keys())

    allLabels = list(set(userLabels + clusterLabels))

    values = list()
    for l in allLabels:
        val = 0

        if l in userLabels:
            val += userLabelCounters[l]
        if l in clusterLabels:
            val += len(clusters[l])

        values.append(val)

    title = 'Expected labels distribution with current hypotheses'
    # print(allLabels)
    # print(values)

    generatePieChart(allLabels, values, title=title)
Example #7
def setComponentName(comp, compIdToNameMap, prenamed, nameComponents):
    if (prenamed):
        if (comp.compId not in compIdToNameMap):
            raise ValueError('No name for ' + comp.compId + '!')
        comp.name = compIdToNameMap[comp.compId]
        state.get('componentNames').add(comp.name)

        return

    # if not prenamed:

    compTypes = state.get('componentTypes')
    try:
        idx = compTypes.index(comp)
        comp.name = compTypes[idx].name
    except ValueError:
        if nameComponents:
            generateCompDiagram(comp)
            comp.name = input(
                ' - Found new component, please provide a name for it: ')
            print()
        else:
            unnamedTypesCounter = state.get('unnamedTypesCounter')
            comp.name = 'type' + str(unnamedTypesCounter)
            state.set('unnamedTypesCounter', unnamedTypesCounter + 1)
        compTypes.append(comp)
Example #8
def multipleLabelsExist():
    clusters = state.get('clusters')
    mustLabelModels = state.get('mustLabelModels')
    if len(mustLabelModels[MULTIPLE_LABELS_STRING]):
        return True

    return (MULTIPLE_LABELS_STRING in clusters) and len(
        clusters[MULTIPLE_LABELS_STRING])
Example #9
def generateNegExample(model):
    noise = state.get('noise')
    eId = 'e' + str(len(
        state.get('labelledModelIds'))) + '@' + str(EXAMPLE_PENALTY)
    eStr = '#pos('
    if noise:
        eStr += eId + ', '
    return eStr + '{}, {' + ILASP_LABEL_STRING + '}, {' + generateContext(
        model) + '}).'
Example #10
def getRemainingModelsList():
    clusters = state.get('clusters')
    clusterKeys = list(clusters.keys())
    allModels = list()

    for ck in clusterKeys:
        allModels += clusters[ck]

    return allModels + state.get('skippedModels')
Example #11
def search_contact():
    hash_ = input("Enter the hash [rand]: ")
    if (not hash_):
        size = state.get().config['kademlia']['keysize'] // 8
        hash_ = Hash(os.urandom(size))
    else:
        hash_ = Hash(base64.b64decode(bytes(hash_, 'UTF-8')))
    s = state.get()
    s.kademlia.init_search(hash_)
Example #12
def search_contact():
    hash_ = input("Enter the hash [rand]: ")
    if not hash_:
        size = state.get().config["kademlia"]["keysize"] // 8
        hash_ = Hash(os.urandom(size))
    else:
        hash_ = Hash(base64.b64decode(bytes(hash_, "UTF-8")))
    s = state.get()
    s.kademlia.init_search(hash_)
Example #13
def on_dh_g(contact, data):
    """
    Handler for Diffie-Hellman g params.
    Ensures the state is correct and mixes.
    """
    import state
    crypto = contact.channels['bytelynx'].crypto
    crypto.g = data['dh_g']
    state.get().net.send_data(contact, 'dh.mix', {'dh_B': crypto.A})
Example #14
def runILASPCommands(labelsToUpdateHypotheses):
    utils.printTitle(
        'Please wait while the hypotheses are being computed, this might take a while.'
    )

    backGroundStr = utils.getBackgroundString()
    biasConstantsStr = utils.computeBiasConstants()
    genericBiasStr = utils.getBiasString().replace('$$CONSTANTS$$',
                                                   biasConstantsStr)
    outputs = {}
    lock = Lock()
    threads = list()

    for label in labelsToUpdateHypotheses:
        threads.append(
            Thread(target=runILASPCMDInThread,
                   args=(backGroundStr, genericBiasStr, label, outputs, lock),
                   daemon=True))

    for thread in threads:
        thread.start()

    for thread in threads:
        thread.join()

    try:
        utils.updateHypotheses(outputs)
        state.get('hypothesesToUpdate').clear()
    except ExitError as e:
        print(
            '** Error: No hypotheses covering ALL manual classifications were found.\n'
        )
        if state.get('noise'):
            # Technically, shouldn't be able to get here;
            # If we do, raise the error in order to fully exit
            raise e

        print('Would you like to:')
        print(
            "(1) Continue search for hypotheses with BEST coverage of manual classifications?"
        )
        print('(2) Exit?')
        while True:
            try:
                ans = int(input('Your answer (1/2): '))
                if (ans < 1 or ans > 2):
                    raise ValueError
                break
            except ValueError:
                continue
        if (ans == 2):
            raise e
        else:
            state.set('noise', True)
            utils.noisifyExamplesFiles()
            runILASPCommands(state.get('labels'))
Example #15
def printClusters():
    clusters = state.get('clusters')
    clusterWeights = state.get('clusterWeights')
    clusterKeys = list(clusters.keys())

    print('\nClusters:')
    for ck in clusterKeys:
        print(
            str(ck) + ': ' + str(list(map(lambda m: m.labels, clusters[ck]))))
        print(str(ck) + ' weight: ' + str(clusterWeights[ck]))
    print()
Example #16
def computeLabelsForModelObj(modelObj, tempFilePath='', forceCompute=False):
    labelsForModel = list()

    if (state.get('labelPredictionsUpdated') and not forceCompute):
        labelsForModel = modelObj.labels
    else:
        tempFilePath = tempFilePath or join(state.get('tempDirPath'),
                                            uuid.uuid4().hex + '.las')
        labelPredsForModel = computeLabelPredsForModels([modelObj],
                                                        tempFilePath)
        labelsForModel = list(map(getLabelFromLabelPred, labelPredsForModel))

    return labelsForModel
Example #17
def computeAllModelObjects(models):
    allModels = list()
    utils.initProgressBar()
    canPrintProgressBar = state.get('prenamedComponents') or (
        not state.get('nameComponents'))

    for modelStr in models:
        newModelObj = utils.computeModelObjFromModelStr(modelStr)
        allModels.append(newModelObj)
        if canPrintProgressBar:
            utils.printProgressBar(len(models))

    return allModels
Example #18
def removeModelFromLists(mId):
    clusters = state.get('clusters')
    noLabelMusts = state.get('mustLabelModels')[NO_LABEL_STRING]
    multipleLabelsMusts = state.get('mustLabelModels')[MULTIPLE_LABELS_STRING]
    queryCache = state.get('queryCache')
    skippedModels = state.get('skippedModels')

    for ck in list(clusters.keys()):
        removeModelFromList(mId, clusters[ck])
    removeModelFromList(mId, noLabelMusts)
    removeModelFromList(mId, multipleLabelsMusts)
    removeModelFromList(mId, queryCache)
    removeModelFromList(mId, skippedModels)
Example #19
    def on_channel_finalization(self, channel):
        # If we had messages waiting on the creation of this channel
        if len(self.sent_msg_queue[channel.mode]) > 0:
            import state
            net = state.get().net
            for msg_name, data in self.sent_msg_queue[channel.mode]:
                net.send_data(self, msg_name, data)
        if len(self.recv_msg_queue[channel.mode]) > 0:
            import state
            net = state.get().net
            for addr, raw_data in self.recv_msg_queue[channel.mode]:
                net.on_data(addr, raw_data)
        self.channel_finalization(channel.mode)
Example #21
def printHypotheses():
    hypotheses = state.get('hypotheses')
    labels = state.get('labels')

    for labelId in range(len(labels)):
        label = labels[labelId]
        print("* Hypothesis for label '" + label + "':\n")
        if label in hypotheses.keys():
            hyp = hypotheses[label].replace(ILASP_LABEL_STRING, label)
            print(hyp)
        printSepLine(50)

        if labelId < (len(labels) - 1):
            print()
Example #22
def go_to_sleep():
    """
    Put the device to sleep for a specified number of seconds
    """
    wait_time = (clock_controller.get_time() - state.get('last_spray_time')) % 60
    # If the wait time is under 20 seconds and the next wake-up would still be
    # more than 60 seconds before the next spray, extend the wait by a minute.
    next_spray_time = state.get('last_spray_time') + state.get('interval')
    next_wake_time = clock_controller.get_time() + wait_time
    if wait_time < 20 and next_wake_time + 60 < next_spray_time:
        wait_time += 60
    nucleo_controller.reset_clock_time(wait_time)
    # it should die after reaching here
    while True: # spin lock to allow time for the nucleo to kill this processor
        pass
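To make the timing arithmetic concrete, here is a small worked example with assumed values (seconds since a reference point; none of these numbers come from the project):

# Assumed example values, in seconds.
now = 1000
last_spray_time = 985
interval = 300

wait_time = (now - last_spray_time) % 60      # (1000 - 985) % 60 == 15
next_spray_time = last_spray_time + interval  # 1285
next_wake_time = now + wait_time              # 1015
if wait_time < 20 and next_wake_time + 60 < next_spray_time:
    wait_time += 60                           # 15 < 20 and 1075 < 1285, so the wait becomes 75
print(wait_time)                              # 75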
Example #23
def preProcessingFunc():
    numOfArgs = len(sys.argv)
    mainScriptPath = sys.argv[0]

    try:
        inputFilePath = sys.argv[numOfArgs - 1]
    except IndexError:
        raise RuntimeError('No file with models provided!')

    utils.setParamsFromArgs(sys.argv[1:(numOfArgs - 1)])

    state.set('mainScriptPath', utils.getAbsPath(mainScriptPath))
    state.set('inputFilePath', utils.getAbsPath(inputFilePath))
    state.set('outputFilePath', utils.computeClassFilePath())

    utils.printTitle('Pre-processing of given file is about to begin.')
    tempDirPath = utils.createTempDirectory(mainScriptPath)
    state.set('tempDirPath', tempDirPath)

    if (not state.get('prenamedComponents')):
        nameComponentsInput = None
        while (nameComponentsInput != 'y' and nameComponentsInput != 'n'):
            nameComponentsInput = input(
                'Would you like to ' +
                'name the components for more human-readable class hypotheses? '
                + '(y/n) ').lower()
            print()
        state.set('nameComponents', nameComponentsInput == 'y')

    clustersMap = preProcessing.parseInputFile()
    state.set('clusters', clustersMap)
    utils.initClusterWeights()

    utils.printTitle('Pre-processing of file complete!')

    while not len(state.get('relevantPatterns')):
        setRelevantPatterns()

    labels = utils.getAllLabelsFromUser()
    labelExamplesPaths = utils.createLabelExampleFiles(labels)

    state.set('labels', labels)
    state.set('labelExamplesPaths', labelExamplesPaths)
    utils.initUserLabelCounters()

    utils.setDefaultQuery()

    utils.printTitle('Thank you, classification process will now begin.')
Example #24
def checkQueryCorrectness(query):
    queryFilePath = join(state.get('tempDirPath'), QUERY_FILE_NAME)
    file = open(queryFilePath, 'w')
    file.write(query)
    file.close()

    clingoCmd = list(GENERIC_CLINGO_CMD)
    clingoCmd.append(queryFilePath)
    out, err = Popen(clingoCmd,
                     stdout=PIPE,
                     stderr=PIPE,
                     universal_newlines=True).communicate()

    if 'ERROR' in err:
        print('\n*** ALERT: Syntax error in query.')
        return False

    queryRules = getQueryRules(query)
    spacedRules = list(map(addSpaces, queryRules))
    for idx in range(len(queryRules)):
        rule = queryRules[idx]
        spacedRule = spacedRules[idx]

        if ':-' not in rule:
            print("\n*** ALERT: No rules without body allowed: '" + rule + "'")
            return False

        i = rule.index(':-')
        if (' ' + QUERY_KEY_HEAD + ' ') in spacedRule[i:]:
            print("\n*** ALERT: '" + QUERY_KEY_HEAD +
                  "' not allowed in rule body: '" + rule + "'")
            return False

    return True
Example #25
def checkAndRecluster():
    if (state.get('labelPredictionsUpdated')):
        return

    allModels = getRemainingModelsList()
    allModelsNum = len(allModels)
    lockR = Lock()
    lockW = Lock()
    newClusters = {}
    initProgressBar()

    threads = list()
    for tIdx in range(CLASSIFICATION_THREADS):
        threads.append(
            Thread(target=computeLabelsForNewModels,
                   args=(allModels, newClusters, allModelsNum, lockR, lockW),
                   daemon=True))

    printTitle('Recalibrating algorithm with new hypotheses, please wait.')

    for thread in threads:
        thread.start()

    for thread in threads:
        thread.join()

    updateClusters(newClusters)
    state.set('skippedModels', [])
    state.set('labelPredictionsUpdated', True)
    showExpectedLabelsDistribution()
Example #26
def computeClassFilePath():
    inputFileAbsPath = state.get('inputFilePath')
    fileName = basename(inputFileAbsPath).split('.', 1)[0]
    fileDir = dirname(inputFileAbsPath)
    rand = uuid.uuid4().hex[:5]

    return join(fileDir, fileName + '_classified_' + rand + '.pl')
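As a usage note, rerunning the same path construction with a hypothetical input path shows the shape of the output file name (the 5-character suffix is random):

from os.path import basename, dirname, join
import uuid

inputFileAbsPath = '/data/circuits.las'                  # assumed example path
fileName = basename(inputFileAbsPath).split('.', 1)[0]   # 'circuits'
rand = uuid.uuid4().hex[:5]
print(join(dirname(inputFileAbsPath), fileName + '_classified_' + rand + '.pl'))
# e.g. /data/circuits_classified_9b1f2.pl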
Example #27
def generateCompDiagram(comp):
    tempDirPath = state.get('tempDirPath')
    filepath = join(tempDirPath, uuid.uuid4().hex + '.gv')
    indexTracker = {}
    g = Graph('G', filename=filepath)
    g.format = 'png'

    with g.subgraph(name='clusterComponent') as c:
        for group in comp.groups:
            with c.subgraph(name='cluster' + uuid.uuid4().hex) as a:
                a.attr(color='blue')
                for port in group.ports:
                    name = port.portType
                    idx = 0
                    if not (name in indexTracker):
                        indexTracker[name] = 1
                    else:
                        idx = indexTracker[name]
                        indexTracker[name] += 1
                    if (port.direction == 'in'):
                        a.node_attr.update(color='green', style='filled')
                    elif (port.direction == 'out'):
                        a.node_attr.update(color='red', style='filled')
                    else:
                        a.node_attr.update(color='orange')
                    a.node(name + '_' + str(idx), label=name, style='filled')

    g.render()
    Popen(["xdg-open", filepath + '.png'])
Example #28
def computeLabelPredsForModels(models, tempFilePath):
    labels = state.get('labels')
    classifProg = getBackgroundString()
    hypothesesStr = computeHypothesesString()

    for modelObj in models:
        classifProg += generateModelString(modelObj) + '\n\n'

    classifProg += hypothesesStr
    classifProg += '#show label/2.'

    file = open(tempFilePath, 'w')
    file.write(classifProg)
    file.close()

    clingoCmd = list(GENERIC_CLINGO_CMD)
    clingoCmd.append(tempFilePath)

    out, err = Popen(clingoCmd,
                     stdout=PIPE,
                     stderr=PIPE,
                     universal_newlines=True).communicate()
    # raise RuntimeError only if an actual error, not just warnings, has occurred
    if 'ERROR' in err:
        raise RuntimeError('Error encountered while classifying models.')

    labelledModels = list(filter(lambda w: w.startswith('label'), out.split()))

    return labelledModels
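The final filter assumes clingo prints answer-set atoms to stdout, so collecting whitespace-separated tokens that start with 'label' is enough. A tiny illustration with a made-up output string (shape only, not real solver output):

out = "Answer: 1\nlabel(m1,door) label(m2,window)\nSATISFIABLE\n"
labelledModels = list(filter(lambda w: w.startswith('label'), out.split()))
print(labelledModels)  # ['label(m1,door)', 'label(m2,window)']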
Example #29
def runILASPCMDInThread(backGroundStr, genericBiasStr, label, outputs, lock):
    ILASPFileStr = backGroundStr

    examplesStr = utils.getExamplesString(label)
    ILASPFileStr += examplesStr

    biasStr = genericBiasStr.replace('$$LABEL$$', ILASP_LABEL_STRING)
    ILASPFileStr += biasStr

    programPath = utils.createILASPProgram(label, ILASPFileStr)

    if (state.get('noise')):
        ILASPCmd = list(GENERIC_ILASP3_CMD)
    else:
        ILASPCmd = list(GENERIC_ILASP2i_CMD)
    ILASPCmd.append(programPath)

    out, err = Popen(ILASPCmd, stdout=PIPE,
                     universal_newlines=True).communicate()
    if err:
        raise RuntimeError('Error encountered while generating hypotheses.')

    lock.acquire()
    outputs[label] = out
    lock.release()
Example #30
def updateCache(query):
    genericQuery = computeGenericQuery(query)
    modelStrings = utils.getModelsStrings(state.get('inputFilePath'))
    utils.initProgressBar()
    numOfThreads = CLASSIFICATION_THREADS
    lockR = Lock()
    lockW = Lock()

    # validModels will contain all models 'valid' in the sense that they comply with
    # the query constraints, and are also not among the already labelled models
    validModels = list()

    threads = list()
    for tIdx in range(numOfThreads):
        threads.append(
            Thread(target=popModelsAndCheckQuery,
                   args=(modelStrings, genericQuery, lockR, lockW,
                         validModels),
                   daemon=True))
    utils.printTitle(
        'Searching for complying models, this might take a while.')
    for thread in threads:
        thread.start()

    for thread in threads:
        thread.join()

    randomSampleSize = min(len(validModels), QUERY_CACHE_SIZE)
    cache = random.sample(validModels, randomSampleSize)
    state.set('queryCache', cache)
    state.set('ranAQuery', True)

    # Update prevQuery so it appears by default in the query editor the
    # next time the user wants to run a query
    state.set('prevQuery', query)
Example #31
def check_to_spray():
    """
    See if server requested spray or past spray interval time
    """
    # check if within 10 seconds of next spray time
    if clock_controller.get_time() + 10 >= state.get('last_spray_time') + state.get('interval'):
        motor_driver.spray()
        state.update(dict(
            last_spray_time=clock_controller.get_time(),
            spray_now=False
        ))
    elif state.get('spray_now'):
        motor_driver.spray()
        state.update(dict(
            spray_now=False,
        ))
    return 'SLEEP'
Example #32
def getBlankLabelsCounter():
    labels = state.get('labels')
    counter = {NO_LABEL_STRING: 0, MULTIPLE_LABELS_STRING: 0}

    for label in labels:
        counter[label] = 0

    return counter
Example #33
def print_contacts():
    s = state.get()
    for contact in s.net._contacts.net_contacts:
        print(contact)
        print("Last Seen: %s" % contact.last_seen)
        print("Liveliness: %s" % contact.liveliness)
        print("Ping: %s" % contact.ping)
        print("Channels")
        for name, channel in contact.channels.items():
            print("\t%s: Ct:%s ID:%s" % (name, len(channel.packets), channel._pkt_id))
        print("-" * 20)
Example #34
def on_hello(contact, data):
    """
    Handler for client hellos.
    Adds the client hash to the contact object.

    If the hash already exists, initiates a DH exchange.
    """
    import state
    s = state.get()
    crypto = contact.channels['bytelynx'].crypto
    if contact.set_hash(data['hash']):
        s.net.send_data(contact, 'hello', {'hash': s.contact.hash})
    elif crypto.is_free:
        s.net.send_data(contact, 'dh.g', {'dh_g': crypto.g})
Example #35
def add_contact():
    s = state.get()
    ip = input("IP Address [127.0.0.1]: ")
    if not ip:
        ip = "127.0.0.1"
    port = int(input("Port: "))
    contact = s.net._contacts.translate(Address(ip, port))
    s.net.send_data(contact, "hello", {"hash": s.contact.hash})

    # Make it so we seed once we join the DHT
    def start_search(contact):
        s.kademlia.buckets.on_added -= start_search
        s.kademlia.init_search(s.contact.hash)

    s.kademlia.buckets.on_added += start_search
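The buckets.on_added += start_search / -= start_search lines assume an event object that supports handler registration via += and -=. A minimal sketch of such a helper (purely illustrative, not bytelynx's actual class):

class Event:
    # Tiny observer helper supporting the `evt += handler` / `evt -= handler`
    # style used above. Dispatch iterates over a copy so a handler may
    # unsubscribe itself while being called, as start_search does.
    def __init__(self):
        self._handlers = []

    def __iadd__(self, handler):
        self._handlers.append(handler)
        return self

    def __isub__(self, handler):
        self._handlers.remove(handler)
        return self

    def __call__(self, *args, **kwargs):
        for handler in list(self._handlers):
            handler(*args, **kwargs)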
Example #36
    def send_data(self, contact, msg_name, data):
        """
        Sends packet to a given contact.

        :param contact: The contact to send data to.
        :type contact: :class:`~common.contact`
        :param msg_name: The unique name of the message.
        :type msg_name: str.
        :param data: The data to send.
        :type data: dict.
        """
        msg = self.protocol.messages[msg_name]
        Logger.debug("-> %s: %s" % (contact.address, msg))
        # Get a packet id for this message
        try:
            channel = contact.channels[msg.mode]
        # If we have not established a channel yet
        except KeyError as e:
            if msg.mode == 'aes-dht':
                Logger.info("Establishing AES-DHT channel with %s" % contact.address)
                contact.add_sent_msg(msg.mode, msg_name, data)
                # Send a 'Hello'
                import state
                s = state.get()
                self.send_data(contact, 'hello', {'hash': s.contact.hash})
            else:
                raise e
        else:
            pkt_id = channel.pkt_id
            data[Tags.pktid.value] = pkt_id

            # Encode data
            payload = msg.encode(contact, data)

            # Send data to a contact
            self._server.send(contact.address, payload)

            # Check if this message is sent 'reliably'
            if msg.is_pongable:
                pkt = SentPacket(pkt_id, payload, contact, channel)
                channel.packets[pkt_id] = pkt
                self.watcher.add_packet(pkt)
Example #37
def get_canonical_url(url):

    # Check if the result is cached in the state
    hash_key = md5.new(url).hexdigest()
    result = yield state.get("canonical_url_%s" % hash_key)
    if result is not None:
        raise gen.Return(result)

    sub_process = process.Subprocess(
        args="curl -sIL '%s' | grep -i ^location:" % url,
        shell=True,
        stdout=process.Subprocess.STREAM,
        stderr=process.Subprocess.STREAM)

    result, error = yield [
        gen.Task(sub_process.stdout.read_until_close),
        gen.Task(sub_process.stderr.read_until_close)
    ]
    logger.info("Subprocess result: " + str(result))
    logger.info("Subprocess error: " + str(error))

    result = result.splitlines()
    if len(result) >= 1:
        result = result[-1]
        result = re.match(r"location:\s*(.*)$", result, re.IGNORECASE)
        if result and result.groups()[0]:
            result = result.groups()[0]
        else:
            result = url
    else:
        # Either the url is canonical or we are broken
        result = url

    # Cache the result
    if result is not None:
        state.set("canonical_url_%s" % hash_key, result)

    raise gen.Return(result)
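The md5.new call above comes from the removed Python 2 md5 module; on Python 3 the same cache key would be built with hashlib (a sketch, not part of the original project):

import hashlib


def cache_key(url: str) -> str:
    # Python 3 equivalent of "canonical_url_%s" % md5.new(url).hexdigest().
    return "canonical_url_%s" % hashlib.md5(url.encode("utf-8")).hexdigest()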
Example #38
File: state_test.py  Project: Xe/code
import noddy
import state

print("1 testing state")

state.set("foo", "bar")
print("  ", state.get("foo"))
print("  ", state.get("bar"))

print("   pass")

print("2 test noddy")

n = noddy.Noddy("Rick", "James", 5)
print("  ", n.first, n.last, n.number)
print("  ", n.name())

print("   pass")

print(dir(state))
print(dir(noddy))
Example #39
    values = eval(answer)
    if DEV:
        print("recv", answer)
        print("values", values)
    encvalue = values[0]
    enc = (encvalue % 24) / 24.0
    if encvalue < 0:
        enc = enc - 1

    pot = values[1] / 100.0

    if DEV:
        reverse = False
    else:
        vjoy = joysticks.get("vJoy Device")
        reverse = state.get("rev", False)

    buttonRot = values[2][0]

    if buttonRot and pot <= 0.2 and not reverse:
        reverse = True
    elif buttonRot and pot >= -0.2 and reverse:
        reverse = False
    sendstr = "set:io4_3=" + str(int(reverse)) + "\n"

    if reverse:
        pot *= -1
    buttons = values[2][1:]
    for i, x in enumerate(buttons):
        buttons[i] = bool(x)
        if not DEV:
Example #40
def on_message(me, myself, msg):
    inbound = json.loads(msg.payload)
    light = inbound[0]
    func = inbound[1]
    try:
        data = inbound[2]
    except IndexError:
        data = ""

    hub = lights[light]['hub']
    group = str(lights[light]['group'])
    light_type = str(lights[light]['type'])
    try:
        attributes = state.get(light).attributes
        current_status = state.get(light).state
    except:
        attributes = {}
        current_status = ""
        attributes['brightness'] = 0
        if light_type == "white":
            for x in range(0, 10):
                logger.debug("Step down")
                send_command(hub, light, commands["white_brightnessdown"])
        if light_type == "rgbw":
            brightness = brightness_map[0]
            command = commands["rgbw_brightness"]
            command[1] = brightness
            send_command(hub, light, command)

    print(light + ' ' + func + ' ' + data)

    if func == current_status:
        logger.info("Sorry we are already at status " + str(func) + " for this light")
        return False

    command = ""

    # On or full
    if func == "on" or func == "full":
        print(light_type + "_" + group + "_" + func)
        command = commands[light_type + "_" + group + "_" + func]
        if func == "on":
            state_data = "on"
        if func == "full":
            state_data = "on"
            attributes['brightness'] = 10

    # Off
    if func == "off":
        send_command(hub, light, commands[light_type + "_" + group + "_on"])
        time.sleep(0.2)
        set_brightness(hub, light, light_type, attributes['brightness'], "0")
        time.sleep(0.2)
        command = commands[light_type + "_" + group + "_off"]
        state_data = "off"
        attributes['brightness'] = 0

    # Brightness
    if func == "brightness":
        set_brightness(hub, light, light_type, attributes['brightness'], data)
        attributes['brightness'] = data
        state_data = "on"

    if command != "":
        send_command(hub, light, command)

    if attributes:
        state.set("limitlessLED", light, state_data, json.dumps(attributes))
    else:
        state.set("limitlessLED", light, state_data)
Example #41

def search_contact():
    hash_ = input("Enter the hash [rand]: ")
    if not hash_:
        size = state.get().config["kademlia"]["keysize"] // 8
        hash_ = Hash(os.urandom(size))
    else:
        hash_ = Hash(base64.b64decode(bytes(hash_, "UTF-8")))
    s = state.get()
    s.kademlia.init_search(hash_)


# TODO: cmd arg for config path
if __name__ == "__main__":
    s = state.get()
    # Our own contact is always in the buckets
    if len(s.kademlia.buckets) > 1:
        s.kademlia.init_search(s.contact.hash)
        s.kademlia.init_search(Hash(os.urandom(s.bitsize // 8)))

    main_menu = Menu(
        [
            MenuOption("Client Status", print_status),
            MenuOption("Bucket Status", print_buckets),
            MenuOption("Shorty Status", print_shortlists),
            MenuOption("Add Contact", add_contact),
            MenuOption("Search for Contact", search_contact),
            MenuOption("Print Contacts", print_contacts),
        ]
    )
Example #42
def print_buckets():
    s = state.get()
    for idx, bucket in enumerate(s.kademlia.buckets._buckets):
        if len(bucket.contacts) > 0:
            print(idx, bucket.contacts)
Example #43
def print_shortlists():
    s = state.get()
    for item in s.kademlia.shortlists._shortlists:
        print(s.kademlia.shortlists._shortlists[item])
Example #44
def print_status():
    s = state.get()
    print("\t", s.contact.address)
    print("\t", s.contact.hash.base64)