Example #1
def logicProcess(statement):
    try:
        if statement[2] == "==":
            return (statement[0] == statement[1])
        elif statement[2] == "!=":
            return (statement[0] != statement[1])
        elif statement[2] == ">":
            return (statement[0] > statement[1])
        elif statement[2] == ">=":
            return (statement[0] >= statement[1])
        elif statement[2] == "<":
            return (statement[0] < statement[1])
        elif statement[2] == "<=":
            return (statement[0] <= statement[1])
        elif statement[2] == "in":
            return (statement[0] in statement[1])
        elif statement[2] == "not in":
            return (statement[0] not in statement[1])
        elif statement[2].startswith("match"):
            statement[1] = statement[2].split("\"")[1]
            if re.search(statement[1], statement[0]):
                return True
            else:
                return False
        elif statement[2].startswith("not match"):
            statement[1] = statement[2].split("\"")[1]
            if re.search(statement[1], statement[0]):
                return False
            else:
                return True
    except Exception:
        logging.debug(
            "logicProcess process failed, statement='{0}'".format(statement),
            5)
        return False
Example #2
def ifEval(logicString, dicts={}):
    functionSafeList = function.systemFunctions
    if "if " == logicString[:3]:
        tempLogic = logicString[3:]
        logicMatches = re.finditer(
            r'((\"(.*?[^\\])\"|([a-zA-Z0-9]+(\[(.*?)\])+)|([a-zA-Z0-9]+(\((.*?)(\)\)|\)))+)|\[(.*?)\]|([a-zA-Z0-9]*)))\s?( not match | match | not in | in |==|!=|>=|>|<=|<)\s?((\"(.*?[^\\])\"|([a-zA-Z0-9]+(\[(.*?)\])+)|([a-zA-Z0-9]+(\((.*?)(\)\)|\)))+)|\[(.*?)\]|([a-zA-Z0-9]*)))',
            tempLogic)
        for index, logicMatch in enumerate(logicMatches, start=1):
            statement = [
                logicMatch.group(1).strip(),
                logicMatch.group(14).strip(),
                logicMatch.group(13).strip()
            ]
            # Type-cast the left and right operands of the statement
            for x in range(0, 2):
                statement[x] = helpers.typeCast(statement[x], dicts,
                                                functionSafeList)
            tempLogic = tempLogic.replace(logicMatch.group(0),
                                          str(logicProcess(statement)))
        # Check that the result only contains True, False, ( ), spaces, or, and, not
        if re.search(r'^(True|False|\(|\)| |or|and|not)*$', tempLogic):
            result = eval(
                tempLogic)  # eval can be unsafe, be very careful with this!
            logging.debug(
                "Action logicEval completed, result='{0}'".format(result), 10)
            return result
        else:
            logging.debug(
                "Action logicEval tempLogic contains unsafe items, tempLogic='{0}'"
                .format(tempLogic), 3)
    else:
        # Return True if the string does not start with "if ", i.e. no condition is defined
        return True
    # Default False
    return False
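
The pair above works as reduce-then-evaluate: ifEval rewrites every comparison it finds into a literal True/False via logicProcess, checks that nothing but booleans and logical glue remains, and only then calls eval. A minimal stand-alone sketch of that idea (illustrative only: it handles just integer ==/!= comparisons and omits the project's typeCast and custom logging helpers):

import re

def evaluate_if(logic_string):
    # Anything that is not an "if ..." expression is treated as always true.
    if not logic_string.startswith("if "):
        return True
    expression = logic_string[3:]
    # Reduce each "<left> <op> <right>" comparison to a literal True/False.
    for match in re.finditer(r'(\d+)\s*(==|!=)\s*(\d+)', expression):
        left, op, right = int(match.group(1)), match.group(2), int(match.group(3))
        outcome = (left == right) if op == "==" else (left != right)
        expression = expression.replace(match.group(0), str(outcome))
    # Only evaluate once nothing but booleans and logical glue remains.
    if re.search(r'^(True|False|\(|\)| |or|and|not)*$', expression):
        return eval(expression)
    return False

print(evaluate_if("if 1 == 1 and 2 != 3"))  # True
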
Example #3
 def getObjectValue(self,
                    cacheName,
                    uid,
                    setFunction,
                    *args,
                    sessionData=None):
     authedCacheName = self.checkSessionData(cacheName, sessionData)
     if authedCacheName == None:
         return
     if len(args) > 0:
         newObject = setFunction(uid, sessionData, *args)
     else:
         newObject = setFunction(uid, sessionData)
     newObjectSize = helpers.getObjectMemoryUsage(newObject)
     currentCacheSize = helpers.getObjectMemoryUsage(
         self.objects[authedCacheName]["objects"])
     memoryNeeded = (currentCacheSize + newObjectSize
                     ) - self.objects[authedCacheName]["maxSize"]
     if memoryNeeded > 0:
         if not self.reduceSize(
                 cacheName, memoryNeeded, sessionData=sessionData):
             logging.debug(
                 "ERROR - Cache store full and unable to free enough space for new object, name={0}"
                 .format(authedCacheName), 5)
             return (False, newObject)
     return (True, newObject)
 async def _process_erc1155_batch_event(self, event: LogReceipt, blockData: BlockData) -> List[RetrievedTokenTransfer]:
     blockNumber = blockData['number']
     transactionHash = event['transactionHash'].hex()
     registryAddress = chain_util.normalize_address(event['address'])
     logging.debug(f'------------- {transactionHash} ------------')
     if len(event['topics']) < 4:
         logging.debug('Ignoring event with less than 4 topics')
         return []
     operatorAddress = chain_util.normalize_address(event['topics'][1].hex())
     fromAddress = chain_util.normalize_address(event['topics'][2].hex())
     toAddress = chain_util.normalize_address(event['topics'][3].hex())
     data = event['data']
     data = textwrap.wrap(data[2:], 64)
     data = [int(f'0x{elem}',16) for elem in data]
     lengthOfArray = data[2]
     tokenIds = data[3:3+lengthOfArray]
     lengthOfValue = data[3+lengthOfArray]
     amounts = data[4+lengthOfArray:4+lengthOfArray+lengthOfValue]
     dataDict = {tokenIds[i]: amounts[i] for i in range(len(tokenIds))}
     ethTransaction = await self._get_transaction(transactionHash=transactionHash, blockData=blockData)
     gasLimit = ethTransaction['gas']
     gasPrice = ethTransaction['gasPrice']
     value = ethTransaction['value']
     transactions = [RetrievedTokenTransfer(transactionHash=transactionHash, registryAddress=registryAddress, fromAddress=fromAddress, toAddress=toAddress, operatorAddress=operatorAddress, tokenId=id, amount=amount ,value=value, gasLimit=gasLimit, gasPrice=gasPrice, blockNumber=blockNumber, tokenType='erc1155batch') for (id, amount) in dataDict.items()]
     return transactions
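
The word arithmetic above follows the ABI encoding of the ERC-1155 TransferBatch data blob: two offset words, then the ids array (length word followed by elements), then the values array. A small stand-alone sketch of that layout with a fabricated payload (function and variable names here are illustrative, not part of the project):

import textwrap

def decode_transfer_batch_data(data_hex):
    # Split the '0x'-prefixed payload into 32-byte (64 hex character) words.
    words = [int(word, 16) for word in textwrap.wrap(data_hex[2:], 64)]
    ids_length = words[2]  # words[0] and words[1] are the two array offsets
    token_ids = words[3:3 + ids_length]
    values_length = words[3 + ids_length]
    amounts = words[4 + ids_length:4 + ids_length + values_length]
    return dict(zip(token_ids, amounts))

# Fabricated payload: token ids [1, 2] transferred with amounts [10, 20].
payload = '0x' + ''.join('{0:064x}'.format(word) for word in [0x40, 0xa0, 2, 1, 2, 2, 10, 20])
print(decode_transfer_batch_data(payload))  # {1: 10, 2: 20}
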
 async def _process_erc721_single_event(self, event: LogReceipt, blockData: BlockData) -> List[RetrievedTokenTransfer]:
     blockNumber = blockData['number']
     transactionHash = event['transactionHash'].hex()
     registryAddress = chain_util.normalize_address(event['address'])
     logging.debug(f'------------- {transactionHash} ------------')
     if registryAddress == self.cryptoKittiesContract.address:
         # NOTE(krishan711): for CryptoKitties the tokenId isn't indexed in the Transfer event
         decodedEventData = self.cryptoKittiesTransferEvent.processLog(event)
         event['topics'] = [event['topics'][0], HexBytes(decodedEventData['args']['from']), HexBytes(decodedEventData['args']['to']), HexBytes(decodedEventData['args']['tokenId'])]
     if registryAddress == self.cryptoPunksContract.address:
         # NOTE(krishan711): for CryptoPunks there is a separate PunkBought (and PunkTransfer if it's free) event with the punkId
         ethTransactionReceipt = await self.get_transaction_receipt(transactionHash=transactionHash)
         decodedEventData = self.cryptoPunksBoughtEvent.processReceipt(ethTransactionReceipt)
         if len(decodedEventData) == 1:
             event['topics'] = [event['topics'][0], HexBytes(decodedEventData[0]['args']['fromAddress']), HexBytes(decodedEventData[0]['args']['toAddress']), HexBytes(decodedEventData[0]['args']['punkIndex'])]
         else:
             decodedEventData = self.cryptoPunksTransferEvent.processReceipt(ethTransactionReceipt)
             if len(decodedEventData) == 1:
                 event['topics'] = [event['topics'][0], HexBytes(decodedEventData[0]['args']['from']), HexBytes(decodedEventData[0]['args']['to']), HexBytes(decodedEventData[0]['args']['punkIndex'])]
     if len(event['topics']) < 4:
         logging.debug('Ignoring event with less than 4 topics')
         return []
     fromAddress = chain_util.normalize_address(event['topics'][1].hex())
     toAddress = chain_util.normalize_address(event['topics'][2].hex())
     tokenId = int.from_bytes(bytes(event['topics'][3]), 'big')
     ethTransaction = await self._get_transaction(transactionHash=transactionHash, blockData=blockData)
     operatorAddress = ethTransaction['from']
     gasLimit = ethTransaction['gas']
     gasPrice = ethTransaction['gasPrice']
     value = ethTransaction['value']
     transactions = [RetrievedTokenTransfer(transactionHash=transactionHash, registryAddress=registryAddress, fromAddress=fromAddress, toAddress=toAddress, operatorAddress=operatorAddress, tokenId=tokenId, value=value, amount=1, gasLimit=gasLimit, gasPrice=gasPrice, blockNumber=blockNumber, tokenType='erc721')]
     return transactions
Example #6
def main(args):
    logging.print('''                   
 ____              ____              
|    \ ___ ___ ___|    \ ___ ___ ___ 
|  |  | -_| -_| . |  |  |  _| . | . |
|____/|___|___|  _|____/|_| |___|  _|
              |_|               |_|  
    ''')

    app = Flask(__name__)
    
    try: 
        # Load the models
        models.load_models()
        logging.warn('All models loaded')

        # Patch our payloads - will be moved.
        deepdrop.patch_payloads(config.payload_files, config.domain)
        logging.warn(f'Payloads patched for {config.domain}')

        if args.debug:
            captains_key = str(uuid.uuid4())
            app.config['CAPTAINS_KEY'] = captains_key
            app.debug = True

            logging.debug(captains_key)

        # Setup our routes
        routing.setup_routes(app)

    except Exception as e:
        logging.error(str(e))

    app.run('0.0.0.0', 80, threaded=False, use_reloader=False) # No threading because https://github.com/keras-team/keras/issues/2397
Example #7
def registerModel(name, className, classType, location, hidden=False):
    # Check that a model with the same name does not already exist (names are used for identification within the GUI; future changes could allow duplicates)
    results = _model().query(query={"name": name})["results"]
    if len(results) == 0:
        return _model().new(name, className, classType, location, hidden)
    else:
        logging.debug(
            "Register model failed as it already exists modelName='{0}', className='{1}', classType='{2}', location='{3}'"
            .format(name, className, classType, location), 4)
Example #8
 def clearCache(self, cacheName, sessionData=None):
     authedCacheName = self.checkSessionData(cacheName, sessionData)
     if authedCacheName == None:
         return
     if cacheName == "ALL":
         for cacheName, cacheValue in self.objects.items():
             self.objects[cacheName]["objects"].clear()
     elif authedCacheName in self.objects:
         self.objects[authedCacheName]["objects"].clear()
         logging.debug(
             "Cache store cleared, name={0}".format(authedCacheName), 20)
Example #9
 def checkSessionData(self, cacheName, sessionData):
     if sessionData:
         authedCacheName = "{0},-,{1}".format(sessionData["_id"], cacheName)
         if sessionData["_id"] == self.objects[authedCacheName]["userID"]:
             return authedCacheName
         else:
             logging.debug(
                 "ERROR - Cache store access denied due to mismatched ID, name={0}, userID={1}"
                 .format(cacheName, sessionData["_id"]), 5)
             return None
     else:
         return cacheName
     return None
Example #10
    def update_task_influence(self, influence):
        'update dynamic goal values depending on current situation'
        # assess situation
        my_tiles = [loc for loc in influence.map if math.fabs(influence.map[loc]) > 0.01]
        total_tile_count = self.gamestate.cols * self.gamestate.rows
        self.winning_percentage = float(len(my_tiles))/total_tile_count
        logging.debug('currently owning %d in %d tiles, ratio: %f' %
            (len(my_tiles), total_tile_count, self.winning_percentage))
        logging.debug('my ant_hill is at %s' % str(self.gamestate.my_hills()))
        logging.debug('known enemy hill: %s' % str(self.gamestate.enemy_hills()))
        
        # alter aggressiveness as situation changes
        self.my_fighter_value = 0 - 1 - (self.winning_percentage / 0.3 % 1)
        self.enemy_ant_value = 0 - (self.winning_percentage / 0.3 % 1) * 2
        
        # hill defense
        if len(self.gamestate.my_hills()) == 1:
            my_hill = self.gamestate.my_hills()[0]
            for enemy_ninja in [ant for ant, owner in self.gamestate.enemy_ants() if self.gamestate.manhattan_distance(my_hill, ant) < 8]:
                influence.map[my_hill] += self.enemy_ninja_value
        
        ## create route task
        # find area with highest ant density
        high_dense_loc = max(influence.map, key=influence.map.get)
        high_dense_val = influence.map[high_dense_loc]
        # find closest desirable area using bfs
        route = path.bfs_findtask(self.gamestate, influence, high_dense_loc, 500)
        # setup task, only long distance ones count
        if len(route) > 8:
            self.route_task = (route, high_dense_val)
            
        logging.debug('found route_task: %s' % str(self.route_task))
Example #11
def start():
    global workers
    # Creating instance of workers
    try:
        if workers:
            workers.kill(workers.workerID)
            logging.debug(
                "Workers start requested, Existing thread kill attempted, workerID='{0}'"
                .format(workers.workerID), 6)
            workers = None
    except NameError:
        pass
    workers = workerHandler(workerSettings["concurrent"])
    logging.debug("Workers started, workerID='{0}'".format(workers.workerID),
                  6)
    return True
Example #12
def deregisterModel(name, className, classType, location):
    loadModels = _model().query(query={"name": name})["results"]
    if loadModels:
        loadModels = loadModels[0]
        # This really does need to clean up the models objects that are left
        #from core.models import trigger, action
        #trigger._action().api_delete(query={"classID" : ObjectId(loadModels["_id"]) })
        #action._action().api_delete(query={"classID" : ObjectId(loadModels["_id"]) })
        results = _model().api_delete(query={
            "name": name,
            "classType": classType
        })
        if results["result"]:
            return True
    logging.debug(
        "deregister model failed modelName='{0}', className='{1}', classType='{2}', location='{3}'"
        .format(name, className, classType, location), 4)
Example #13
    def reduceSize(self, cacheName, amountToFree, sessionData=None):
        authedCacheName = self.checkSessionData(cacheName, sessionData)
        if authedCacheName == None:
            return
        logging.debug(
            "Cache store attempting to reduce memory, name={0}, amount={1}".
            format(authedCacheName, amountToFree), 20)
        # No objects to clear
        if len(self.objects[authedCacheName]["objects"]) == 0:
            return False

        now = time.time()
        amountReduced = 0
        poplist = []
        accessCount = {}
        for cacheObjectKey, cacheObjectValue in self.objects[authedCacheName][
                "objects"].items():
            if cacheObjectValue["cacheExpiry"] < now:
                amountReduced += helpers.getObjectMemoryUsage(cacheObjectValue)
                poplist.append(cacheObjectKey)
            else:
                if cacheObjectValue["accessCount"] not in accessCount:
                    accessCount[cacheObjectValue["accessCount"]] = []
                accessCount[cacheObjectValue["accessCount"]].append(
                    cacheObjectKey)
            if amountReduced >= amountToFree:
                break
        if amountReduced < amountToFree:
            for count in accessCount.keys():
                for item in accessCount[count]:
                    amountReduced += helpers.getObjectMemoryUsage(
                        self.objects[authedCacheName]["objects"][item])
                    poplist.append(item)
                    if amountReduced >= amountToFree:
                        break
                if amountReduced >= amountToFree:
                    break

        for item in poplist:
            del self.objects[authedCacheName]["objects"][item]

        if amountReduced >= amountToFree:
            return True

        return False
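
reduceSize frees memory in two passes: objects whose cacheExpiry has passed are dropped first, and only if that is not enough are live objects dropped by access count. A rough stand-alone sketch of that eviction order (the 'size' field and function name are illustrative; the real store measures sizes with helpers.getObjectMemoryUsage and walks its access-count buckets in insertion order rather than sorting them):

import time

def pick_evictions(entries, amount_to_free):
    # entries: {key: {"cacheExpiry": float, "accessCount": int, "size": int}}
    now = time.time()
    freed = 0
    victims = []
    # Pass 1: expired entries are evicted first.
    for key, entry in entries.items():
        if entry["cacheExpiry"] < now:
            victims.append(key)
            freed += entry["size"]
            if freed >= amount_to_free:
                return victims
    # Pass 2: evict the least-accessed live entries until enough is freed.
    live = [(k, e) for k, e in entries.items() if k not in victims]
    for key, entry in sorted(live, key=lambda item: item[1]["accessCount"]):
        victims.append(key)
        freed += entry["size"]
        if freed >= amount_to_free:
            break
    return victims
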
Example #14
 def check(self):
     password = auth.getPasswordFromENC(self.splunkPassword)
     secure = not self.insecure
     s = splunk.splunkClass(self.splunkHost,
                            self.splunkPort,
                            self.splunkUsername,
                            password,
                            secure=secure,
                            ca=self.ca)
     if not s:
         if logging.debugEnabled:
             logging.debug(
                 "Unable to authenticate to Splunk instance. actionID={0}".
                 format(self._id), 1)
         return
     jobID = s.startJob(self.searchQuery)
     if s.waitJob(jobID):
         pollResult = s.getJob(jobID)
         self.result["events"] = pollResult["results"]
Example #15
        def forceTriggers(triggerID):
            data = json.loads(api.request.data)
            if data["action"] == "trigger":
                class_ = trigger._trigger().getAsClass(id=triggerID)[0]
                if class_:
                    if class_.startCheck == 0:
                        class_.startCheck = time.time()
                        maxDuration = 60
                        if type(class_.maxDuration
                                ) is int and class_.maxDuration > 0:
                            maxDuration = class_.maxDuration
                        try:
                            events = json.loads(data["events"])
                        except json.decoder.JSONDecodeError:
                            events = [data["events"]]
                            # Ensure we run even if no event data was sent
                            if events == [""]:
                                events = ["1"]
                        if type(events) != list:
                            events = [events]
                        class_.workerID = workers.workers.new(
                            "trigger:{0}".format(triggerID),
                            class_.notify, (events, ),
                            maxDuration=maxDuration)
                        class_.update(["startCheck", "workerID"])
                    else:
                        logging.debug(
                            "Error unable to force trigger, triggerID={0} as it is already running."
                            .format(triggerID))
                        return {
                            "result": False,
                            "reason": "Trigger already running"
                        }, 403
                else:
                    logging.debug(
                        "Error unable to force trigger, triggerID={0} as its triggerID cannot be loaded."
                        .format(triggerID))
                    return {
                        "result": False,
                        "reason": "triggerID could not be loaded"
                    }, 404

            return {"result": True}, 200
Example #16
def loadClusterMember():
    clusterMember = _clusterMember().getAsClass(
        query={"systemID": systemSettings["systemID"]})
    if len(clusterMember) == 1:
        clusterMember = clusterMember[0]
    elif len(clusterMember) > 1:
        logging.debug("ERROR: Duplicated systemID found.", -1)
        return None
    else:
        clusterMember = _clusterMember().new(
            systemSettings["systemID"]).inserted_id
        clusterMember = _clusterMember().getAsClass(id=clusterMember)
        clusterMember = clusterMember[0]

    clusterMember.syncCount = 0
    clusterMember.bindAddress = apiSettings["core"]["bind"]
    clusterMember.bindPort = apiSettings["core"]["port"]
    clusterMember.systemUID = str(uuid.uuid4())
    clusterMember.update(["syncCount", "systemUID", "bindAddress", "bindPort"])
    return clusterMember
Example #17
 def newCache(self,
              cacheName,
              maxSize=10485760,
              cacheExpiry=60,
              sessionData=None):
     userID = None
     if sessionData:
         if "_id" in sessionData:
             userID = sessionData["_id"]
             cacheName = "{0},-,{1}".format(userID, cacheName)
     if cacheName not in self.objects:
         self.objects[cacheName] = {
             "objects": {},
             "maxSize": maxSize,
             "cacheExpiry": cacheExpiry,
             "userID": userID
         }
         logging.debug(
             "New cache store created, name={0}, maxSize={1}, cacheExpry={2}, userID={3}"
             .format(cacheName, maxSize, cacheExpiry, userID), 20)
    def update_strategy_influence(self, influence):
        "update dynamic goal values depending on current situation"
        # assess situation
        my_tile_count = len([v for v in np.ravel(np.fabs(influence.map)) if v > 0.01])
        total_tile_count = self.gamestate.cols * self.gamestate.rows
        self.gamestate.winning_percentage = float(my_tile_count) / total_tile_count
        logging.debug(
            "currently owning %d in %d tiles, ratio: %f"
            % (my_tile_count, total_tile_count, self.gamestate.winning_percentage)
        )
        logging.debug("my ant_hill is at %s" % str(self.gamestate.my_hills()))
        logging.debug("known enemy hill: %s" % str(self.gamestate.enemy_hills()))

        # alter aggressiveness as situation changes
        self.my_fighter_value = 0 - 1 - (self.gamestate.winning_percentage / 0.3 % 1)
        self.enemy_ant_value = 0 - (self.gamestate.winning_percentage / 0.3 % 1) * 2

        # hill defense against ninja
        if len(self.gamestate.my_hills()) == 1:
            my_hill = self.gamestate.my_hills()[0]
            for enemy_ninja in [
                ant
                for ant, owner in self.gamestate.enemy_ants()
                if self.gamestate.manhattan_distance(my_hill, ant) < 10
            ]:
                # send ants to intercept; this will also cause ants on the far side of the hill to *get* it
                interception_loc = tuple([x - (x - y) // 2 for x, y in zip(my_hill, enemy_ninja)])
                influence.map[interception_loc] += self.enemy_ninja_value
 async def _process_erc1155_single_event(self, event: LogReceipt, blockData: BlockData) -> List[RetrievedTokenTransfer]:
     blockNumber = blockData['number']
     transactionHash = event['transactionHash'].hex()
     registryAddress = chain_util.normalize_address(event['address'])
     logging.debug(f'------------- {transactionHash} ------------')
     if len(event['topics']) < 4:
         logging.debug('Ignoring event with less than 4 topics')
         return []
     operatorAddress = chain_util.normalize_address(event['topics'][1].hex())
     fromAddress = chain_util.normalize_address(event['topics'][2].hex())
     toAddress = chain_util.normalize_address(event['topics'][3].hex())
     data = event['data']
     data = textwrap.wrap(data[2:], 64)
     data = [int(f'0x{elem}',16) for elem in data]
     tokenId = data[0]
     amount = data[1]
     ethTransaction = await self._get_transaction(transactionHash=transactionHash, blockData=blockData)
     gasLimit = ethTransaction['gas']
     gasPrice = ethTransaction['gasPrice']
     value = ethTransaction['value']
     transactions = [RetrievedTokenTransfer(transactionHash=transactionHash, registryAddress=registryAddress, fromAddress=fromAddress, toAddress=toAddress, operatorAddress=operatorAddress, tokenId=tokenId, amount=amount, value=value, gasLimit=gasLimit, gasPrice=gasPrice, blockNumber=blockNumber, tokenType='erc1155single')]
     return transactions
Example #20
 def bulkNew(self, occurrenceObj, match, data, acl, bulkClass):
     self.acl = acl
     self.name = occurrenceObj.name
     self.match = match
     self.occurrenceTime = int(time.time())
     self.lullTime = (self.occurrenceTime + occurrenceObj.lullTime)
     self.occurrenceActionID = occurrenceObj._id
     self.occurrenceFlowID = data["flow_id"]
     self.lullTimeExpired = occurrenceObj.lullTimeExpiredCount
     if "callingTriggerID" in data:
         if data["callingTriggerID"] != "":
             self.triggerID = data["callingTriggerID"]
         else:
             logging.debug("Error using callingTriggerID as it is blank")
             self.triggerID = data["triggerID"]
     else:
         self.triggerID = data["trigger_id"]
     if self.triggerID == "":
         return None
     self.data = data
     self.lastLullCheck = int(time.time())
     return super(_occurrence, self).bulkNew(bulkClass)
Example #21
 def update_task_influence(self, influence):
     'update dynamic goal values depending on current situation'
     pass
     # assess situation
     my_tiles = [loc for loc in influence.map if math.fabs(influence.map[loc]) > 0.01]
     total_tile_count = self.gamestate.cols * self.gamestate.rows
     logging.debug('currently owning %d in %d tiles, ratio: %f' % 
         (len(my_tiles), total_tile_count, float(len(my_tiles))/total_tile_count))
     logging.debug('my ant_hill is at %s' % str(self.gamestate.my_hills()))
     logging.debug('known enemy hill: %s' % str(self.gamestate.enemy_hills()))
Example #22
def setup():
    systemAbout = _system().query(query={
        "name": "about",
        "systemID": systemSettings["systemID"]
    })["results"]
    if len(systemAbout) < 1:
        systemAbout = _system().new("about").inserted_id
        systemAbout = _system().get(systemAbout)
        systemAbout.systemID = systemSettings["systemID"]
        systemAbout.update(["systemID"])
    else:
        systemAbout = systemAbout[0]["_id"]
        systemAbout = _system().get(systemAbout)

    upgrade = False
    install = True
    if "version" in systemAbout.data:
        install = False
        if systemVersion > systemAbout.data["version"]:
            upgrade = True

    if install:
        logging.debug("Starting system install", -1)
        if systemInstall():
            # Set system version number if install and/or upgrade
            systemAbout.data["version"] = systemVersion
            systemAbout.systemID = systemSettings["systemID"]
            systemAbout.update(["data", "systemID"])
            logging.debug("Starting system install completed", -1)
        else:
            sys.exit("Unable to complete install")
    elif upgrade:
        logging.debug("Starting system upgrade", -1)
        if systemUpgrade(systemAbout.data["version"]):
            # Set system version number if install and/or upgrade
            systemAbout.data["version"] = systemVersion
            systemAbout.update(["data"])
            logging.debug("Starting system upgrade completed", -1)
        else:
            sys.exit("Unable to complete upgrade")
Example #23
 def update_task_influence(self, influence):
     'update dynamic goal values depending on current situation'
     # assess situation
     my_tiles = [loc for loc in influence.map if math.fabs(influence.map[loc]) > 0.01]
     total_tile_count = self.gamestate.cols * self.gamestate.rows
     self.winning_percentage = float(len(my_tiles))/total_tile_count
     logging.debug('currently owning %d in %d tiles, ratio: %f' %
         (len(my_tiles), total_tile_count, self.winning_percentage))
     logging.debug('my ant_hill is at %s' % str(self.gamestate.my_hills()))
     logging.debug('known enemy hill: %s' % str(self.gamestate.enemy_hills()))
     # if winning 
     # if losing
     # unsure
     
     ## create route task
     # find area with highest ant density
     high_dense_loc = max(influence.map, key=influence.map.get)
     high_dense_val = influence.map[high_dense_loc]
     # find closest desirable area using bfs
     route = path.bfs_findtask(self.gamestate, influence, high_dense_loc, 500)
     # setup task
     if len(route) > 3:
         self.route_task = (route, high_dense_val)
     logging.debug('found route_task: %s' % str(self.route_task))
Example #24
 def update_task_influence(self, influence):
     'update dynamic goal values depending on current situation'
     # assess situation
     my_tiles = [loc for loc in influence.map if math.fabs(influence.map[loc]) > 0.01]
     total_tile_count = self.gamestate.cols * self.gamestate.rows
     logging.debug('currently owning %d in %d tiles, ratio: %f' % 
         (len(my_tiles), total_tile_count, float(len(my_tiles))/total_tile_count))
     logging.debug('my ant_hill is at %s' % str(self.gamestate.my_hills()))
     logging.debug('known enemy hill: %s' % str(self.gamestate.enemy_hills()))
     # if winning 
     # if losing
     # unsure
     
     ## send reinforcements
     # find area with highest ant density
     # find most desirable area (most likely due to heavy combat)
     # find path
     # setup task
 def update_task_influence(self, influence):
     'update dynamic goal values depending on current situation'
     # assess situation
     my_tiles = np.fabs(influence.map) > CUTOFF
     my_tile_count = int(np.count_nonzero(my_tiles))
     total_tile_count = self.gamestate.cols * self.gamestate.rows
     self.winning_percentage = float(my_tile_count)/total_tile_count
     logging.debug('currently owning %d in %d tiles, ratio: %f' %
         (my_tile_count, total_tile_count, self.winning_percentage))
     logging.debug('my ant_hill is at %s' % str(self.gamestate.my_hills()))
     logging.debug('known enemy hill: %s' % str(self.gamestate.enemy_hills()))
     
     # alter aggressiveness as situation changes
     self.my_fighter_value = 0 - 1 - (self.winning_percentage / 0.3 % 1)
     self.enemy_ant_value = 0 - (self.winning_percentage / 0.3 % 1) * 2
     
     # hill defense
     if len(self.gamestate.my_hills()) == 1:
         my_hill = self.gamestate.my_hills()[0]
         for enemy_ninja in [ant for ant, owner in self.gamestate.enemy_ants() if self.gamestate.manhattan_distance(my_hill, ant) < 8]:
             influence.map[my_hill] += self.enemy_ninja_value
Example #26
def start():
    global cluster
    try:
        if workers.workers:
            try:
                # Creating instance of cluster
                if cluster:
                    workers.workers.kill(cluster.workerID)
                    logging.debug(
                        "Cluster start requested, Existing thread kill attempted, workerID='{0}'"
                        .format(cluster.workerID), 6)
                    cluster = None
            except NameError:
                pass
            cluster = _cluster()
            logging.debug(
                "Cluster started, workerID='{0}'".format(cluster.workerID), 6)
            return True
    except AttributeError:
        logging.debug("Cluster start requested, No valid worker class loaded",
                      4)
        return False
def schedule_func():
    for message in messages:
        logging.debug("message: {0}".format(message))
        for client in web_socket_clients:
            client.write_message(message)
        messages.remove(message)
 def on_close(self):
     if self in web_socket_clients:
         web_socket_clients.remove(self)
         logging.debug("Client Disconnected, Total Clients: {}".format(len(web_socket_clients)))
 def open(self):
     if self not in web_socket_clients:
         web_socket_clients.append(self)
         logging.debug("Client Connected, Total Clients: {}".format(len(web_socket_clients)))
Example #30
    def check(self):

        if not self.humioOverrideSettings:
            if "ca" in humioSettings:
                h = humio.humioClass(humioSettings["host"],
                                     humioSettings["port"],
                                     humioSettings["apiToken"],
                                     humioSettings["secure"],
                                     humioSettings["ca"],
                                     humioSettings["requestTimeout"])
            else:
                h = humio.humioClass(
                    humioSettings["host"],
                    humioSettings["port"],
                    humioSettings["apiToken"],
                    humioSettings["secure"],
                    requestTimeout=humioSettings["requestTimeout"])
        else:
            humioTimeout = 30
            if self.humioTimeout > 0:
                humioTimeout = self.humioTimeout
            if not hasattr(self, "plain_humioAPIToken"):
                self.plain_humioAPIToken = auth.getPasswordFromENC(
                    self.humioAPIToken)
            if "ca" in humioSettings:
                h = humio.humioClass(self.humioHost, self.humioPort,
                                     self.plain_humioAPIToken, True,
                                     humioSettings["ca"], humioTimeout)
            else:
                h = humio.humioClass(self.humioHost,
                                     self.humioPort,
                                     self.plain_humioAPIToken,
                                     True,
                                     requestTimeout=humioTimeout)

        if "000000000001010000000000" in self._id:
            self.humioJob = ""

        if not self.humioJob or not self.searchLive:
            logging.debug(
                "Humio No Existing Job Found, class={0}".format(
                    self.parse(True)), 10)
            kwargs = {}
            # Skipping any undefined search values
            if self.searchQuery:
                kwargs["searchQuery"] = self.searchQuery
            if self.searchStart:
                kwargs["searchStart"] = self.searchStart
            if self.searchLive:
                kwargs["searchLive"] = self.searchLive
            if self.searchEnd:
                kwargs["searchEnd"] = self.searchEnd
            createJobResult = h.createJob(self.searchRepository, **kwargs)
            if createJobResult[0] == 200:
                self.humioJob = createJobResult[1]
                self.update(["humioJob"])
                logging.debug(
                    "Humio Job Created, jobID={0}, class={1}".format(
                        self.humioJob, self.parse(True)), 8)
                time.sleep(self.jobStartWaitTime)
            else:
                raise humio.jobCreateException(self._id, self.name,
                                               self.searchQuery)

        if self.humioJob:
            logging.debug(
                "Humio polling..., class={0}".format(self.parse(True)), 15)
            wait = False
            if not self.searchLive:
                wait = True
            pollResult = h.pollJob(self.searchRepository, self.humioJob, wait)
            if pollResult[0] == 200 and "events" in pollResult[1]:
                if self.onlyNew:
                    events = []
                    if len(pollResult[1]["events"]) > 0:
                        for event in pollResult[1]["events"]:
                            if int(event["@timestamp"]
                                   ) > self.lastEventTimestamp:
                                events.append(event)
                        self.lastEventTimestamp = int(
                            pollResult[1]["events"][-1]["@timestamp"]) / 1000
                        self.update(["lastEventTimestamp"])
                    self.result["events"] = events
                else:
                    self.result["events"] = pollResult[1]["events"]
                self.result["plugin"]["humio"] = {
                    "searchQuery": self.searchQuery,
                    "searchRepository": str(self.searchRepository)
                }
            else:
                self.humioJob = ""
                self.update(["humioJob"])
                raise humio.jobPollException(self._id, self.name,
                                             self.humioJob)
    def run(self,data,persistentData,actionResult):
        eventTitle = helpers.evalString(self.eventTitle,{"data" : data})
        eventType = helpers.evalString(self.eventType,{"data" : data})
        eventSubType = helpers.evalString(self.eventSubType,{"data" : data})
        layer = self.layer
        accuracy = self.accuracy
        impact = self.impact
        benign = self.benign
        timeToLive = self.timeToLive
        uid = helpers.evalString(self.uid,{"data" : data})
        eventValues = helpers.evalDict(self.eventValues,{"data" : data})

        uid = "{0}-{1}-{2}-{3}".format(self._id,eventType,eventSubType,uid)

        data["var"]["event"] = {}

        data["var"]["event"]["type"] = eventType
        data["var"]["event"]["eventSubType"] = eventSubType
        data["var"]["event"]["layer"] = layer
        data["var"]["event"]["accuracy"] = accuracy
        data["var"]["event"]["impact"] = impact
        data["var"]["event"]["benign"] = benign

        try:
            score = ((accuracy*(impact*layer))/benign)
        except ZeroDivisionError:
            score = 0
        data["var"]["event"]["score"] = score

        cacheUID = "{0}-{1}-{2}".format(data["conductID"],data["flow_id"],uid)
        foundEvent = cache.globalCache.get("eventCache",cacheUID,getEvent,data["conductID"],data["flow_id"],uid,eventType,eventSubType,extendCacheTime=True,customCacheTime=timeToLive,nullUpdate=True)
        if foundEvent != None:
            try:
                persistentData["plugin"]["event"].append(foundEvent)
            except KeyError:
                persistentData["plugin"]["event"] = [foundEvent]
            arrayIndex = len(persistentData["plugin"]["event"])-1
            actionResult["eventIndex"] = arrayIndex
            if foundEvent._id != "":
                if foundEvent.expiryTime > time.time():
                    changes = False
                    for key,value in eventValues.items():
                        if key in foundEvent.eventValues:
                            if value != foundEvent.eventValues[key]:
                                changes = True
                                break
                        else:
                            changes = True
                            break
                        
                    if changes:
                        foundEvent.updateRecord(self.bulkClass,eventValues,accuracy,impact,layer,benign,score,int( time.time() + timeToLive ),self.history)
                  
                        actionResult["result"] = True
                        actionResult["rc"] = 202
                        return actionResult

                    else:
                        foundEvent.expiryTime = int(time.time() + timeToLive)
                        foundEvent.bulkUpdate(["expiryTime"],self.bulkClass)

                    actionResult["result"] = True
                    actionResult["rc"] = 302
                    return actionResult
                else:
                    cache.globalCache.delete("eventCache",cacheUID)
            else:
                logging.debug("Event Update Failed - NO ID, actionID='{0}'".format(self._id),7)
                actionResult["result"] = False
                actionResult["rc"] = 500
                return actionResult
        
        eventObject = event._event().bulkNew(self.bulkClass,self.acl,data["conductID"],data["flow_id"],eventType,eventSubType,int( time.time() + timeToLive ),eventValues,uid,accuracy,impact,layer,benign,score,data,eventTitle)
        cache.globalCache.insert("eventCache",cacheUID,eventObject,customCacheTime=timeToLive)
        try:
            persistentData["plugin"]["event"].append(eventObject)
        except KeyError:
            persistentData["plugin"]["event"] = [eventObject]
        arrayIndex = len(persistentData["plugin"]["event"])-1
        actionResult["eventIndex"] = arrayIndex
        actionResult["result"] = True
        actionResult["rc"] = 201
        return actionResult
Example #32
# Plugin support - not dynamic yet
#from plugins.occurrence.web import occurrence as occurrencePages
#api.webServer.register_blueprint(occurrencePages.occurrencePages)

#from plugins.ansible.web import ansible as ansiblePages
#api.webServer.register_blueprint(ansiblePages.ansiblePages)

#from plugins.asset.web import asset as assetPages
#api.webServer.register_blueprint(assetPages.assetPages,url_prefix='/plugin')

from core import logging, db

# Installing
if "webui" not in db.list_collection_names():
    logging.debug("DB Collection webui Not Found : Creating...")
    model.registerModel("flowData", "_flowData", "_document",
                        "core.models.webui")

from core import audit, helpers, plugin, auth

from core.models import conduct, action, trigger, webui


@api.webServer.route("/")
def mainPage():
    from system import install
    return render_template("main.html",
                           conducts=conduct._conduct().query(
                               api.g["sessionData"],
                               query={"name": {
Example #33
    def run(self, data, persistentData, actionResult):
        searchQuery = helpers.evalString(self.searchQuery, {"data": data})
        searchRepository = helpers.evalString(self.searchRepository,
                                              {"data": data})
        searchStart = helpers.evalString(self.searchStart, {"data": data})
        searchEnd = helpers.evalString(self.searchEnd, {"data": data})

        if not self.humioOverrideSettings:
            if "ca" in humioSettings:
                h = humio.humioClass(humioSettings["host"],
                                     humioSettings["port"],
                                     humioSettings["apiToken"],
                                     humioSettings["secure"],
                                     humioSettings["ca"],
                                     humioSettings["requestTimeout"])
            else:
                h = humio.humioClass(
                    humioSettings["host"],
                    humioSettings["port"],
                    humioSettings["apiToken"],
                    humioSettings["secure"],
                    requestTimeout=humioSettings["requestTimeout"])
        else:
            humioTimeout = 30
            if self.humioTimeout > 0:
                humioTimeout = self.humioTimeout
            if not hasattr(self, "plain_humioAPIToken"):
                self.plain_humioAPIToken = auth.getPasswordFromENC(
                    self.humioAPIToken)
            if "ca" in humioSettings:
                h = humio.humioClass(self.humioHost, self.humioPort,
                                     self.plain_humioAPIToken, True,
                                     humioSettings["ca"], humioTimeout)
            else:
                h = humio.humioClass(self.humioHost,
                                     self.humioPort,
                                     self.plain_humioAPIToken,
                                     True,
                                     requestTimeout=humioTimeout)

        if not self.searchLive:
            kwargs = {}
            # Skipping any undefined search values
            if searchQuery:
                kwargs["searchQuery"] = searchQuery
            if searchStart:
                kwargs["searchStart"] = searchStart
            kwargs["searchLive"] = self.searchLive
            if searchEnd:
                kwargs["searchEnd"] = searchEnd
            createJobResult = h.createJob(searchRepository, **kwargs)
            if createJobResult[0] == 200:
                humioJob = createJobResult[1]
                wait = True
                pollResult = h.pollJob(searchRepository, humioJob, wait)
                if pollResult[0] == 200 and "events" in pollResult[1]:
                    actionResult["events"] = pollResult[1]["events"]
            actionResult["rc"] = 0
            actionResult["result"] = True
            return actionResult
        else:
            if not self.humioJob:
                logging.debug(
                    "Humio No Existing Job Found, class={0}".format(
                        self.parse(True)), 10)
                kwargs = {}
                # Skipping any undefined search values
                if self.searchQuery:
                    kwargs["searchQuery"] = self.searchQuery
                if self.searchStart:
                    kwargs["searchStart"] = self.searchStart
                if self.searchLive:
                    kwargs["searchLive"] = self.searchLive
                if self.searchEnd:
                    kwargs["searchEnd"] = self.searchEnd
                createJobResult = h.createJob(self.searchRepository, **kwargs)
                if createJobResult[0] == 200:
                    self.humioJob = createJobResult[1]
                    self.update(["humioJob"])
                    logging.debug(
                        "Humio Job Created, jobID={0}, class={1}".format(
                            self.humioJob, self.parse(True)), 8)
                else:
                    raise humio.jobCreateException(self._id, self.name,
                                                   self.searchQuery)

            if self.humioJob:
                logging.debug(
                    "Humio polling..., class={0}".format(self.parse(True)), 15)
                wait = False
                if not self.searchLive:
                    wait = True
                pollResult = h.pollJob(self.searchRepository, self.humioJob,
                                       wait)
                if pollResult[0] == 200 and "events" in pollResult[1]:
                    actionResult["events"] = pollResult[1]["events"]
                    actionResult["humio"] = {
                        "searchQuery": self.searchQuery,
                        "searchRepository": str(self.searchRepository)
                    }
                    actionResult["rc"] = 0
                    actionResult["result"] = True
                else:
                    self.humioJob = ""
                    self.update(["humioJob"])
                    actionResult[
                        "msg"] = "Error: Unable to poll humio job. job='{0}'".format(
                            self.humioJob)
                    actionResult["rc"] = -1
                    actionResult["result"] = False
Example #34
            if acl:
                for aclItem in acl["ids"]:
                    for accessID in accessIDs:
                        if aclItem["accessID"] == accessID:
                            access = aclItem[accessType]
            else:
                access = True
    return [access, accessIDs, adminBypass]


# Update DB item within given collection by ID
def updateDocumentByID(dbCollection, id, update):
    query = {"_id": ObjectId(id)}
    queryResults = dbCollection.update_one(query, update)
    return queryResults


# Get DB item within given collection by ID
def findDocumentByID(dbCollection, id):
    query = {"_id": ObjectId(id)}
    queryResults = dbCollection.find_one(query)
    return queryResults


# Delete database
def delete():
    dbClient.drop_database(mongodbSettings["db"])


logging.debug("db.py loaded")
Example #35
 def open(self):
     if self not in web_socket_clients:
         web_socket_clients.append(self)
         logging.debug("Client Connected, Total Clients: {}".format(
             len(web_socket_clients)))
Example #36
def systemInstall():
    # Adding ENC secure
    systemSecure = _system().query(query={"name": "secure"})["results"]
    if len(systemSecure) < 1:
        systemSecure = _system().new("secure").inserted_id
        systemSecure = _system().get(systemSecure)
        systemSecure.data = {"string": secrets.token_hex(32)}
        systemSecure.update(["data"])

    # Installing model if that DB is not installed
    if "model" not in db.list_collection_names():
        logging.debug("DB Collection 'model' Not Found : Creating...")
        # Creating default model required so other models can be registered
        logging.debug("Registering default model class...")
        m = model._model()
        m.name = "model"
        m.classID = None
        m.acl = {
            "ids": [{
                "accessID": "0",
                "delete": True,
                "read": True,
                "write": True
            }]
        }
        m.className = "_model"
        m.classType = "_document"
        m.location = "core.model"
        m.insert_one(m.parse())
    if "conducts" not in db.list_collection_names():
        logging.debug("DB Collection conducts Not Found : Creating...")
        model.registerModel("conduct", "_conduct", "_document",
                            "core.models.conduct")
    if "triggers" not in db.list_collection_names():
        logging.debug("DB Collection action Not Found : Creating...")
        model.registerModel("trigger", "_trigger", "_document",
                            "core.models.trigger")
    if "actions" not in db.list_collection_names():
        logging.debug("DB Collection action Not Found : Creating...")
        model.registerModel("action", "_action", "_document",
                            "core.models.action")
    if "webui" not in db.list_collection_names():
        logging.debug("DB Collection webui Not Found : Creating...")
        model.registerModel("flowData", "_flowData", "_document",
                            "core.models.webui")
    if "modelUI" not in db.list_collection_names():
        logging.debug("DB Collection modelUI Not Found : Creating...")
        model.registerModel("modelUI", "_modelUI", "_document",
                            "core.models.webui")
    if "clusterMembers" not in db.list_collection_names():
        logging.debug("DB Collection clusterMembers Not Found : Creating...")
        model.registerModel("clusterMember", "_clusterMember", "_document",
                            "core.cluster")

    # System - failedTriggers
    from core.models import trigger
    triggers = trigger._trigger().query(
        query={"name": "failedTriggers"})["results"]
    if len(triggers) < 1:
        from system.models import trigger as systemTrigger
        model.registerModel("failedTriggers", "_failedTriggers", "_trigger",
                            "system.models.trigger")
        if not systemTrigger._failedTriggers().new("failedTriggers"):
            logging.debug("Unable to register failedTriggers", -1)
            return False
    temp = model._model().getAsClass(query={"name": "failedTriggers"})
    if len(temp) == 1:
        temp = temp[0]
        temp.hidden = True
        temp.update(["hidden"])

    # System - Actions
    from core.models import action
    actions = action._action().query(query={"name": "resetTrigger"})["results"]
    if len(actions) < 1:
        from system.models import action as systemAction
        model.registerModel("resetTrigger", "_resetTrigger", "_action",
                            "system.models.action")
        if not systemAction._resetTrigger().new("resetTrigger"):
            logging.debug("Unable to register resetTrigger", -1)
            return False
    temp = model._model().getAsClass(query={"name": "resetTrigger"})
    if len(temp) == 1:
        temp = temp[0]
        temp.hidden = True
        temp.update(["hidden"])

    from core import auth

    # Adding models for user and groups
    model.registerModel("user", "_user", "_document", "core.auth")
    model.registerModel("group", "_group", "_document", "core.auth")

    # Adding default admin group
    adminGroup = auth._group().getAsClass(query={"name": "admin"})
    if len(adminGroup) == 0:
        adminGroup = auth._group().new("admin")
        adminGroup = auth._group().getAsClass(query={"name": "admin"})
    adminGroup = adminGroup[0]

    # Adding default root user
    rootUser = auth._user().getAsClass(query={"username": "root"})
    if len(rootUser) == 0:
        rootPass = randomString(30)
        rootUser = auth._user().new("root", "root", rootPass)
        rootUser = auth._user().getAsClass(query={"username": "root"})
        logging.debug("Root user created! Password is: {}".format(rootPass),
                      -1)
    rootUser = rootUser[0]

    # Adding root to group
    if rootUser._id not in adminGroup.members:
        adminGroup.members.append(rootUser._id)
        adminGroup.update(["members"])

    # Adding primary group for root user
    rootUser.primaryGroup = adminGroup._id
    rootUser.update(["primaryGroup"])

    return True
Example #37
 def on_close(self):
     if self in web_socket_clients:
         web_socket_clients.remove(self)
         logging.debug("Client Disconnected, Total Clients: {}".format(
             len(web_socket_clients)))
Example #38
import time

from core import db, helpers, logging

# Initialize

dbCollectionName = "occurrence"

if dbCollectionName not in db.db.list_collection_names():
    logging.debug(
        "DB Collection {0} Not Found : Creating...".format(dbCollectionName))


class _occurrence(db._document):
    name = str()
    lullTime = int()
    match = str()
    occurrenceTime = int()
    lastOccurrenceTime = int()
    occurrenceActionID = str()
    occurrenceFlowID = str()
    lullTimeExpired = int()
    lastLullCheck = int()
    triggerID = str()
    data = dict()

    _dbCollection = db.db[dbCollectionName]

    def bulkNew(self, occurrenceObj, match, data, acl, bulkClass):
        self.acl = acl
        self.name = occurrenceObj.name
Example #39
def schedule_func():
    for message in messages:
        logging.debug("message: {0}".format(message))
        for client in web_socket_clients:
            client.write_message(message)
        messages.remove(message)