Example #1
    def playerJoin(self, p):
        debug_print("playerJoin", self.mapNum, p.pos)

        assert p.map is None, 'entrando a un mapa sin salir del actual'

        self.players.add(p)

        x, y = p.pos
        self.setPos(x, y, p)

        p.map = self
        p.cmdout.sendChangeMap(self.mapNum, self.mapFile.mapVers)

        d = p.getCharacterCreateAttrs()

        for a in self.players:
            # Notify everyone else about the new character
            a.cmdout.sendCharacterCreate(**d)

            # Notify the new character about everyone else
            if a != p:
                p.cmdout.sendCharacterCreate(**a.getCharacterCreateAttrs())

        p.sendUserCharIndexInServer()

        # FIXME: Find another way to avoid iterating over all 10k tiles

        mf = self.mapFile

        for y in xrange(1, MAP_SIZE_Y + 1):
            for x in xrange(1, MAP_SIZE_X + 1):
                obj = mf[x, y].objdata()
                if obj is not None:
                    p.cmdout.sendObjectCreate(x, y, obj.GrhIndex)
Example #2
    def handleCmdLoginExistingChar(self, prot, buf, player):
        # PacketID
        cmd = buf.readInt8()

        playerName = buf.readString()
        playerPass = buf.readString()
        playerVers = '%d.%d.%d' % (buf.readInt8(), buf.readInt8(),\
            buf.readInt8())

        error = False

        if not gamerules.isValidPlayerName(playerName, False):
            prot.cmdout.sendErrorMsg("Nombre invalido")
            error = True
            debug_print("Nombre invalido:", repr(playerName))
        elif corevars.gameServer.playersLimitReached():
            prot.cmdout.sendErrorMsg("Limite de jugadores alcanzado")
            error = True
        elif corevars.gameServer.isPlayerLogged(playerName):
            prot.cmdout.sendErrorMsg("El jugador ya se encuentra logeado")
            error = True
        else:
            # The Player instance is only created once the login is valid.
            prot.player = Player(prot, playerName)
            prot.player.start()

        if error:
            prot.loseConnection()
Example #3
    def playerMove(self, p, oldpos, newpos):
        """
        p: player.
        
        Mueve un jugador dentro del mapa, validando que newpos sea valida y
        si lo es, actualiza la pos del jugador y notifica al resto de los 
        pjs del mapa. En caso de pisar un tile exit cambia de mapa al jugador
        """

        if not self.validPos(newpos):
            raise gamerules.GameLogicError('Invalid pos')

        x, y = newpos

        self.setPos(oldpos[0], oldpos[1], None)
        self.setPos(x, y, p)
        p.pos = newpos

        for a in self.players:
            if a != p:
                a.cmdout.sendCharacterMove(p.chridx, x, y)

        # Tile Exit
        exit = self.mapFile[x, y].exit
        if exit is not None:
            m2, x2, y2 = exit
            debug_print("exit:", exit)
            p.map.playerLeave(p)
            p.pos = [x2, y2]
            corevars.mapData[m2].playerJoin(p)
Example #4
    def sudo_command(self, sudo_command_string, password = None):
        '''
        Runs the command, then sends the password via stdin
        Input: String, some command-line string
        Output: output, error
        '''
        util.debug_print('calling SSHWrapper sudo_command: ' + sudo_command_string)
        # add sudo to the command string
        if not sudo_command_string.strip().startswith('sudo'):
            sudo_command_string = 'sudo -S ' + sudo_command_string
        
        # run sudo command
        stdin, stdout, stderr = self.ssh.exec_command(sudo_command_string)

        # get password
        if password is None:
            password = os.environ.get('SSH_USER_PASSWORD') # must be set up as an environment variable
            
        # sleep just to make sure it's good
        time.sleep(2)
        
        # give password
        stdin.write(password+'\n')
        stdin.flush()
        
        # get and return output
        stdout_data = stdout.readlines()
        stderr_data = stderr.readlines()
        
        return stdout_data, stderr_data
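    # A hedged usage sketch for sudo_command above (hypothetical SSHWrapper
    # instance and command; 'sudo -S ' is prepended automatically when missing):
    #
    #   os.environ['SSH_USER_PASSWORD'] = 'secret'  # fallback used when no password argument is given
    #   out, err = wrapper.sudo_command('service hadoop-hdfs-datanode restart')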
Example #5
    def playerJoin(self, p):
        debug_print("playerJoin", self.mapNum, p.pos)
        assert p.map is None, 'entrando a un mapa sin salir del actual'
        self.players.add(p)
        x, y = p.pos
        self.setPos(x, y, p)
        p.map = self
        p.cmdout.sendChangeMap(self.mapNum, self.mapFile.mapVers)
        d = p.getCharacterCreateAttrs()

        # notify all the players of this map
        for a in self.players:            
            a.cmdout.sendCharacterCreate(**d)
            # notify this player about all the other players
            if a != p:
                p.cmdout.sendCharacterCreate(**a.getCharacterCreateAttrs())

        # send the player his own char index
        p.sendUserCharIndexInServer()

        # send the user all of this map's objects
        # FIXME: move this to SQL
        mf = self.mapFile
        for y in xrange(1, MAP_SIZE_Y + 1):
            for x in xrange(1, MAP_SIZE_X + 1):
                obj = mf[x, y].objdata()
                if obj is not None:
                    p.cmdout.sendObjectCreate(x, y, obj.GrhIndex)                    
Example #6
    def connectionMade(self):
        debug_print("connectionMade")

        if not gameServer.connectionsLimitReached():
            gameServer.connectionMade(self)
        else:
            self.loseConnection()
Example #7
def createIssue(issue):
	print "CREATING ISSUE: ", issue.issue, " in file: ", issue.fileName, " on line: ", issue.line, " with label: ", issue.label

	title = "*AutoIssue* " + issue.title
	body = issue.issue
	assignee = getOwner()
	labels = [] if issue.label is None else issue.label

	data = {"title" : title, "body" : body, "state" : "open", "labels" : labels}

	url = urljoin(API_URL, "/".join(["repos", getOwner(), getRepo(), "issues"]))
	url = url + "?access_token=" + getToken()

	util.debug_print("Issue create request url =", url)

	r = requests.post(url, data = json.dumps(data), headers = HEADERS)

	if r.status_code == requests.codes['created']:
		j = json.loads(r.text or r.content)
		print "Successfully created issue", j['number']
		return j['number']
	else:
		print "Something went wrong while attempting to create the issue on Github"
		print "{}:{}".format("Status", r.status_code)
		print r.text
Example #8
	def connect(self):
		hostserver = '127.0.0.1'
		hostport = '9221'
		password = '******'

		try:
			self._conn = telnetlib.Telnet(hostserver, hostport)
			msg = self._conn.read_until('Password: ')
			util.debug_print("[Kanraoke] rec: " + msg)
			self._send_cmd(password)
			return True
		except socket.error:
			pass
		return False
Example #9
    def connectionLost(self, reason):
        debug_print("connectionLost")
        self._ao_connLost = True

        # All connection cleanup code must go in loseConnection.

        if not self._ao_closing:
            self.loseConnection()
Example #10
def draw_grid(data, tile_img, tiles):
    """Returns an image of a tile-based grid"""
    debug_print("drawing level", data)
    xsize = len(data[0]) * SIZE
    ysize = len(data) * SIZE
    img = Surface((xsize, ysize))
    for y, row in enumerate(data):
        for x, char in enumerate(row):
            rect = get_tile_rect(x, y)
            img.blit(tile_img, rect, tiles[char])
    return img
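# A small worked example for draw_grid above (hypothetical constants): with
# SIZE = 32 and data = ["ab", "ba"], the returned Surface is 64x64 pixels and
# each character indexes the matching sub-rect of tile_img via tiles[char].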
Example #11
    def npcJoin(self, n):
        debug_print("npcJoin", self.mapNum, n.pos, n.idx)
        self.npcs.add(n)
        x, y = n.pos
        self.setPos(x, y, n)
        n.map = self

        d = n.getNpcCreateAttrs()

        for a in self.players:
            # Notify everyone else about the new character
            a.cmdout.sendCharacterCreate(**d)
Example #12
    def playerLeave(self, p):
        debug_print("playerLeave", self.mapNum, p)

        p.map = None

        x, y = p.pos
        self.setPos(x, y, None)

        for a in self.players:
            a.cmdout.sendCharacterRemove(p.chridx)

        self.players.remove(p)
Example #13
def get_max_slavename(some_file_list, return_all = False):
    '''
    Takes a list of slave names (one per line) and returns either the "max"
    slave name or, when return_all is True, the list of all matching names.
    Input: ['dlw-Slave2\n', 'dlw-Slave3\n']
    Output: 'dlw-Slave3'
    '''
    util.debug_print('calling on util.get_max_slavename')
    
    # get max slave node name
    max_slave_name = ''
    max_slave_number = 0
    all_slaves_list = list()
    checker = re.compile(config.SLAVE_NAMING_REGEX)
    for line in some_file_list:
        matchobj = checker.match(line)
        if matchobj:
            
            # add to all slave list
            all_slaves_list.append(matchobj.group())
            
            # figure out max slavename
            line_slave_number = int(matchobj.group(1))
            if max_slave_number < line_slave_number:
                max_slave_number = line_slave_number
                max_slave_name = matchobj.group()
    
    if return_all:
        util.debug_print('util.get_max_slave is returning list of slaves:')
        util.debug_print(all_slaves_list)
        return all_slaves_list
    else:
        util.debug_print('util.get_max_slave is returning: ' + max_slave_name)
        return max_slave_name
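# A minimal standalone sketch of the same "max name" logic, assuming
# config.SLAVE_NAMING_REGEX matches names like 'dlw-SlaveN' with the number in
# group 1 (an assumption based on the docstring's sample data, not the real value):
import re

lines = ['dlw-Slave2\n', 'dlw-Slave3\n']
checker = re.compile(r'dlw-Slave(\d+)')
matches = [m for m in (checker.match(line) for line in lines) if m]
best = max(matches, key=lambda m: int(m.group(1)))
print(best.group())  # 'dlw-Slave3', the docstring's expected output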
Example #14
def save(context, metric_sets, partition, schema_hash):
    util.debug_print("\t{}:".format(partition))
    for schema_hash, dict in metric_sets[partition].iteritems():
        if util.time_remaining(context) <= (
                context[c.CW_ATTR_DELETE_DURATION] + 20):
            break
        columns = dict[c.KEY_SET_COLUMNS]
        if len(dict[c.KEY_SET]) == 0:
            continue
        values = dict[c.KEY_SET].values()
        set = pd.DataFrame(values, columns=columns)
        util.debug_print("\t\t{}:".format(schema_hash))
        path = create_file_path(partition, schema_hash,
                                context[c.KEY_SEPERATOR_PARTITION])
        util.debug_print("Writing to path '{}' with set:\n {}".format(
            path, set))
        elapsed = 0
        try:
            util.debug_print("Writing to partition '{}'".format(partition))

            write(context[c.KEY_METRIC_BUCKET], path, set,
                  context[c.KEY_SEPERATOR_PARTITION],
                  schema.object_encoding(columns))
            context[c.KEY_SUCCEEDED_MSG_IDS] += dict[c.KEY_MSG_IDS]
        except Exception as e:
            print "[{}]An error occured writing to path '{}'.\nSet: {}\nError: \n{}".format(
                context[c.KEY_REQUEST_ID], path, set, traceback.format_exc())
            raise e
        finally:
            number_of_rows = len(values)
            if c.INFO_TOTAL_ROWS not in context[c.KEY_AGGREGATOR].info:
                context[c.KEY_AGGREGATOR].info[c.INFO_TOTAL_ROWS] = 0
            context[c.KEY_AGGREGATOR].info[c.INFO_TOTAL_ROWS] += number_of_rows
            del set
            del columns
Example #15
    def npcLeave(self, n):
        debug_print("npcLeave", self.mapNum, n.pos, n.idx)
        self.npcs.remove(n)

        self.freeCharIdx(n.chridx)
        n.chridx = None

        n.map = None

        x, y = n.pos
        self.setPos(x, y, None)

        for a in self.players:
            a.cmdout.sendCharacterRemove(n.idx)
Example #16
def resolve_config(
        config_str: Optional[str]) -> Dict[str, Optional[Dict[str, Any]]]:
    """ resolves if config arg is a registry entry, a url, or a file, folder, or loads from defaults if None"""
    start_t = time.time()
    if config_str is None:
        config = load_config_from_local_path()
    elif config_str in RULES_REGISTRY:
        config = download_config(RULES_REGISTRY[config_str])
    elif is_url(config_str):
        config = download_config(config_str)
    else:
        config = load_config_from_local_path(config_str)
    if config:
        debug_print(f"loaded {len(config)} configs in {time.time() - start_t}")
    return config
Example #17
    def playerJoin(self, p):
        self._players.add(p)
        self._playersByName[p.playerName.lower()] = p

        chridx = self.nextCharIdx()
        self._playersByChar[chridx] = p
        p.chridx = chridx
        p.userIdx = chridx

        self._playersLoginCounter += 1

        if len(self._players) > self._playersMaxLoginsCount:
            self._playersMaxLoginsCount = len(self._players)

        debug_print("Nuevo jugador, chr:", chridx)
Example #18
    def playerJoin(self, p):
        self._players.add(p)
        self._playersByName[p.playerName.lower()] = p

        chridx = self.nextCharIdx()
        self._playersByChar[chridx] = p
        p.chridx = chridx
        p.userIdx = 1

        self._playersLoginCounter += 1

        if len(self._players) > self._playersMaxLoginsCount:
            self._playersMaxLoginsCount = len(self._players)

        debug_print("Nuevo jugador, chr:", chridx)
Example #19
def write(bucket, key, data, sep, object_encoding, append=False):   
    if data.empty:        
        raise RuntimeError( "[{}]An attempt to write an empty dataset has occurred.  The request dataset was: {}".format(error.Error.empty_dataframe(), data))    
    sensitivity_type = KeyParts(key, sep).sensitivity_level.lower()   
    s3 = s3fsmap[sensitivity_type]    
    s3_open = s3.open    
    size_before_dup_drop = len(data)
    data.drop_duplicates(inplace=True)        
    size_after_dup_drop = len(data)        
    if size_before_dup_drop - size_after_dup_drop > 0:
        print "{} duplicates have been dropped".format(size_before_dup_drop - size_after_dup_drop) 
    util.debug_print("Using object encoding {}".format(object_encoding))
    path='{}{}'.format(bucket,key)          
    pwrite(path, data, open_with=s3_open, compression='GZIP', append=append, has_nulls=True, object_encoding=object_encoding)        
    return path
Example #20
def find_strikes_min_max(all_strikes, strikeCount, stockPrice):
    index = 0
    all_strikes = np.sort(all_strikes)
    for p in all_strikes:
        if p < stockPrice:
            index += 1
        else:
            break
    strike_min_index = index - np.int(strikeCount / 2)
    if strike_min_index < 0:
        strike_min_index = 0
    strike_max_index = index + np.int(strikeCount / 2)
    if strike_max_index >= len(all_strikes):
        strike_max_index = len(all_strikes) - 1
    debug_print("min_max", all_strikes[strike_min_index],
                all_strikes[strike_max_index])
    return (all_strikes[strike_min_index], all_strikes[strike_max_index])
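# A worked example for find_strikes_min_max above (hypothetical inputs):
#   all_strikes = [90, 95, 100, 105, 110, 115], stockPrice = 102, strikeCount = 4
#   -> index = 3 (three strikes lie below 102)
#   -> strike_min_index = 3 - 2 = 1, strike_max_index = 3 + 2 = 5
#   -> returns (95, 115)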
Example #21
def createIssues(issues, debug = False):
	beforeIssues = getIssueNumberList()
	afterIssues = []

	if debug:
		print "Debug mode on. Not actually creating issues in repo"
	else:
		for issue in issues:
			if issue.issue_num is not None:
				afterIssues.append(issue.issue_num)
			else:
				number = createIssue(issue)
				# inject issue number tag into TODO comment
				injectNumber(issue, number)

		util.debug_print("before issues:\n", str(beforeIssues), "\nafter issues:\n", str(afterIssues))
		removeIssuesInDiff(beforeIssues, afterIssues)
Example #22
def findIssuesInFile(file):
    lineNumber = 0
    issueList = []

    with open(file, 'r') as f:
        data = f.readlines()

    debug_print("Searching for issues in:", file,
                "(lines: {})".format(len(data)))

    while lineNumber < len(data):
        issueString = ""
        if globals.startToken in data[lineNumber]:
            # TODO: change to check if // comes just before startToken. This will cover the case where the comment comes after code in the line. Also, handle this case.
            if data[lineNumber].strip().startswith("//"):
                startingLine = lineNumber
                issueString += data[lineNumber]
                lineNumber += 1
                while lineNumber < len(data):
                    line = data[lineNumber]
                    if line.strip():  # if the line is not empty
                        if line.startswith("//"):
                            issueString += line[2:]
                        else:
                            lineNumber -= 1  # since we increment outside of this loop
                            break
                    lineNumber += 1
            elif data[lineNumber].strip().startswith("/*"):
                startingLine = lineNumber
                issueString += data[lineNumber]
                if not issueString.strip().endswith("*/"):
                    lineNumber += 1
                    while lineNumber < len(data):
                        line = data[lineNumber]
                        if line.strip():
                            issueString += line
                            if line.strip().endswith("*/"):
                                break
                        lineNumber += 1
            else:
                lineNumber += 1
                break
            issueList.append(
                parseIssueFromRawComment(issueString, startingLine, file))
        lineNumber += 1
    return issueList
Example #23
def createIssues(issues, debug=False):
    beforeIssues = getIssueNumberList()
    afterIssues = []

    if debug:
        print "Debug mode on. Not actually creating issues in repo"
    else:
        for issue in issues:
            if issue.issue_num is not None:
                afterIssues.append(issue.issue_num)
            else:
                number = createIssue(issue)
                # inject issue number tag into TODO comment
                injectNumber(issue, number)

        util.debug_print("before issues:\n", str(beforeIssues),
                         "\nafter issues:\n", str(afterIssues))
        removeIssuesInDiff(beforeIssues, afterIssues)
Example #24
def decode_apk(apk: Apk):
    '''
    decodes provided apk to a folder
    '''
    util.check_file_directory_exists(apk.apk_path, True)
    try:
        result = subprocess.check_output(['apktool', 'if',
                                          apk.apk_path]).decode()
        util.debug_print(result, flag=PRINT_FLAG)

        result = subprocess.check_output([
            'apktool', 'd', apk.apk_path, '-o',
            config.APK_FULL_PATH.split('.apk')[0], '-f'
        ]).decode()
        util.debug_print(result, flag=PRINT_FLAG)
    except subprocess.SubprocessError as error:
        print(error)
        raise ValueError("error decoding.")
Example #25
    def playerLeave(self, p):
        debug_print("playerLeave", self.mapNum, p)

        # save user data to sql
        db = sqlite3.connect('ServerResources/database.db')
        db.execute('UPDATE character SET map=?, x=?, y=?, heading=? WHERE name=?', (self.mapNum, p.pos[0], p.pos[1], p.heading, p.playerName))
        db.commit()
        db.close()

        p.map = None

        x, y = p.pos
        self.setPos(x, y, None)

        for a in self.players:
            a.cmdout.sendCharacterRemove(p.chridx)

        self.players.remove(p)
Example #26
def findIssuesInFile(file):
    lineNumber = 0
    issueList = []

    with open(file, "r") as f:
        data = f.readlines()

    debug_print("Searching for issues in:", file, "(lines: {})".format(len(data)))

    while lineNumber < len(data):
        issueString = ""
        if globals.startToken in data[lineNumber]:
            # TODO: change to check if // comes just before startToken. This will cover the case where the comment comes after code in the line. Also, handle this case.
            if data[lineNumber].strip().startswith("//"):
                startingLine = lineNumber
                issueString += data[lineNumber]
                lineNumber += 1
                while lineNumber < len(data):
                    line = data[lineNumber]
                    if line.strip():  # if the line is not empty
                        if line.startswith("//"):
                            issueString += line[2:]
                        else:
                            lineNumber -= 1  # since we increment outside of this loop
                            break
                    lineNumber += 1
            elif data[lineNumber].strip().startswith("/*"):
                startingLine = lineNumber
                issueString += data[lineNumber]
                if not issueString.strip().endswith("*/"):
                    lineNumber += 1
                    while lineNumber < len(data):
                        line = data[lineNumber]
                        if line.strip():
                            issueString += line
                            if line.strip().endswith("*/"):
                                break
                        lineNumber += 1
            else:
                lineNumber += 1
                break
            issueList.append(parseIssueFromRawComment(issueString, startingLine, file))
        lineNumber += 1
    return issueList
Example #27
def persist_stock_price_history(symbols):
        m = mongo_api()
        (dest_data, start_date, end_date) = get_market_days()
        total_recs = 0

        for symbol in symbols:
            try:
                db_stock_df = m.read_df('stockhist', True, "datetime", [], {'symbol': {'$eq': symbol}} ,{"datetime":1})
                if db_stock_df is not None and db_stock_df.shape[0] > 0 and "datetime" in db_stock_df.columns:
                    db_stock_df["market_day"] = db_stock_df["datetime"].apply(lambda x: datetime.datetime(x.year, x.month, x.day, 0,0,0))
                    curr_data = set(db_stock_df["market_day"])
                    diff_date = np.array(list(dest_data - curr_data))
                else:
                    diff_date = np.array(list(dest_data))
                diff_date = np.sort(diff_date)
                #debug_print("Differentiated dates", len(diff_date))
                if len(diff_date) <=0:
                    continue
                m.deleteMany('stockhist', {'symbol': {'$eq': symbol}})
                start_datetime = datetime.datetime.strptime(start_date, '%Y-%m-%d')
                delta = (datetime.datetime.today() - start_datetime).days + 1
                option_params = "{\"resolution\" : \"D\", \"count\": " + str(delta) + "}"
                df = getResultByType('price_history', '2048', symbol, option_params)
                if df is None:
                    print("can't get result for symbol", symbol)
                    continue
                df["datetime"] = df.t.apply(lambda x: Td.convert_timestamp_to_time(x, 's'))
                df["symbol"] = symbol
                #df = df.sort_values(['datetime'])
                # make sure we get the same shape
                df = df.sort_values('datetime',ascending=True)
                market_day = df.datetime.apply(
                    lambda x: datetime.datetime(x.year, x.month, x.day, 0, 0, 0))
                if (len(set(market_day)) < len(dest_data)):
                    print("length diff", symbol, len(market_day), len(dest_data))
                debug_print("read stock history", df.shape)
                diff_ts = []
                for d in diff_date:
                    diff_ts.append((np.datetime64(d) - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's'))
                df["ts"] = df.datetime.apply(lambda x: (np.datetime64(x.strftime('%Y-%m-%dT00:00:00Z')) - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's'))
                debug_print("df.ts", df.ts)
                debug_print("diff_ts", diff_ts)
                df = df[df["ts"].isin(diff_ts)]
                debug_print("df.shape after filter", df.shape)
                if df.shape[0] > 0:
                    m.write_df(df, 'stockhist')
                    total_recs += df.shape[0]
                else:
                    total_recs += 0
            except KeyError:
                print("error when persist stock price history")
                continue
                return 0
        return total_recs
Example #28
def write_cloudwatch_metrics(context, save_duration, delete_duration):
    cw = context[c.KEY_CLOUDWATCH]    
    cw.put_metric_data(util.get_cloudwatch_namespace(context[c.KEY_LAMBDA_FUNCTION]),
                [
                {
                    "MetricName":c.CW_METRIC_NAME_PROCESSED,
                    "Dimensions":[{'Name':c.CW_METRIC_DIMENSION_NAME_CONSUMER, 'Value': c.CW_METRIC_DIMENSION_ROWS}],
                    "Timestamp":datetime.datetime.utcnow(),
                    "Value":context[c.KEY_AGGREGATOR].rows,                                
                    "Unit":'Count'                                      
                },
                {
                    "MetricName":c.CW_METRIC_NAME_PROCESSED,
                    "Dimensions":[{'Name':c.CW_METRIC_DIMENSION_NAME_CONSUMER, 'Value': c.CW_METRIC_DIMENSION_BYTES}],
                    "Timestamp":datetime.datetime.utcnow(),
                    "Value":context[c.KEY_AGGREGATOR].bytes_uncompressed,                                
                    "Unit":'Bytes'                    
                },
                {
                    "MetricName":c.CW_METRIC_NAME_PROCESSED,
                    "Dimensions":[{'Name':c.CW_METRIC_DIMENSION_NAME_CONSUMER, 'Value': c.CW_METRIC_DIMENSION_MSG}],
                    "Timestamp":datetime.datetime.utcnow(),
                    "Value":context[c.KEY_AGGREGATOR].messages,                                
                    "Unit":'Count'                    
                },
                {
                    "MetricName":c.CW_METRIC_NAME_PROCESSED,
                    "Dimensions":[{'Name':c.CW_METRIC_DIMENSION_NAME_CONSUMER, 'Value': c.CW_METRIC_DIMENSION_SAVE}],
                    "Timestamp":datetime.datetime.utcnow(),
                    "Value":save_duration,                                
                    "Unit":'Seconds'                    
                },
                {
                    "MetricName":c.CW_METRIC_NAME_PROCESSED,
                    "Dimensions":[{'Name':c.CW_METRIC_DIMENSION_NAME_CONSUMER, 'Value': c.CW_METRIC_DIMENSION_DEL}],
                    "Timestamp":datetime.datetime.utcnow(),
                    "Value":delete_duration,                                
                    "Unit":'Seconds'                    
                },
                ]
            )    
    if context.get(c.KEY_WRITE_DETAILED_CLOUDWATCH_EVENTS, False):
        util.debug_print("Sending detailed CloudWatch events")
        write_detailed_cloud_watch_event_logs(context, cw)
Example #29
    def subprocess_call_get(self, value, namespace: Namespace):
        '''
            gets a value. for example,
            >>> adb -s emulator_name shell settings get system values:
        '''
        # adb shell settings get system accelerometer_rotation
        options = [
            ADB, '-s', self.emulator_name, 'shell', 'settings', 'get',
            namespace.value
        ]
        values = value.split(' ')
        options = options + values
        util.debug_print(options, flag=PRINT_FLAG)
        try:
            value = subprocess.check_output(options).decode().strip()
            print(value)
            return value
        except subprocess.CalledProcessError as exception:
            print(exception)
Example #30
def read_one_stock_quote(symbol, currDate, projection):
    m = mongo_api()
    date_filter = construct_day_filter(currDate)
    assert (type(projection) == str)
    try:
        db_df = m.read_df('stockcandles', False, [projection], [],
                          {'$and': [{
                              'symbol': {
                                  '$eq': symbol
                              }
                          }, date_filter]}, None)
        if db_df.shape is not None and db_df.shape[0] >= 1:
            debug_print("found price for ", currDate, " at ", db_df.iloc[0, 1])
            return db_df.iloc[0, 1]
        else:
            print("can't find specific symbol", symbol)
            return None
    except KeyError:
        print("error when reading single stock price history")
        return None
Example #31
def start_emulator() -> bool:
    '''
        starts emulator
    '''
    command = config.EMULATOR
    global emulator_model
    if emulator_manager.adb_instances_manager():
        util.debug_print('already emulators are running.', flag=PRINT_FLAG)
        return True
    else:
        util.debug_print(
            str.format("No emulator instance running. starting {} at port {}",
                       emulator_model, emulator_port), flag=PRINT_FLAG)
        api_commands.adb_start_server_safe()
        emulator_manager.emulator_start_avd(emulator_port, emulator_model)
        # subprocess.Popen([command,
        #                   '-port', str(emulator_port), '-avd',
        #                   emulator_name, '-use-system-libs'],
        #                  stdout=subprocess.PIPE)
        emulator_manager.check_avd_booted_completely(emulator_port)
        return True
Example #32
def high_state(threshold, high_threshold, patience):
    global g_patience
    global g_state
    util.debug_print('HIGH state with patience: ' + str(patience))
    load = compute_cluster_load()

    while load > high_threshold:
        if load > threshold:
            patience -= 2
            log_load(load, 'very high', patience)
            g_state = 'very high'
        else:
            patience -= 1
            log_load(load, 'high', patience)
            g_state = 'high'

        g_patience = patience

        time.sleep(SLEEP)
        util.debug_print('patience: ' + str(patience) + ',\tload: ' +
                         str(load))
        if patience < 0:
            util.debug_print('patience is 0, UPSIZE!')
            upsize_cluster()
            time.sleep(BIG_SLEEP)
            break

        load = compute_cluster_load()

    return patience
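# A worked example of the patience loop above (hypothetical values): with
# patience = 3, threshold = 80 and high_threshold = 64, a sampled load of 90
# decrements patience by 2 ('very high') while a load of 70 decrements it by 1
# ('high'); once patience drops below 0 the cluster is upsized and the loop exits.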
Example #33
def try_with_backoff(context, cmd, **kwargs):
    # http://www.awsarchitectureblog.com/2015/03/backoff.html
    backoff = context[
        c.
        KEY_BACKOFF_BASE_SECONDS] if c.KEY_BACKOFF_BASE_SECONDS in context else 0.1
    max_seconds = context[
        c.
        KEY_BACKOFF_MAX_SECONDS] if c.KEY_BACKOFF_MAX_SECONDS in context else 20.0
    max_retry = context[
        c.KEY_BACKOFF_MAX_TRYS] if c.KEY_BACKOFF_MAX_TRYS in context else 5
    count = 1
    while True:
        try:
            response = cmd(**kwargs)
            __check_response(response)
            util.debug_print("{}\n{}".format(cmd, kwargs))
            util.debug_print(response)
            return response
        except ClientError as e:
            __print_error(e, cmd, kwargs)
            if e.response['Error']['Code'] == 'ValidationException':
                raise e
        except Exception as general_error:
            __print_error(general_error, cmd, kwargs)

        backoff = min(max_seconds, random.uniform(max_seconds, backoff * 3.0))
        util.debug_print("Backing off for {}s".format(backoff))
        time.sleep(backoff)
        count += 1
        if count > max_retry:
            print response
            raise Exception("Max retry attempts have been made")
Example #34
def persist_option_dirs(dirName, symbols, pattern):
    debug_print("reading", dirName)
    m = mongo_api()
    num_stats_rec_inserted = 0
    for (root, dirs, files) in os.walk(dirName, topdown=True):
        if root == 'historical':
            continue
        files = np.sort(files)
        #print("len files", len(files))
        for fi in files:
            (file_type, d_cur, curr_symbol,
             fi) = determine_file_origin(root, fi)
            #print("file origin", file_type)
            if pattern is not None and fi.find(pattern) == -1:
                #print("file not in pattern", fi)
                continue

            if curr_symbol not in symbols:
                debug_print("skipped symbol ", curr_symbol)
                continue
            if file_type == 'TDA':
                print("processing:", fi)
                df = pd.read_csv(fi)
                x = persist_td_option_file(df, curr_symbol, d_cur, m)
            elif file_type == 'OPTIONISTICS':
                print("processing:", fi)
                df = pd.read_csv(fi)
                x = persist_optionistics_file(df, curr_symbol, d_cur, m)
            else:
                debug_print("skipping", fi)
            num_stats_rec_inserted += x
    print("number of records inserted to optionhist", num_stats_rec_inserted)
Example #35
def high_state(threshold, high_threshold, patience):
    global g_patience
    global g_state
    util.debug_print('HIGH state with patience: ' + str(patience))
    load = compute_cluster_load()

    while load > high_threshold:
        if load > threshold:
            patience -= 2
            log_load(load, 'very high', patience)
            g_state = 'very high'
        else:
            patience -= 1
            log_load(load, 'high', patience)
            g_state = 'high'

        g_patience = patience

        time.sleep(SLEEP)
        util.debug_print('patience: ' + str(patience) + ',\tload: ' + str(load))
        if patience < 0:
            util.debug_print('patience is 0, UPSIZE!')
            upsize_cluster()
            time.sleep(BIG_SLEEP)
            break
        
        load = compute_cluster_load()

    return patience
Example #36
def main():
    print_instructions()
    while True:
        try:
            expr = input("Enter expression: \t")
            util.debug_print("Expression:\t\t" + expr, 1)
            if expr == "quit":
                exit_ui(0)
            # apply replacements before syntax check
            expr = syn.modify_with_replacements(expr)

            if not syn.has_correct_syntax(expr):
                print(NEWLINE + ERROR_SYNTAX + NEWLINE + expr + NEWLINE)
                continue
            expr = syn.modify_input(expr)
            util.debug_print("Syntax mod:\t\t" + expr, 1)
            print("Derivative:\t\t" + derive_ui(expr) + NEWLINE)
        except KeyboardInterrupt:
            exit_ui(1)
        except Exception as e:
            print(e)
            exit_ui(1)
Example #37
def _evaluate_expression(
    expression: BooleanRuleExpression,
    results: Dict[PatternId, List[SgrepRange]],
    ranges_left: Set[Range],
    flags: Optional[Dict[str, Any]] = None,
) -> Set[Range]:
    if (
        expression.operator == OPERATORS.AND_EITHER
        or expression.operator == OPERATORS.AND_ALL
    ):
        assert (
            expression.children is not None
        ), f"{pattern_name_for_operator(OPERATORS.AND_EITHER)} or {pattern_name_for_operator(OPERATORS.AND_ALL)} must have a list of subpatterns"

        # recurse on the nested expressions
        if expression.operator == OPERATORS.AND_EITHER:
            # remove anything that does not equal one of these ranges
            evaluated_ranges = [
                _evaluate_expression(expr, results, ranges_left.copy(), flags)
                for expr in expression.children
            ]
            ranges_left.intersection_update(flatten(evaluated_ranges))
        else:
            # chain intersection eagerly; intersect for every AND'ed child
            for expr in expression.children:
                remainining_ranges = _evaluate_expression(
                    expr, results, ranges_left.copy(), flags
                )
                ranges_left.intersection_update(remainining_ranges)

        debug_print(f"after filter `{expression.operator}`: {ranges_left}")
    else:
        assert (
            expression.children is None
        ), f"only `{pattern_name_for_operator(OPERATORS.AND_EITHER)}` or `{pattern_name_for_operator(OPERATORS.AND_ALL)}` expressions can have multiple subpatterns"
        ranges_left = _evaluate_single_expression(
            expression, results, ranges_left, flags
        )
    return ranges_left
Example #38
    def set_queue_url(self, lowest_load_queue):
        if len(self.__queue_urls) == 0:
            queues = self.get_queues()
        else:
            queues = self.__queue_urls

        message_count = c.MAX_INT if lowest_load_queue else 0
        idx_to_use = 0
        is_all_under_load = True
        idx = 0
        self.__number_of_queues = len(queues)
        for queue_url in queues:
            response = self.get_queue_attributes(queue_url)
            messages_to_process = int(
                response['Attributes']['ApproximateNumberOfMessages'])
            inflight_messages = int(response['Attributes']
                                    ['ApproximateNumberOfMessagesNotVisible'])
            if lowest_load_queue:
                #find the least stressed queue based on number of messages due to be processed
                if messages_to_process < message_count:
                    message_count = messages_to_process
                    idx_to_use = idx
            else:
                #find the most stressed queue based on number of messages due to be processed that is not above 50% inflight messages
                if messages_to_process > message_count and inflight_messages < self.__context[
                        c.KEY_FIFO_GROWTH_TRIGGER]:
                    message_count = messages_to_process
                    idx_to_use = idx
            util.debug_print((
                "Queue '{}' has {} in-flight messages and has {} messages to process."
            ).format(queue_url, inflight_messages, messages_to_process))

            is_all_under_load &= (inflight_messages >
                                  self.__context[c.KEY_FIFO_GROWTH_TRIGGER])
            idx += 1

        self.__is_all_under_load = is_all_under_load
        self.__queue_urls = queues
        self.__queue_url = queues[idx_to_use]
Example #39
def message(request, compression_mode, sensitivity_type, payload_type, data):    
    p_lambda = Lambda({})    
    print "Target lambda {}".format(os.environ[c.ENV_LAMBDA_PRODUCER])
    util.debug_print("Invoking lambda {} with payload size: {} Compression mode: {}, Sensitivity Type: {}, Payload Type: {}".format(os.environ[c.ENV_LAMBDA_PRODUCER], len(data), compression_mode, sensitivity_type,  payload_type))
    payload_data = {c.API_PARAM_SOURCE_IP:request.event[c.API_PARAM_SOURCE_IP], c.SQS_PARAM_SENSITIVITY_TYPE:  sensitivity_type, c.SQS_PARAM_PAYLOAD_TYPE:  payload_type, c.SQS_PARAM_COMPRESSION_TYPE:  compression_mode, c.API_PARAM_PAYLOAD : data }       
    
    response = p_lambda.invoke_sync(os.environ[c.ENV_LAMBDA_PRODUCER], payload_data)        
    
    sb = response['Payload']
    sb = json.loads(sb.read().decode("utf-8"))
    error = sb.get('errorMessage', None)
    
    returnObj={        
        'StatusCode': response['StatusCode'],        
        'PhysicalResourceId': os.environ[c.ENV_LAMBDA_PRODUCER]    
    }

    if error and len(error) > 0:
        print "Error:", sb
        raise errors.ClientError("An error occurred invoking the SQS event producer.  Please check the cloud watch logs.");

    return returnObj
Example #40
def insert_one_symbol(df, s, m, df_in_optionstats):
    debug_print("persist for symbol", s)
    df_cur_symbol = df.loc[df.UnderlyingSymbol == s]
    df_cur_symbol_in_optionstats = df_in_optionstats.loc[
        df_in_optionstats["symbol"] == s]
    dates_stats = find_dates(df_cur_symbol, s, df_cur_symbol_in_optionstats)
    df_stats_out = None
    num_d = 1
    for d in dates_stats:
        d_datetime = pd.to_datetime(d)
        cur_df = df_cur_symbol.loc[df_cur_symbol.data_date == d_datetime]
        stock_closing_price = np.min(cur_df["UnderlyingPrice"])
        cur_df = filter_df_by_count(cur_df, 30, stock_closing_price)
        df_cur_stat = computeOptionHist(cur_df, s)
        if df_cur_stat.shape[0] > 1:
            print("wrong duplicates", df_cur_stat, d, s)
            exit(1)
        df_stats_out = append_df(df_stats_out, df_cur_stat)
        num_d += 1
    if df_stats_out is not None and df_stats_out.shape[0] > 0:
        return df_stats_out
    return None
Example #41
def adb_start_server_safe():
    '''
    checks if `adb server` is running. if not, starts it.
    '''
    try:
        status = subprocess.check_output(['pidof', ADB])
        util.debug_print('adb already running in PID: ' + status.decode(),
                         flag=PRINT_FLAG)
        return True
    except subprocess.CalledProcessError as exception:
        print('adb is not running, returned status: ' +
              str(exception.returncode))

        print('adb was not started. starting...')

        try:
            subprocess.check_output([ADB, 'start-server'])

            return True
        except subprocess.SubprocessError as exception:
            print('something disastrous happened. maybe ' + ADB +
                  ' was not found')
            return False
Example #42
def threads_to_run(emulator: Emulator, apk: Apk, fuzz: Fuzzer, will_monkey: bool) -> List:
    '''
        runs the threads after checking permissions.
    '''
    threads = []
    global contextual_events
    util.debug_print(apk.permissions, flag=PRINT_FLAG)
    emulator_name = 'emulator-' + emulator.port
    if "android.permission.INTERNET" in apk.permissions or \
            "android.permission.ACCESS_NETWORK_STATE" in apk.permissions:
        util.debug_print("Internet permission detected", flag=PRINT_FLAG)
        network_delay_interval_events = fuzz.generate_step_interval_event(
            NetworkDelay)
        contextual_events += len(network_delay_interval_events)
        threads.append(Thread(target=fuzz.random_network_delay, args=(
            config.LOCALHOST, emulator, network_delay_interval_events)))
        network_speed_interval_event = fuzz.generate_step_interval_event(
            NetworkStatus)
        contextual_events += len(network_speed_interval_event)
        threads.append(Thread(target=fuzz.random_network_speed, args=(
            config.LOCALHOST, emulator, network_speed_interval_event)))
        airplane_mode_interval_events = fuzz.generate_step_interval_event(
            Airplane)
        contextual_events += len(airplane_mode_interval_events)
        threads.append(Thread(
            target=fuzz.random_airplane_mode_call,
            args=(emulator_name, airplane_mode_interval_events)))
    if "android.permission.ACCESS_NETWORK_STATE" in apk.permissions:
        util.debug_print("access_network_state detected", flag=PRINT_FLAG)
        gsm_profile_interval_events = fuzz.generate_step_uniforminterval_event(
            GsmProfile)
        contextual_events += len(gsm_profile_interval_events)
        threads.append(Thread(target=fuzz.random_gsm_profile, args=(
            config.LOCALHOST, emulator, config.UNIFORM_INTERVAL, gsm_profile_interval_events)))

    user_rotation_interval_events = fuzz.generate_step_interval_event(
        UserRotation)
    contextual_events += len(user_rotation_interval_events)
    threads.append(Thread(
        target=fuzz.random_rotation, args=((emulator_name, user_rotation_interval_events))))

    key_event_interval_events = fuzz.generate_step_interval_event(
        KeyboardEvent)
    contextual_events += len(key_event_interval_events)
    threads.append(Thread(
        target=fuzz.random_key_event, args=((emulator_name, key_event_interval_events))))
    if will_monkey:
        monkey = AdbMonkey(emulator, apk,
                           config.SEED, config.DURATION)
        thread_monkey = Thread(target=monkey.start_monkey)

        threads.append(thread_monkey)
    return threads
Example #43
def simplify_sub(expr):
    util.debug_print("Simplify:\t" + expr, 10)
    expr_simp = ""
    parts_split = lowest_precedence_operator_split(expr)
    util.debug_print("After lowest Precedence operator split:", 100)
    util.debug_print(parts_split, 100)
    parts = []
    parts_plus = []
    parts_minus = []
    op = ""

    # if no split could be made, there are brackets, functions, or there is nothing to simplify
    if not len(parts_split) == 2:
        # look for brackets
        for bracket in BRACKETS:
            if expr[0] == bracket:
                expr_simp += bracket + simplify_sub(
                    expr[1:-1]) + BRACKETS.get(bracket)
                return expr_simp
        # look for functions
        elem_func = util.get_elem_func(expr, 0)
        if elem_func != "":
            expr_simp += elem_func + "{" + simplify_sub(
                expr[len(elem_func) + 1:-1]) + "}"
            return expr_simp
        # if also no functions could be found, there is nothing to simplify
        return expr

    op = parts_split[0]
    parts = parts_split[1]
    if op == "+" or op == "-":
        parts_plus = parts[0]
        parts_minus = parts[1]
        parts_plus, parts_minus = simplify_plus_minus_parts(
            parts_plus, parts_minus)
        for part in parts_plus:
            expr_simp += simplify_sub(part) + '+'
        # remove last plus sign
        expr_simp = expr_simp[:-1]
        for part in parts_minus:
            expr_simp += '-' + simplify_sub(part)
    else:
        if op == "*":
            parts = simplify_multiplication_parts(parts)
        elif op == "^":
            parts = simplify_potential_parts(parts)
        for part in parts:
            expr_simp += simplify_sub(part) + op
        # remove last operator
        expr_simp = expr_simp[:-len(op)]
    return expr_simp
Example #44
    def handleData(self, prot):
        buf = prot._ao_inbuff
        cmd = None

        """
        Los comandos consumen los bytes del buffer de entrada, si faltan
        datos se dispara un CommandsDecoderException o ByteQueueError.
        """

        try:
            try:
                while len(buf) > 0:
                    cmd = buf.peekInt8()

                    if cmd < 0 or cmd >= len(self.cmds):
                        debug_print("cmd out of range:", cmd)
                        raise CriticalDecoderException()

                    if self.cmds[cmd] is None:
                        debug_print("cmd not implemented:", cmd, \
                            "should be:", clientPacketsFlip.get(cmd, '?'))
                        raise CriticalDecoderException()

                    # Mark the current position in case a rollback is needed.
                    buf.mark()

                    # Invoke the handler for command cmd.
                    self.cmds[cmd](prot, buf)


                # The commit() operation destroys the buffer data, so before
                # calling it I have to be sure that all the data for the
                # current command has been read.
                #
                # Calling it at the end of every command could become slow
                # if several commands are queued.

                buf.commit()

                prot.lastHandledPacket = time.time()

            except:
                buf.rollback()
                raise
        except ByteQueueError, e:
            pass
            debug_print("ByteQueueError", e)
Example #45
    def handleData(self, prot):
        buf = prot._ao_inbuff
        cmd = None
        """
        Los comandos consumen los bytes del buffer de entrada, si faltan
        datos se dispara un CommandsDecoderException o ByteQueueError.
        """

        try:
            try:
                while len(buf) > 0:
                    cmd = buf.peekInt8()

                    if cmd < 0 or cmd >= len(self.cmds):
                        debug_print("cmd out of range:", cmd)
                        raise CriticalDecoderException()

                    if self.cmds[cmd] is None:
                        debug_print("cmd not implemented:", cmd, \
                            "should be:", clientPacketsFlip.get(cmd, '?'))
                        raise CriticalDecoderException()

                    # Mark the current position in case a rollback is needed.
                    buf.mark()

                    # Invoke the handler for command cmd.
                    self.cmds[cmd](prot, buf)

                # The commit() operation destroys the buffer data, so before
                # calling it I have to be sure that all the data for the
                # current command has been read.
                #
                # Calling it at the end of every command could become slow
                # if several commands are queued.

                buf.commit()

                prot.lastHandledPacket = time.time()

            except:
                buf.rollback()
                raise
        except ByteQueueError, e:
            pass
            debug_print("ByteQueueError", e)
Example #46
def low_state(low_threshold, patience):
    global g_patience
    util.debug_print('LOW state with patience: ' + str(patience))
    load = compute_cluster_load()
    while load < low_threshold:
        patience -= 1
        log_load(load, 'low', patience)
        g_patience = patience
        time.sleep(SLEEP)
        
        util.debug_print('patience: ' + str(patience) + ',\tload: ' + str(load))
        if patience < 0:
            util.debug_print('patience is 0, downsize!')
            downsize_cluster()
            time.sleep(BIG_SLEEP)
            break

        load = compute_cluster_load()

    return patience
Example #47
def low_state(low_threshold, patience):
    global g_patience
    util.debug_print('LOW state with patience: ' + str(patience))
    load = compute_cluster_load()
    while load < low_threshold:
        patience -= 1
        log_load(load, 'low', patience)
        g_patience = patience
        time.sleep(SLEEP)

        util.debug_print('patience: ' + str(patience) + ',\tload: ' +
                         str(load))
        if patience < 0:
            util.debug_print('patience is 0, downsize!')
            downsize_cluster()
            time.sleep(BIG_SLEEP)
            break

        load = compute_cluster_load()

    return patience
Example #48
	def _send_cmd(self, cmd):
		self._conn.write(cmd + NL)
		util.debug_print("[Kanraoke] send: " + cmd)
		msg = self._conn.read_until('> ')
		util.debug_print("[Kanraoke] rec: " + msg)
		return msg[:-3]
Example #49
from typing import Tuple

import sha1
import util
from s4c28 import keyed_mac

debug = util.debug_print(False)

KEY = util.random_word()


def to_registers(hexdigest) -> Tuple:
    # TODO: needs tests
    n = int(hexdigest, 16)

    result = []
    for i in range(5):
        r = n & 0xFFFFFFFF
        result.append(r)
        n >>= 32

    return tuple(reversed(result))


def length_extension_attack(original_message: bytes, existing_hash: str,
                            guessed_length: int):
    extra = b";admin=true"
    glue_padding = compute_glue_padding(original_message, guessed_length)
    new_msg = original_message + glue_padding + extra

    prefix_length = guessed_length + len(original_message) + len(glue_padding)
Example #50
    def do_execute_direct(self, code, silent=False):
        """
        :param code:
            The code to be executed.
        :param silent:
            Whether to display output.
        :return:
            Return value, or None

        MetaKernel code handler.
        """

        if self.toreeProfile is None:
            debug_print('do_execute_direct: Not connected to a Toree instance')
            return 'Notebook is offline, due to no resource availability on the server. Please try again later or contact an Administrator'

        if not self.toreeClient.is_alive():
            debug_print('do_execute_direct: Kernel client is not alive')
            return 'Not connected to a Kernel'

        if code is None or code.strip() is None:
            return None

        if not code.strip():
            return None

        if not self.isReady:
            retries = self.configManager.get_as_int(
                'toree.initialization.retries', 3)
            retry_interval = self.configManager.get_as_int(
                'toree.initialization.retry.interval', 5)

            debug_print(
                'Trying to verify Toree has been initialized with options: retries %d times / retry interval %d seconds'
                % (retries, retry_interval))
            n = 1
            while n <= retries:
                debug_print('Trying to connect to remote Toree for %d time' %
                            n)
                if self.toreeClient.is_ready():
                    self.isReady = True
                    break
                else:
                    time.sleep(retry_interval)

                n += 1

            if not self.isReady:
                return 'Kernel is not ready to process yet'

        debug_print('Evaluating: ' + code.strip())

        retval = None
        try:
            retval = self.toreeClient.eval(code.rstrip(),
                                           self.executionTimeout)
        except Exception as exc:
            # this should be final solution
            # return ExceptionWrapper(exc)
            self.Error(exc)

        if retval is None:
            return
        elif isinstance(retval, HtmlOutput):
            self.Display(HTML(retval.output))
            return
        elif isinstance(retval, str):
            return TextOutput(retval)
        else:
            return retval
Example #51
def main(threshold):
    global g_patience
    global g_state

    high_threshold = HIGH_STATE_PERCENTAGE * threshold
    low_threshold = LOW_STATE_PERCENTAGE * threshold
    patience = INITIAL_PATIENCE
    util.debug_print('high_threshold: ' + str(high_threshold))
    util.debug_print('low_threshold: ' + str(low_threshold))

    last_state = None

    init_log(BALANCER_LOG_FILE_PATH)

    util.debug_print('Starting the main load balancer loop...')
    while True:
        load = compute_cluster_load()
        util.debug_print('current load is: ' + str(load))
        util.debug_print('last_state: ' + str(last_state))
        if load < low_threshold:
            if last_state != 'low':
                last_state = 'low'
                patience = INITIAL_PATIENCE
            patience = low_state(low_threshold, patience)
        elif load > high_threshold:
            if last_state != 'high':
                last_state = 'high'
                patience = INITIAL_PATIENCE
            patience = high_state(threshold, high_threshold, patience)
        else:
            util.debug_print('GOOD state with patience: ' + str(patience))
            patience += 1
            time.sleep(SLEEP)
            util.debug_print('patience: ' + str(patience) + ',\tload: ' + str(load))
            log_load(load, 'good', patience)
            
        if patience > INITIAL_PATIENCE or patience <= 0:
            patience = INITIAL_PATIENCE
Example #52
                prot.lastHandledPacket = time.time()

            except:
                buf.rollback()
                raise
        except ByteQueueError, e:
            pass
            debug_print("ByteQueueError", e)
        except ByteQueueInsufficientData, e:
            pass
        except CommandsDecoderException, e:
            pass
            # debug_print("CommandsDecoderException")
        except CriticalDecoderException, e:
            if cmd is not None:
                debug_print("CriticalDecoderException", cmd, \
                    clientPacketsFlip.get(cmd, '?'), e)
            raise
        except Exception, e:
            debug_print("handleData Exception: ", e)
            raise

    def CheckLogged(fOrig):
        """Decorator para verificar que el usuario esta logeado"""
        def fNew(self, prot, buf):
            if prot.player is None:
                raise CriticalDecoderException()
            return fOrig(self, prot, buf, prot.player)
        return fNew

    def CheckNotLogged(fOrig):
        """Decorator para verificar que el usuario no esta logeado"""
Example #53
def _evaluate_single_expression(
    expression: BooleanRuleExpression,
    results: Dict[PatternId, List[SgrepRange]],
    ranges_left: Set[Range],
    flags: Optional[Dict[str, Any]] = None,
) -> Set[Range]:

    assert expression.pattern_id, f"<internal error: expected pattern id: {expression}>"
    results_for_pattern = [
        x.range for x in results.get(expression.pattern_id, [])
    ]

    if expression.operator == OPERATORS.AND:
        # remove all ranges that don't equal the ranges for this pattern
        return ranges_left.intersection(results_for_pattern)
    elif expression.operator == OPERATORS.AND_NOT:
        # remove all ranges that DO equal the ranges for this pattern
        # difference_update = Remove all elements of another set from this set.
        return ranges_left.difference(results_for_pattern)
    elif expression.operator == OPERATORS.AND_INSIDE:
        # remove all ranges (not enclosed by) or (not equal to) the inside ranges
        output_ranges = set()
        for arange in ranges_left:
            for keep_inside_this_range in results_for_pattern:
                is_enclosed = keep_inside_this_range.is_enclosing_or_eq(arange)
                # print(
                #    f'candidate range is {arange}, needs to be `{operator}` {keep_inside_this_range}; keep?: {keep}')
                if is_enclosed:
                    output_ranges.add(arange)
                    break  # found a match, no need to keep going
        debug_print(f"after filter `{expression.operator}`: {output_ranges}")
        return output_ranges
    elif expression.operator == OPERATORS.AND_NOT_INSIDE:
        # remove all ranges enclosed by or equal to
        output_ranges = ranges_left.copy()
        for arange in ranges_left:
            for keep_inside_this_range in results_for_pattern:
                if keep_inside_this_range.is_enclosing_or_eq(arange):
                    output_ranges.remove(arange)
                    break
        debug_print(f"after filter `{expression.operator}`: {output_ranges}")
        return output_ranges
    elif expression.operator == OPERATORS.WHERE_PYTHON:
        if not flags or not flags.get(RCE_RULE_FLAG):
            print_error_exit(
                f"at least one rule needs to execute arbitrary code; this is dangerous! if you want to continue, enable the flag: {RCE_RULE_FLAG}"
            )
        assert expression.operand, "must have operand for this operator type"

        output_ranges = set()
        # Look through every range that hasn't been filtered yet
        for sgrep_range in list(flatten(results.values())):
            # Only need to check where-python clause if the range hasn't already been filtered

            if sgrep_range.range in ranges_left:
                debug_print(
                    f"WHERE is {expression.operand}, metavars: {sgrep_range.metavars}"
                )
                if _where_python_statement_matches(expression.operand,
                                                   sgrep_range.metavars):
                    output_ranges.add(sgrep_range.range)
        debug_print(f"after filter `{expression.operator}`: {output_ranges}")
        return output_ranges

    else:
        raise NotImplementedError(f"unknown operator {expression.operator}")
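
The AND / AND_NOT branches above are plain set algebra over ranges. A toy sketch with integers standing in for semgrep's Range objects (the names here are hypothetical):

ranges_left = {1, 2, 3, 4}
results_for_pattern = [2, 3, 5]

# OPERATORS.AND keeps only the ranges this pattern also matched
print(ranges_left.intersection(results_for_pattern))   # {2, 3}

# OPERATORS.AND_NOT drops every range this pattern matched
print(ranges_left.difference(results_for_pattern))     # {1, 4}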
Example #54
0
def removeDecommissionedMachine(slaveName = None):
    '''
    Destroys a decommissioned machine in the cluster and removes its traces from the excludes and slaves files
    INPUT: String slaveName (optional)
    OUTPUT: boolean (True if successful, False otherwise)
    '''
    util.debug_print('calling downsize removeDecommissionedMachine()')
    
    # if not set, then get from excludes list
    if slaveName is None:
        util.debug_print('no slaveName passed as parameter')
        # get the excludes file from master
        excludes_file_content = util.get_file_content(config.DEFAULT_DESTINATION_EXCLUDES_FILENAME)
        
        # no magic, just get last one
        if len(excludes_file_content) > 0:
            slaveName = excludes_file_content[-1].strip()
        else:
            util.debug_print('no slaveName passed in as argument AND the excludes file is empty!')
            return False
        
    # remove that slavename from excludes
    remove_line = slaveName + "\n"
    util.debug_print('removing from excludes file the line: ' + remove_line)
    update_excludes = util.updateFile('excludes', remove_line, addLine = False)
        
    # remove that name from slaves file
    util.debug_print('removing from slaves file the line: ' + str(remove_line))
    update_slaves = util.updateFile('slaves', remove_line, addLine = False)
    
    # get vmid from slaveName
    vmid = util.get_vm_id_by_name(slaveName)
    
    # now destroy the VM
    util.debug_print('Now we will be trying to destroy the machine with ID: ' + str(vmid))
    result = api.destroyVirtualMachine({'id': vmid})
    
    util.debug_print('waiting for the machine to finish being destroyed')
    waitResult = util.waitForAsync(result.get('jobid'))
    
    # since we destroyed the vm, we can remove from master's /etc/hosts file
    hosts = util.get_file_content(config.DEFAULT_DESTINATION_HOSTS_FILENAME)
    checker = re.compile('.*' + slaveName + '\n')
    to_be_removed_hosts_line = [line for line in hosts if checker.match(line) is not None]
    util.debug_print('remove line:' + str(to_be_removed_hosts_line) + ' from /etc/hosts file')
    util.updateFile('hosts', to_be_removed_hosts_line[0], addLine = False)

    util.debug_print('Done destroying VM.')    
    return True
Example #55
0
def decommission(also_stop_vm = True):
    '''
    Copies one slave name from the slaves list to the excludes list and runs the Hadoop refresh commands
    Input: boolean also_stop_vm (optional)
    Output: False if the cluster is already at its minimum size, None otherwise
    '''
    util.debug_print('Trying to decommission')
    
    # get all slave names in slaves file
    all_slave_names = map(str.strip, util.get_file_content(config.DEFAULT_DESTINATION_SLAVES_FILENAME))
    util.debug_print('all_slave_names:')
    util.debug_print(all_slave_names)
    
    # get excludes content from master
    excludes_list = map(str.strip, util.get_file_content(config.DEFAULT_DESTINATION_EXCLUDES_FILENAME))
    util.debug_print('current excludes list:')
    util.debug_print(excludes_list)
    
    # basic sanity check to see if we should try to decommission 
    remaining_slaves =  len(all_slave_names) - len(excludes_list)
    if remaining_slaves <= config.MINIMUM_DATANODE_SIZE:
        util.debug_print('We have reached the minimum cluster size of ' + str(remaining_slaves) + ', skipping decommissioning.')
        return False
    
    # ok, now we know we can remove some 
    removable_slaves = list(set(all_slave_names) - set(excludes_list))
    max_name = get_max_slavename(removable_slaves, return_all=False)
    util.debug_print('next slavename to remove is: ' + max_name)
    
    # ok, now we have the slave we want to decommission, update the excludes file
    newLine = max_name + "\n"
    util.updateFile('excludes', newLine)

    # run commands on the master that make the decommission happen
    ssh = SSHWrapper.SSHWrapper(config.MASTER_IP)
    
    util.debug_print('trying to hdfs dfsadmin -refreshNodes')
    outmsg, errmsg = ssh.sudo_command('sudo -S su hduser -c "/home/hduser/hadoop-2.7.0/bin/hdfs dfsadmin -refreshNodes"')

    util.debug_print('trying to yarn rmadmin -refreshNodes')
    outmsg, errmsg = ssh.sudo_command('sudo -S su hduser -c "/home/hduser/hadoop-2.7.0/bin/yarn rmadmin -refreshNodes"')
    
    if also_stop_vm:
        stopDecommissionedMachine(max_name)
Example #56
0
 def _handleData(self):
     try:
         cmdDecoder.handleData(self)
     except CriticalDecoderException, e:
         debug_print("CriticalDecoderException")
         self.loseConnection()
Example #57
0
 def connectionsLimitReached(self):
     connLimit = ServerConfig.getint('Core', 'ConnectionsCountLimit')
     if len(self._connections) >= connLimit:
         debug_print("Limite de conexiones alcanzado: %d" % connLimit)
         return True
     return False
Example #58
0
def stopDecommissionedMachine(slaveName = None):
    '''
    Checks whether decommissioning has completed, then stops the VM
    Input: String slaveName (optional)
    Output: boolean (True on success)
    '''
    util.debug_print('calling on downsize stopDecommissionedMachine()')
    
    if slaveName is None:
        util.debug_print('no slaveName passed as parameter')
        # get the excludes file from master
        excludes_file_content = util.get_file_content(config.DEFAULT_DESTINATION_EXCLUDES_FILENAME)
        
        # no magic, just get last one
        if len(excludes_file_content) > 0:
            slaveName = excludes_file_content[-1].strip()
        else:
            util.debug_print('no slaveName passed in as argument AND the excludes file is empty!')
            return False
    
    
    vmid = util.get_vm_id_by_name(slaveName)
    
    # connect to master 
    ssh = SSHWrapper.SSHWrapper(config.MASTER_IP)
    
    # get status
    util.debug_print('Trying to get report on status of machines... ')
    outmsg, errmsg = ssh.sudo_command('sudo -S su hduser -c "/home/hduser/hadoop-2.7.0/bin/hdfs dfsadmin -report"')

    # find the section for the slave we are interested in
    # eg line is "Name: 199.60.17.186:50010 (dlw-Slave71)"
    checker = re.compile(config.REPORT_DATANODE_STATUS_STARTING_REGEX + str(slaveName) + '\)')
    util.debug_print('starting while loop to check whether the VM has finished decommissioning')
    while True:
        # get line for checking machine status
        line = ''
        commission_line = 0
        # ok, I admit it might not be the best to put this here as the report MIGHT not change, but who knows... slower but safer for now
        for line in outmsg:
            matchobj = checker.match(line)
            if matchobj:
                util.debug_print('found the line! it is: ' + str(line))
                commission_line = outmsg.index(line) + 2
                break

        line = outmsg[commission_line]
        util.debug_print('on line: ' + str(commission_line) + ' status of decommissioning machine is: ' + str(line))
        if 'Decommissioned' in line:
            util.debug_print('VM is finally decommissioned...., trying to stop VM now')
            result = api.stopVirtualMachine({'id': vmid})
             
            waitResult = util.waitForAsync(result.get('jobid'))
             
            if waitResult != True: # whoops something went wrong!
                return waitResult
                        
            # let's get out of here!
            util.debug_print('DONE, we waited for it to finish decommissioning, then we stopped the VM!')
            break

        # ok, not decommissioned yet, so call the ssh command again!
        util.debug_print('checking again inside the while True loop, as the VM is not in the decommissioned state yet.')
        outmsg, errmsg = ssh.sudo_command('sudo -S su hduser -c "/home/hduser/hadoop-2.7.0/bin/hdfs dfsadmin -report"')
        
    return True
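
Examples #54, #55 and #58 fit together as one scale-down pipeline. A hedged sketch of chaining them, assuming they all live in the downsize module that their docstrings mention:

import downsize  # module name taken from the docstrings above

# 1) move one slave from the slaves list to the excludes list, refresh
#    HDFS/YARN, wait for the 'Decommissioned' status and stop the VM
#    (decommission() returns False and does nothing once the cluster is
#    already at its minimum size)
downsize.decommission(also_stop_vm=True)

# 2) destroy the stopped VM and clean the excludes/slaves/hosts files
downsize.removeDecommissionedMachine()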
Example #59
0
 def playersLimitReached(self):
     playersLimit = ServerConfig.getint('Core', 'PlayersCountLimit')
     if len(self._players) >= playersLimit:
         debug_print("Limite de jugadores alcanzado: %d" % playersLimit)
         return True
     return False
Example #60
0
def run(apk: Apk, emulator_name: str, emulator_port: int):
    '''
        Runs a MobileMonkey test session of the given apk on the named emulator
    '''
    to_kill = False
    to_test = True

    to_full_run = True
    wipe_after_finish = False
    # test_time_seconds = 30
    if not start_emulator():
        return
    emulator = emulator_manager.get_adb_instance_from_emulators(emulator_name)
    # emulator_name = 'emulator-' + emulator.port

    telnet_connector = TelnetAdb(config.LOCALHOST, emulator.port)
    # apk = Apk(config.APK_FULL_PATH)
    # api_commands.adb_uninstall_apk(emulator, apk)
    # api_commands.adb_install_apk(emulator, apk)

    # api_commands.adb_start_launcher_of_apk(emulator, apk)
    log = Logcat(emulator, apk, TestType.MobileMonkey)

    # api_commands.adb_pidof_app(emulator, apk)

    if to_kill:
        telnet_connector.kill_avd()
        quit()
    if not to_test:
        return

    log.start_logcat()

    fuzz = Fuzzer(config.MINIMUM_INTERVAL,
                  config.MAXIMUM_INTERVAL, config.SEED, config.DURATION, FatalWatcher(log.file_address))
    # log.experimental_start_logcat(fuzz)
    # fuzz.print_intervals_events()
    threads = threads_to_run(emulator, apk, fuzz, WILL_MONKEY)
    # log_thread = Thread(target=log.start, args=(fuzz,))
    global contextual_events
    print("Total contextual events: " + str(contextual_events))
    # print(threads)
    # return
    # device = AdbSettings.AdbSettings('emulator-' + adb_instance.port)
    # triggers = [fuzz.set_continue_network_speed,
    #             fuzz.set_continue_gsm_profile,
    #             fuzz.set_continue_network_delay]
    # thread_test = Thread(target=time_to_test, args=[
    #     test_time_seconds, triggers, ])

    # thread_fuzz_delay = Thread(target=fuzz.random_network_delay, args=(
    #     config.LOCALHOST, emulator.port,))
    # thread_fuzz_profile = Thread(target=fuzz.random_gsm_profile, args=(
    #     config.LOCALHOST, emulator.port, 12,))
    # thread_fuzz_speed = Thread(target=fuzz.random_network_speed, args=(
    #     config.LOCALHOST, emulator.port,))
    # thread_fuzz_rotation = Thread(
    #     target=fuzz.random_rotation, args=((emulator_name,)))
    # thread_fuzz_airplane = Thread(
    #     target=fuzz.random_airplane_mode_call, args=(emulator_name,))
    # monkey = AdbMonkey(emulator, config.APP_PACKAGE_NAME,
    #                    config.SEED, config.DURATION)
    # thread_monkey = Thread(target=monkey.start_monkey)
    if to_full_run:

        util.debug_print(
            "started testing at {}".format(time.ctime()), flag=TIME_PRINT_FLAG)
        [thread.start() for thread in threads]
        # log_thread.start()

        [thread.join() for thread in threads]
        # log.log_process.kill()
        # log.stop_logcat()
        # log_thread.join()
    # thread_monkey.start()
    # thread_fuzz_rotation.start()
    # thread_fuzz_delay.start()
    # thread_fuzz_profile.start()
    # thread_fuzz_speed.start()
    # thread_fuzz_airplane.start()
    # thread_test.start()
    # thread_test.join()
    # thread_fuzz_delay.join()
    # thread_fuzz_profile.join()
    # thread_fuzz_speed.join()
    # thread_fuzz_rotation.join()
    # thread_fuzz_airplane.join()
    # thread_monkey.join()
    # telnet_connector.kill_avd()
    api_commands.adb_stop_activity_of_apk(emulator, apk)
    log.stop_logcat()
    api_commands.adb_uninstall_apk(emulator, apk)
    util.debug_print(
        'Finished testing and uninstalling app at {}'.format(time.ctime()), flag=TIME_PRINT_FLAG)
    print(Analyzer(log.file_address))
    if wipe_after_finish:
        print("successfully completed testing app. Closing emulator")
        telnet_connector.kill_avd()
        emulator_manager.emulator_wipe_data(emulator)
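
A hedged usage sketch for run(), assuming it is called from the same module so Apk, config and run are in scope; the emulator name and port are placeholders, and the APK path follows the commented-out Apk(config.APK_FULL_PATH) line above:

# hypothetical invocation; 'emulator-5554' and 5554 are placeholder values
apk = Apk(config.APK_FULL_PATH)
run(apk, 'emulator-5554', 5554)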