def loadEnvironment(self):
    """Load the map, the song field and all configured environments.

    Reads nine parallel tuples from the config (one entry per
    environment), builds an Environment per name and appends it to
    self.environment.
    """
    self.loadMap()
    self.loadSongField()
    envNames = self.config.getValue("envNames")  # envNames is a tuple
    envColors = self.config.getValue("envColors")
    envEase = self.config.getValue("envEase")
    preyProbability = self.config.getValue("preyEncounterProbability")
    preyEnergyBoost = self.config.getValue("preyEnergyBoost")
    preyProteinBoost = self.config.getValue("preyProteinBoost")
    predatorProbability = self.config.getValue("predatorEncounterProbability")
    predatorEnergyLost = self.config.getValue("predatorEnergyLost")
    predatorProteinLost = self.config.getValue("predatorProteinLost")
    # Walk the parallel config tuples in lockstep instead of indexing.
    for (name, color, ease, prey_p, prey_e, prey_pr,
         pred_p, pred_e, pred_pr) in zip(
            envNames, envColors, envEase,
            preyProbability, preyEnergyBoost, preyProteinBoost,
            predatorProbability, predatorEnergyLost, predatorProteinLost):
        myEnv = Environment(name, color)
        myEnv.ease = ease
        myEnv.preyProbability = prey_p
        myEnv.preyEnergyBoost = prey_e
        myEnv.preyProteinBoost = prey_pr
        myEnv.predatorProbability = pred_p
        myEnv.predatorEnergyLost = pred_e
        myEnv.predatorProteinLost = pred_pr
        self.environment.append(myEnv)
        logger.log(myEnv)
    logger.title("Environment loading complete.")
def handle_channel_msg(self, raw_msg):
    """
    Given a raw IRC message identified as a channel message, handle it as necessary.
    Looks something like this:
        @TAGS, :[email protected], PRIVMSG, #CHANNEL :MSG
        @badges=;color=#8A2BE2;display-name=Pleb;emotes=;mod=0;room-id=12345678;subscriber=0;turbo=0;
        user-id=11111111;user-type= :[email protected] PRIVMSG #sometwitchuser :Normal user.
    :param raw_msg: str - The IRC raw message that includes the type PRIVMSG
    """
    display_name, channel_name, msg, is_mod, is_sub = self.parse_msg(raw_msg)

    # All channel commands start with '!'. startswith() also handles an
    # empty message safely, where the old msg[0] check raised IndexError.
    if not msg.startswith('!'):
        return
    # Skip the message if it's from an invalid channel; Xelabot should only be listening to channels it's in.
    if channel_name not in self.channel_manager.channel_settings:
        log('Skipping message from channel not added to Channel Manager: #' + channel_name)
        return
    # Channel specific commands, like quest commands
    self.channel_manager.channels[channel_name].check_commands(display_name, msg, is_mod, is_sub)
    # Redirect users who try whisper-only commands in channel chat.
    if msg in self.whisper_commands.exact_match_commands:
        self.send_whisper(display_name, 'Try whispering that command to Xelabot instead!')
def test_for_second_packet(self, result):
    """Send a DNS query and check whether more than one response arrives.

    A second answer to a single query is a signature of DNS injection:
    both an injector and the real resolver reply. Any packets received
    are stored base64-encoded in `result`, along with stringified
    booleans for whether each packet arrived.

    :param result: dict populated in place with the findings
    """
    packet = self.build_packet(self.host)
    sock = None  # bug fix: bind the name before try so cleanup is always safe
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.bind(('', 8888))
        sock.settimeout(self.second_packet_timeout)
        sock.sendto(packet, (self.resolver, 53))
        received_first_packet = False
        try:
            first_packet, addr = sock.recvfrom(1024)
            received_first_packet = True
            result["first_packet.b64"] = base64.b64encode(first_packet)
        except socket.timeout:
            logger.log("i", "Didn't receive first packet")
        received_second_packet = False
        result["received_first_packet"] = str(received_first_packet)
        if received_first_packet:
            try:
                second_packet, addr = sock.recvfrom(1024)
                received_second_packet = True
                result["second_packet.b64"] = base64.b64encode(second_packet)
                logger.log("i", "Received second DNS Packet")
            except socket.timeout:
                logger.log("i", "Didn't receive second packet")
        result["received_second_packet"] = str(received_second_packet)
    except socket.timeout:
        logger.log("i", "Socket timed out")
    except Exception as e:
        logger.log("e", "Error in socket creation: " + str(e))
    finally:
        # Previously this ran after the except blocks: if socket() itself
        # raised, `sock` was unbound and closing raised NameError.
        if sock is not None:
            sock.close()
def connect(self):
    """Open the connection to the IRC server and authenticate."""
    log('Connecting to IRC service...')
    server_address = (settings.IRC_SERVER, settings.IRC_PORT)
    self.irc_sock.connect(server_address)
    # Twitch IRC requires PASS (oauth token) before NICK.
    for line in ('PASS ' + self.oauth, 'NICK ' + self.nickname):
        self.send_raw_instant(line)
def send_raw_instant(self, msg_str):
    """Send a raw IRC message immediately, bypassing any rate limiting.

    :param msg_str: str - message to send (CRLF is appended here)
    """
    log('> ' + msg_str)
    payload = bytes(msg_str + '\r\n', 'UTF-8')
    self.irc_sock.send(payload)
def initialize(self):
    """Create the channel/player managers and an empty whisper command set."""
    for label, attr, factory in (
            ('channel', 'channel_manager', ChannelManager),
            ('player', 'player_manager', PlayerManager)):
        log('Initializing {} manager...'.format(label))
        setattr(self, attr, factory(self))
    # Commands for direct whispers to the bot
    self.whisper_commands = CommandSet()
def ping_test(self):
    """Ping self.host and record reachability plus parsed ping statistics.

    Runs a one-packet probe first; when it succeeds, runs a longer ping
    (self.packets packets) and parses transmitted/received counts,
    rtt min/avg/max and the TTL out of the command output. The result
    dict is appended to self.results in both cases.
    """
    result = {"url": self.url, "host": self.host, "timeout": self.timeout}
    self.dns_test(result)
    logger.log("i", "Running ping to " + self.host)
    # Quick reachability probe: a single packet, output discarded.
    response = os.system("ping -c 1 -W " + str(self.timeout) + " " + self.host + " >/dev/null 2>&1")
    if response == 0:
        result["success"] = "true"
        # Further experiment
        process = ["ping", self.host, "-c " + str(self.packets), "-W " + str(self.timeout)]
        # Create a separate process so the ping output (which is a ton of output) doesn't flood the console. This also allows the output to be parsed
        console_response = subprocess.Popen(process, stdout=subprocess.PIPE).communicate()[0]
        ping_data = ""
        rtt_data = ""
        response_lines = []
        for line in console_response.splitlines():
            # Summary line, e.g. "4 packets transmitted, 4 received, ..."
            if "packets transmitted" in line and "received" in line:
                ping_data = line
            # Timing line, e.g. "rtt min/avg/max/mdev = ..."
            if line.startswith("rtt"):
                rtt_data = line
            # Per-reply lines carry the TTL value.
            if "bytes from" in line and "icmp_seq" in line:
                response_lines.append(line)
            if ping_data != "" and rtt_data != "":
                break
        split_data = ping_data.split()
        packetsTransmitted = -1
        packetsReceived = -1
        # From 0 - 100 -- NOTE(review): never computed anywhere; stays -1.
        packetsLostPercentage = -1
        # Parse output to find different statistics
        for x in range(0, len(split_data) - 1):
            if split_data[x] == "packets" and split_data[x + 1].replace(",", "") == "transmitted":
                packetsTransmitted = int(split_data[x - 1])
            if split_data[x].replace(",", "") == "received":
                packetsReceived = int(split_data[x - 1])
        split_data = rtt_data.split()
        # The "min/avg/max/mdev" token is the only one with both '/' and '.'.
        for string in split_data:
            if "/" in string and "." in string:
                rtt_split = string.split("/")
                result["rtt_min"] = rtt_split[0]
                result["rtt_avg"] = rtt_split[1]
                result["rtt_max"] = rtt_split[2]
        if len(response_lines) > 0:
            response_data = response_lines[0].split()
            for part in response_data:
                if part.startswith("ttl"):
                    result["ttl"] = part.split("=")[1]
        result["packets_sent"] = str(packetsTransmitted)
        result["packets_received"] = str(packetsReceived)
    else:
        result["success"] = "false"
    self.results.append(result)
def latest_version():
    """Check whether the running build matches the latest published version.

    Despite the name, this returns a bool, not a version string: True
    when the version posted online equals settings.VERSION.

    :return: bool - True if the local version is up to date
    """
    log('Checking version...')
    with urllib.request.urlopen(settings.BASE_URL + settings.VERSION_FILENAME) as version_file:
        # The version file's first line holds the published version string.
        newest_version = version_file.readline().decode(encoding='UTF-8').strip()
        return newest_version == settings.VERSION
def generator(mn, force):
    """Generate primes for the interval [mn, mn + INTERVAL_LENGTH).

    Presumably a Celery-style task: `generator.request.id` identifies
    the current invocation -- TODO confirm against the task framework.

    :param mn: lower bound of the interval
    :param force: when True, run even if another task has claimed the interval
    """
    mx, task_id = mn + settings.INTERVAL_LENGTH, generator.request.id
    gs, created = GeneratorStatus.objects.get_or_create(min_value=mn)
    # Skip intervals already completed, or (unless forced) owned by
    # a different task id.
    if gs.status is True or (not force and gs.task_id != task_id):
        return
    # Claim the interval for this task before doing the work.
    gs.task_id = task_id
    gs.save()
    log("%s started" % gs)
    generate_primes(mn, mx)
    gs.status = True
    gs.save()
def handle_msg(self, raw_msg):
    """Dispatch an arbitrary raw IRC message.

    Currently only answers server PINGs with a PONG; everything else is
    just logged.

    :param raw_msg: str - The IRC raw message
    """
    if not raw_msg:
        return
    log(raw_msg)
    if raw_msg.lower().startswith('ping '):
        self.send_pong(raw_msg.split()[1])
def update_executable():
    """
    Get the replacement executable and swap it with the running one using a batch script.
    """
    log('Updating...')
    # Download the new build next to the running executable.
    urllib.request.urlretrieve(settings.BASE_URL + settings.EXECUTABLE_FILENAME, settings.NEW_EXECUTABLE_FILENAME)
    # Write the batch script that performs the swap after this process exits.
    with open(settings.RENAME_SCRIPT_FILENAME, 'w') as write_file:
        write_file.write(RENAME_BATCH_SCRIPT)
    # Detach the script in its own console so it survives our exit
    # (CREATE_NEW_CONSOLE is Windows-only).
    subprocess.Popen(settings.RENAME_SCRIPT_FILENAME, creationflags=subprocess.CREATE_NEW_CONSOLE)
    sys.exit(0)
def http_get(self, results, dest_name):
    """Fetch http(s)://<dest_name> and record the timing and body.

    :param results: dict populated with "HttpTime" and "Http", or
        "http_error" on failure
    :param dest_name: host name or full URL to fetch
    """
    try:
        url = dest_name
        # Bug fix: the old check used `or`, which is always true (a URL
        # cannot start with both schemes), so URLs that already carried a
        # scheme were still prefixed with "http://".
        if not url.startswith(("http://", "https://")):
            url = "http://" + url
        start_time = time.time()
        contents = urllib2.urlopen(url)
        end_time = time.time()
        results["HttpTime"] = end_time - start_time
        results["Http"] = contents.read()
    except Exception as e:
        logger.log("e", "Error in http_get in indonesia test for " + self.host + " (" + str(e) + ")")
        results["http_error"] = str(e)
def tls_test(self):
    """Fetch the host's TLS fingerprint and compare it to the known-good set."""
    logger.log("i", "Getting TLS Certificate from {0} on port {1} ".format(self.host, self.port))
    fpr = tls.get_fingerprint(self.host, self.port)
    record = {
        "host": self.host,
        "fpr": fpr,
        "success": 'true' if fpr in self.fprs else 'false',
    }
    self.results.append(record)
def start_agent():
    """Launch the agent via `start_command`.

    :return: bool - True when the command produced no output at all,
        which is taken to mean a clean start.
    """
    started = False
    try:
        out, err = subprocess.Popen(
            start_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        ).communicate()
        if (out, err) == ('', ''):
            logger.log('Agent started.')
            started = True
        elif 'No such process' in err:
            logger.log('Agent not found.')
        else:
            logger.log('Unknown output: "%s"' % err)
    except Exception as e:
        logger.log("Could not start agent.", logger.LogLevel.Error)
        logger.log_exception(e)
    return started
def load_agent():
    """Load the agent via `load_command`; the outcome is only logged."""
    try:
        out, err = subprocess.Popen(
            load_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        ).communicate()
        if out == '' == err:
            logger.log('Agent loaded.')
        elif 'Already loaded' in err:
            logger.log('Agent is already loaded.')
        else:
            logger.log('Unknown output: "%s"' % err)
    except Exception as e:
        logger.log("Could not load agent.", logger.LogLevel.Error)
        logger.log_exception(e)
def restart_agent():
    """Restart the agent by issuing `stop_command`.

    NOTE(review): only the stop command is executed here; the agent is
    presumably relaunched by its supervisor -- confirm.
    """
    try:
        process = subprocess.Popen(stop_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        raw_output, error_output = process.communicate()
        if raw_output == '' and error_output == '':
            logger.log('Agent has restarted.')
        elif 'No such process' in error_output:
            logger.log('Agent not found. Nothing to restart.')
        else:
            logger.log('Unknown output: "%s"' % error_output)
    except Exception as e:
        # Bug fix: this previously logged "Could not start agent.",
        # copy-pasted from start_agent().
        logger.log("Could not restart agent.", logger.LogLevel.Error)
        logger.log_exception(e)
def dns_query(self, results, dest_name):
    """Resolve dest_name's A records, recording timing and each record.

    :param results: dict populated with "DnsTime", "DnsNumRecords" and
        one "A-record<i>" key per answer, or "dns_error" on failure
    :param dest_name: hostname to resolve
    """
    try:
        start_time = time.time()
        answers = dns.resolver.query(dest_name, 'A')
        end_time = time.time()
        results["DnsTime"] = end_time - start_time
        results["DnsNumRecords"] = len(answers)
        # enumerate() replaces the manual counter; also dropped a stray
        # trailing semicolon and a dead `pass` from the original.
        for n, rdata in enumerate(answers):
            results["A-record" + str(n)] = rdata.to_text()
    except Exception as e:
        logger.log("e", "Error with dns_query in indonesia test for " + self.host + " (" + str(e) + ")")
        results["dns_error"] = str(e)
def initialize(self):
    """Set up the quest managers and register whisper commands."""
    log('Initializing channel manager...')
    self.channel_manager = QuestChannelManager(self)
    log('Initializing player manager...')
    self.player_manager = QuestPlayerManager(self)
    # Whisper commands map an exact command string to its handler.
    stats = self.stats_whisper
    commands = {
        '!xelabot': self.faq_whisper,
        '!help': self.faq_whisper,
        '!gold': stats,
        '!exp': stats,
        '!stats': stats,
        '!items': stats,
        '!prestige': self.try_prestige,
    }
    self.whisper_commands = CommandSet(exact_match_commands=commands)
def update_nginx_dir_config(self, new_path):
    """
    Set the directory where is nginx (by default: /etc/nginx).

    Args:
        new_path: Path to NGINX directory.

    Returns:
        Boolean
        Boolean: True if the path has been updated, False otherwise.
    """
    status, message = self.request_handler.send(
        "PUT", self.__URI_NGINX_DIR_CONFIG, payload={'path': new_path})
    if status == 200:
        return message['state'] == 1
    logger.log("Error {}: {}".format(status, message))
    return False
def handle_whisper(self, raw_msg):
    """
    Given a raw IRC message identified as a whisper, handle it as necessary.
    Looks something like this:
        @TAGS :[email protected] WHISPER BOT_NAME :MSG
        @badges=;color=#8A2BE2;display-name=Pleb;emotes=;message-id=2;thread-id=12348765_56784321;turbo=0;
        user-id=11111111;user-type= :[email protected] WHISPER xelabot :This is a whisper.
    :param raw_msg: str - The IRC raw message that includes the type WHISPER
    """
    display_name, whisper_target, msg, is_mod, is_sub = self.parse_msg(raw_msg)
    # Only act on whispers addressed to this bot.
    if whisper_target.lower() == self.nickname.lower():
        self.whisper_commands.execute_command(display_name, msg)
    else:
        log('Invalid whisper target: {}'.format(whisper_target))
def try_update():
    """
    Checks the latest version number and prompts to update.
    """
    if not os.path.isfile(settings.EXECUTABLE_FILENAME):
        # No packaged executable - probably a dev environment, so skip
        # version checking entirely.
        return
    if latest_version():
        return
    if not settings.AUTO_UPDATE_EXECUTABLE:
        response = input('Newer version of Xelabot detected. Would you like to update? (y/n): ')
        if response.lower() not in ['y', 'yes']:
            log('Well, we can always update later. :(')
            return
    update_executable()
def ssl_strip_test(self, site):
    """Probe http://<site> and flag responses that don't redirect to HTTPS."""
    logger.log("i", "Checking {0} for SSL stripping".format(site))
    r = requests.get('http://' + site, allow_redirects=False)
    result = {
        "site": site,
        "headers": dict(r.headers),
        "status": r.status_code,
    }
    # A well-behaved site answers with a 3xx redirect whose Location
    # (when present) points at an https URL.
    redirected = 300 <= r.status_code <= 399
    location_ok = ("location" not in r.headers) or ("https" in r.headers["location"])
    result["success"] = redirected and location_ok
    self.results.append(result)
def get_site_list(self, all_available_sites=False):
    """
    Get the list of all the names of the sites (i.e. configurations) on the Device.

    Args:
        all_available_sites: True if the function has to return all the configurations,
            even when not enabled. False for only the enabled configurations.

    Returns:
        Boolean, List<String>
        Boolean: True if the Agent has correctly replied (200 OK), False otherwise.
        List<String>: List of names.
    """
    status, message = self.request_handler.send(
        "GET", self.__URI_SITE_CONFIG,
        url_params={'allAvailable': all_available_sites})
    if status == 200:
        return (True, [str(site) for site in message['sites']])
    logger.log("Error {}: {}".format(status, message))
    return (False, [])
def get_site_config(self, site_name):
    """
    Get a particular configuration based on its name 'site_name'.

    Args:
        site_name: (String) Name of the configuration.

    Returns:
        Boolean, String
        Boolean: True if the Agent has correctly replied (200 OK), False otherwise.
        String: Configuration.
    """
    uri = self.__URI_SITE_CONFIG + '/' + site_name
    status, message = self.request_handler.send("GET", uri)
    if status == 200:
        return True, message['config']
    logger.log("Error {}: {}".format(status, message['message']))
    return False, ""
def update_site_config(self, site_name, config, enable=True):
    """
    Update a configuration named 'site_name' on the device, that contains 'config'.
    If 'enable', the configuration is activated.

    Args:
        site_name: (String) Name of the configuration.
        config: (String) Content of the configuration.
        enable: (Boolean) True to enable the configuration on the device.

    Returns:
        Boolean
        Boolean: True if the configuration has been updated, False otherwise.
    """
    uri = self.__URI_SITE_CONFIG + '/' + site_name
    payload = {'config': config, 'enable': enable}
    status, message = self.request_handler.send("PUT", uri, payload=payload)
    if status == 200:
        return message['state'] == 1
    logger.log("Error {}: {}".format(status, message['message']))
    return False
def deviceHealth(device, interfaces, configuration):
    """Check reachability and health of an NGINX device.

    Pings the host first; when unreachable, returns a transient result
    carrying a DeviceNotReachable fault. Otherwise queries the agent's
    status and returns its health score, or an AgentNotReachable fault
    when the agent does not answer. (Removed a stale commented-out
    version of the status check.)
    """
    logger.log("\n---- deviceHealth with parameters\n--> 'device' : {}\n--> 'interfaces' : {}\n--> 'configuration' : {}".format(device, interfaces, configuration))
    nginx_device = NginxDevice(device)
    # Try to ping first
    logger.log("Ping {}...".format(nginx_device.host_ip))
    if not ConnectivityChecking.ping(nginx_device.host_ip):
        return return_transient(0, faults=[Fault([], FaultCode.DeviceNotReachable, "Device not responding.").value()])
    logger.log("Ping OK!")
    status, score = nginx_device.check_device_status()
    if not status:
        return return_transient(50, faults=[Fault([], FaultCode.AgentNotReachable, "Agent not responding.").value()])
    return return_ok(score=score)
def optimize_memory(df: pd.DataFrame, config: Config) -> None:
    """Downcast numeric columns in-place to shrink dataframe memory usage.

    Based on https://www.dataquest.io/blog/pandas-big-data/ — only the
    downcast of numeric features is applied. Records the size before
    ('processed_dataset_size') and after ('reduced_dataset_size') in the
    config, in megabytes. Skipped entirely in predict mode.

    Args:
        df: Dataframe to optimize in place.
        config: Model parameters / run context.
    """
    if config.is_predict():
        return

    int_features = []
    float_features = []
    for feat in df:
        kind = df.dtypes[feat]
        if kind in ['int', 'int32', 'int64']:
            int_features.append(feat)
        elif kind in ['float', 'float32', 'float64']:
            float_features.append(feat)

    def _size_mb():
        # Deep usage counts object contents too; reported in MB.
        return round(df.memory_usage(deep=True).sum() / 1024 / 1024, 2)

    config['processed_dataset_size'] = _size_mb()
    log('Processed dataframe size: {:0.2f} Mb'.format(
        config['processed_dataset_size']))

    if int_features:
        df[int_features] = df[int_features].apply(pd.to_numeric,
                                                  downcast='integer')
    if float_features:
        df[float_features] = df[float_features].apply(pd.to_numeric,
                                                      downcast='float')

    config['reduced_dataset_size'] = _size_mb()
    log('Reduced dataframe size: {:0.2f} Mb'.format(
        config['reduced_dataset_size']))
def agent_running_stats():
    """Query the process list and report whether the agent is loaded/running.

    Parses the tab-separated output of `list_command` (pid, run flag,
    name per line) and looks for `daemon_label`.

    :return: AgentStatus(loaded, running)
    """
    ps_info = []
    running = False
    loaded = False
    process = subprocess.Popen(list_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    raw_output, error_output = process.communicate()
    for line in raw_output.splitlines():
        pid, run, pname = line.split('\t')
        ps_info.append((pname, run, pid))
    for p in ps_info:
        if daemon_label == p[0]:
            # p[1] can either be:
            #   : '0' meaning not running.
            #   : '-' meaning its running.
            loaded = True
            if p[1] == '-':
                # Found a running instance -- stop scanning.
                running = True
                break
            elif p[1] == '0':
                # Loaded but stopped; keep scanning for a running entry.
                running = False
    status = AgentStatus(loaded, running)
    logger.log(str(status), logger.LogLevel.Debug)
    return status
def updatePlaylistCover(pid, cover, user):
    """Set the cover image of a playlist (permission-checked, transactional)."""
    log(obj={'pid': pid, 'cover': cover})
    # Per-playlist redis lock serializes edits; MongoTransaction groups
    # both updates below into a single session.
    with redis_lock.Lock(rdb, "playlistEdit:" + str(pid)), MongoTransaction(client) as s:
        list_obj = db.playlists.find_one({'_id': ObjectId(pid)})
        log(obj={'playlist': list_obj})
        if list_obj is None:
            raise UserError('PLAYLIST_NOT_EXIST')
        # Raises if `user` may not edit this playlist.
        filterOperation('updatePlaylistCover', user, list_obj)
        db.playlists.update_one({'_id': ObjectId(pid)}, {'$set': {
            "cover": cover
        }}, session=s())
        # Stamp modification metadata in a second update.
        db.playlists.update_one({'_id': ObjectId(pid)}, {
            '$set': {
                'meta.modified_by': makeUserMeta(user),
                'meta.modified_at': datetime.now()
            }
        }, session=s())
        s.mark_succeed()
def get_site_list(self, all_available_sites=False):
    """
    Get the list of all the names of the sites (i.e. configurations) on the Device.

    Args:
        all_available_sites: True if the function has to return all the configurations,
            even when not enabled. False for only the enabled configurations.

    Returns:
        Boolean, List<String>
        Boolean: True if the Agent has correctly replied (200 OK), False otherwise.
        List<String>: List of names.
    """
    status, message = self.request_handler.send(
        "GET", self.__URI_SITE_CONFIG,
        url_params={'allAvailable': all_available_sites})
    if status != 200:
        logger.log("Error {}: {}".format(status, message))
        return (False, [])
    site_names = []
    for site in message['sites']:
        site_names.append(str(site))
    return (True, site_names)
def upload_to_storage(file_path, project_id, bucket_id, destination):
    """Uploads the file to Storage.

    Takes the configuration from GOOGLE_APPLICATION_CREDENTIALS.

    Args:
        file_path: the path to the file to be uploaded.
        project_id: the GCP project id.
        bucket_id: the Storage bucket.
        destination: the path to the destination on the bucket.
    """
    # This is a workaround for https://github.com/bazelbuild/rules_python/issues/14
    logger.log('Uploading data to Storage.')
    client = storage.Client(project=project_id)
    blob = client.get_bucket(bucket_id).blob(destination)
    blob.upload_from_filename(file_path)
    logger.log('Uploaded {} to {}/{}.'.format(file_path, bucket_id, destination))
def __init__(self, channels, layer_specs, DEVICE="cuda"):
    """Stack of strided/dilated Conv1d downsampling layers.

    :param channels: channel width per layer; each wide conv emits
        2*channels -- presumably split for a gated activation in the
        forward pass, TODO confirm.
    :param layer_specs: iterable of (stride, kernel_size, dilation) tuples.
    :param DEVICE: device string stored for later use.
    """
    super().__init__()
    self.convs_wide = nn.ModuleList()
    self.convs_1x1 = nn.ModuleList()
    self.layer_specs = layer_specs
    self.DEVICE = DEVICE
    prev_channels = 1
    total_scale = 1  # cumulative stride: overall downsampling factor so far
    pad_left = 0     # input samples of left context the stack consumes
    self.skips = []
    for stride, ksz, dilation_factor in layer_specs:
        conv_wide = nn.Conv1d(prev_channels, 2 * channels, ksz, stride=stride, dilation=dilation_factor)
        # Custom uniform init; origin of the 2.967 constant is not
        # visible here -- NOTE(review): confirm against the source model.
        wsize = 2.967 / math.sqrt(ksz * prev_channels)
        conv_wide.weight.data.uniform_(-wsize, wsize)
        conv_wide.bias.data.zero_()
        self.convs_wide.append(conv_wide)
        conv_1x1 = nn.Conv1d(channels, channels, 1)
        conv_1x1.bias.data.zero_()
        self.convs_1x1.append(conv_1x1)
        prev_channels = channels
        # Samples this layer "overhangs" at the current rate; scaled by
        # the accumulated stride to express it in input samples.
        skip = (ksz - stride) * dilation_factor
        pad_left += total_scale * skip
        logger.log(
            f'pad += {total_scale} * {ksz-stride} * {dilation_factor}')
        self.skips.append(skip)
        total_scale *= stride
    self.pad_left = pad_left
    self.total_scale = total_scale
    self.final_conv_0 = nn.Conv1d(channels, channels, 1)
    self.final_conv_0.bias.data.zero_()
    self.final_conv_1 = nn.Conv1d(channels, channels, 1)
def get_raw_data(self) -> bytes:
    """Take a reading from the sensor.

    (Bug fix: the original "docstring" was an f-string literal, which is
    an evaluated expression rather than a docstring, and interpolated the
    builtin ``bytes`` type into it.)

    :return: the raw line read from the serial port, or None when the
        sensor cannot be reached or the read fails.
    """
    start_reading = '\r'
    connected = self.connect_to_sensor()
    if not connected:
        return None
    try:
        # A carriage return triggers the sensor to emit one reading.
        self.__serial.write(start_reading.encode())
        line = self.__serial.readline()
    except SerialException:
        log("Error reading from SPEC-DGS sensor")
        return None
    else:
        self.__serial.close()
        return line
def activate_user(request):
    """Activate a user account from an emailed activation code.

    Expects the code as the 'code' GET parameter. Every branch responds
    with a JSON payload built by create_response.
    """
    try:
        user = User.objects.filter(activation_key=request.GET.get('code'))
        # Unknown code -> invalid_parameters.
        if user.count() == 0:
            return JsonResponse(create_response(
                data=None, success=False,
                error_code=ResponseCodes.invalid_parameters), safe=False)
        user = user[0]
        if not user.approved:
            # A missing or expired key also reads as invalid_parameters.
            if user.key_expires is None or timezone.now() > user.key_expires:
                return JsonResponse(create_response(
                    data=None, success=False,
                    error_code=ResponseCodes.invalid_parameters), safe=False)
            else:
                # Activation successful
                user.approved = True
                user.activation_key = None
                user.key_expires = None
                user.save()
                return JsonResponse(create_response(
                    data=None, success=True,
                    error_code=ResponseCodes.success), safe=False)
        # If user is already active, simply display error message
        else:
            return JsonResponse(create_response(
                data=None, success=False,
                error_code=ResponseCodes.email_already_verified), safe=False)
    except Exception as e:
        log(traceback.format_exception(None, e, e.__traceback__), 'e')
        return JsonResponse(create_response(
            data=None, success=False,
            error_code=ResponseCodes.invalid_parameters), safe=False)
def do_test_generate(self, paths, step, data_path, test_index, deterministic=False, use_half=False, verbose=False):
    """Generate target / reconstructed / voice-transferred audio for the test set.

    Writes three wavs per utterance under paths.gen_path():
    *_target.wav (ground truth), *_generated.wav (same speaker) and
    *_transferred.wav (speakers reversed across the batch).
    """
    k = step // 1000  # checkpoint label, in thousands of steps
    test_index = [x[:2] if len(x) > 0 else [] for i, x in enumerate(test_index)]
    dataset = env.MultispeakerDataset(test_index, data_path)
    loader = DataLoader(dataset, shuffle=False)
    data = [x for x in loader]
    n_points = len(data)
    # Scale samples to floats -- assumes x holds int16-range values;
    # TODO confirm against MultispeakerDataset.
    gt = [(x[0].float() + 0.5) / (2**15 - 0.5) for speaker, x in data]
    # Zero-pad each clip with the encoder's required left/right context.
    extended = [np.concatenate([np.zeros(self.pad_left_encoder(), dtype=np.float32), x, np.zeros(self.pad_right(), dtype=np.float32)]) for x in gt]
    speakers = [torch.FloatTensor(speaker[0].float()) for speaker, x in data]
    maxlen = max([len(x) for x in extended])
    # Right-pad everything to a common length so the batch can be stacked.
    aligned = [torch.cat([torch.FloatTensor(x), torch.zeros(maxlen-len(x))]) for x in extended]
    os.makedirs(paths.gen_path(), exist_ok=True)
    # Doubled batch: first half keeps the original speakers, second half
    # reverses them, so one pass yields reconstruction and voice transfer.
    out = self.forward_generate(torch.stack(speakers + list(reversed(speakers)), dim=0).cuda(), torch.stack(aligned + aligned, dim=0).cuda(), verbose=verbose, use_half=use_half)
    logger.log(f'out: {out.size()}')
    for i, x in enumerate(gt) :
        librosa.output.write_wav(f'{paths.gen_path()}/{k}k_steps_{i}_target.wav', x.cpu().numpy(), sr=sample_rate)
        audio = out[i][:len(x)].cpu().numpy()
        librosa.output.write_wav(f'{paths.gen_path()}/{k}k_steps_{i}_generated.wav', audio, sr=sample_rate)
        audio_tr = out[n_points+i][:len(x)].cpu().numpy()
        librosa.output.write_wav(f'{paths.gen_path()}/{k}k_steps_{i}_transferred.wav', audio_tr, sr=sample_rate)
def downloadRedditGallery(url, outputDir, galleryName):
    """
    Download a reddit gallery to outputDir / subredditname / post.id - post.title /

    Images 0-indexed.

    :return: list of file paths written for this gallery.
    """
    post = praw.models.reddit.submission.Submission(reddit=client(), url=url)
    # permalink looks like /r/<subreddit>/comments/<id>/... -> index 2 is the subreddit.
    subRedditDir = post.permalink.split("/")[2]
    pth = pathlib.Path(outputDir, subRedditDir, f"{post.id} - {galleryName}")
    if not pth.exists():
        pth.mkdir(parents=True)
    # assert pth.is_dir()
    downloaded = []
    if post.media_metadata:
        # Sort gallery items by id for a stable 0..n file numbering.
        for idx, item in enumerate(
                sorted(post.gallery_data['items'], key=lambda e: e["id"])):
            media_id = item["media_id"]
            # Take the first preview rendition; drop the query string and
            # swap the preview host for the full-resolution i.redd.it host.
            media_url = post.media_metadata[media_id]["p"][0]["u"].split(
                "?")[0].replace("preview", "i")
            media_name = pathlib.Path(media_url.split("/")[-1])
            saveFilePath = pth / pathlib.Path(f"{idx}{media_name.suffix}")
            if not saveFilePath.exists():
                req = requests.get(media_url, headers={"user-agent": user_agent})
                with open(saveFilePath, "wb") as f:
                    f.write(req.content)
                # Small delay to be polite to the image host.
                time.sleep(0.5)
                downloaded.append(str(saveFilePath))
    else:
        logger.log(f"[ERROR] {url} has no media_metadata")
    return downloaded
def wrapper(*args, **kwargs):
    """Request wrapper: logs the call, injects `rd`, and normalizes errors."""
    # Record the incoming request before dispatching to the wrapped view.
    beginEvent(func.__name__, getRealIP(request), request.full_path, request.args)
    rd = Namespace()
    rd._version = _VERSION
    rd._version_url = _VERSION_URL
    # Every wrapped view receives the request namespace as 'rd'.
    kwargs['rd'] = rd
    try:
        ret = func(*args, **kwargs)
        return _handle_return(ret, rd)
    except HTTPException as e:
        # Deliberate aborts pass through unchanged after a warning log.
        log(level='WARN', obj={'ex': e})
        raise e
    except Exception as ex:
        import traceback
        # Unexpected failures are logged with both tracebacks and
        # collapsed into a generic 400 for the client.
        log(level='ERR', obj={
            'ex': str(ex),
            'tb1': repr(traceback.format_exc()),
            'tb2': repr(traceback.extract_stack())
        })
        abort(400)
def update_desc(redis_user_key, user_id, new_desc):
    """Update a user's profile description and refresh the cached copy.

    Raises UserError('DESC_TOO_LONG') when the text exceeds the limit
    and UserError('USER_NOT_EXIST') when the user id is unknown.
    """
    log(obj={
        'redis_user_key': redis_user_key,
        'user_id': user_id,
        'new_desc': new_desc
    })
    if len(new_desc) > UserConfig.MAX_DESC_LENGTH:
        raise UserError('DESC_TOO_LONG')
    user_obj = db.users.find_one({'_id': ObjectId(user_id)})
    if user_obj is None:
        raise UserError('USER_NOT_EXIST')
    log(obj={'old_desc': user_obj['profile']['desc']})
    db.users.update_one({'_id': ObjectId(user_id)},
                        {'$set': {'profile.desc': new_desc}})

    def _apply(cached):
        # Mirror the DB change into the cached user object.
        cached['profile']['desc'] = new_desc
        return cached
    _updateUserRedisValue(user_id, _apply)
def send_email(email, activation_key, action):
    """Send an account e-mail (activation, warning, or password reset).

    :param email: recipient address (rebound to the EmailMessage below)
    :param activation_key: token embedded in the action URL
    :param action: 'activate', 'warning', or anything else for reset
    """
    site = settings.SITE_URL
    url = site + '/action?action=' + action + '&code=' + activation_key
    if action == 'activate':
        subject = '[JobHax Platform] Confirm E-mail Address'
        body = '''<html>
Welcome to JobHax!<br>
<br>
You must follow this link to <span class="il">activate</span> your account:<br>
<a href="''' + url + '''">''' + url + '''</a><br>
<br>
Have fun with the JobHax, and don't hesitate to contact us with your feedback.
</html>'''
    elif action == 'warning':
        subject = '[JobHax Platform] Unusual Activity Detected'
        # NOTE(review): this body reuses the activation wording; it looks
        # like a copy-paste placeholder for the warning e-mail -- confirm.
        body = '''<html>
Welcome to JobHax!<br>
<br>
You must follow this link to <span class="il">activate</span> your account:<br>
Have fun with the JobHax, and don't hesitate to contact us with your feedback.
</html>'''
    else:
        subject = '[JobHax Platform] Reset your password'
        body = '''<html>
You recently requested to reset your password.<br>
<br>
To reset your password you must follow this link:<br>
<a href="''' + url + '''">''' + url + '''</a><br>
<br>
If you did not make this request, you can safely ignore this email. A password reset request can be made by anyone, and it does not indicate that your account is in any danger of being accessed by someone else.
</html>'''
    email = EmailMessage(subject, body, to=[email])
    email.content_subtype = "html"  # this is the crucial part
    try:
        email.send()
    except Exception as e:
        log(traceback.format_exception(None, e, e.__traceback__), 'e')
def addVideoToPlaylistLockFree(pid, vid, user, rank, session) :
    """Insert video `vid` into playlist `pid` at `rank` (caller holds the lock).

    Returns True when a new playlist item was inserted; False when the
    video does not exist, or when it was already in the playlist (in
    which case it is moved to `rank` instead).
    """
    log(obj = {'pid': pid, 'vid': vid})
    if tagdb.retrive_item({'_id': ObjectId(vid)}, session = session) is None :
        #raise UserError('VIDEO_NOT_EXIST')
        return False
    conflicting_item = db.playlist_items.find_one({'pid': ObjectId(pid), 'vid': ObjectId(vid)}, session = session)
    if conflicting_item is not None :
        # Already present: treat as a move, then stamp modification meta.
        editPlaylist_MoveLockFree(pid, conflicting_item, rank, session = session)
        db.playlists.update_one({'_id': ObjectId(pid)}, {'$set': {
            'meta.modified_by': makeUserMeta(user),
            'meta.modified_at': datetime.now()}}, session = session)
        return False
    # Record the playlist membership on the video item, deduplicated.
    playlists = tagdb.retrive_item({'_id': ObjectId(vid)}, session = session)['item']['series']
    playlists.append(ObjectId(pid))
    playlists = list(set(playlists))
    tagdb.update_item_query(ObjectId(vid), {'$set': {'item.series': playlists}}, makeUserMeta(user), session = session)
    db.playlist_items.insert_one({"pid": ObjectId(pid), "vid": ObjectId(vid), "rank": int(rank), "meta": makeUserMeta(user)}, session = session)
    # Bump the playlist's video count, then its modification metadata.
    db.playlists.update_one({"_id": ObjectId(pid)}, {"$inc": {"videos": int(1)}}, session = session)
    db.playlists.update_one({'_id': ObjectId(pid)}, {'$set': {
        'meta.modified_by': makeUserMeta(user),
        'meta.modified_at': datetime.now()}}, session = session)
    return True
def solve(A, B):
    """Solve the tridiagonal system A x = B with the Thomas algorithm
    (sweep / "progonka" method).

    :param A: (n x n) matrix whose tridiagonal entries are used
    :param B: right-hand-side vector of size n
    :return: numpy array x with the solution
    """
    logger.log(text="Начинаем расчет СЛАУ методом прогонки", force=True)
    n = B.size
    # 1. Forward sweep.
    # Alpha and beta coefficients for the first matrix row.
    b0 = A[0][0]
    c0 = A[0][1]
    d0 = B[0]
    y0 = b0
    alfa0 = -c0 / y0
    beta0 = d0 / y0
    alfas = [alfa0]
    betas = [beta0]
    # Alphas and betas for the remaining rows.
    for i in range(1, n):
        a = A[i][i-1]
        b = A[i][i]
        # The last row has no super-diagonal entry.
        c = A[i][i+1] if i < n-1 else 0
        d = B[i]
        y = b + a * alfas[i-1]
        alfa = -c / y
        beta = (d - a * betas[i-1]) / y
        alfas.append(alfa)
        betas.append(beta)
    # 2. Back substitution.
    # Last unknown first (was betas[betas.__len__()-1] -- use betas[-1]).
    x = [betas[-1]]
    # Remaining unknowns; x[-1] is the value appended on the previous
    # step (the original spelled this as x[n-i-2], which is equivalent).
    for i in range(n-2, -1, -1):
        x.append(alfas[i] * x[-1] + betas[i])
    # The list was built back-to-front; reverse it into solution order.
    x = np.array(x)[::-1]
    logger.log(text="Расчет СЛАУ методом прогонки завершен", force=True)
    return x
def stop(self):
    """Stop the child process: ask politely with 'exit', escalate to kill.

    Sends 'exit', then polls once per second for up to 10 seconds
    (re-sending 'exit' on the third poll); if the process is still alive
    after that, it is killed outright.
    """
    if self.is_proc_running():
        self.send_command('exit')
        #self.proc.stdin.close()
        counter = 0
        while True:
            if self.is_proc_running():
                if counter < 10:
                    if counter == 2:
                        # One retry in case the first 'exit' was missed.
                        try:
                            self.send_command('exit')
                        except:
                            pass
                    sleep(1)
                    counter += 1
                else:
                    # Grace period exhausted -- force-kill.
                    self.proc.kill()
                    log("[%s] killed" % self.proc_name, LEVEL_INFO, self.proc_name)
                    break
            else:
                break
        log("[%s] stopped" % self.proc_name, LEVEL_INFO, self.proc_name)
def __init__(self, env=UnityEnv, epochs=10, steps_per_epoch=1000, max_episode_length=1000, gamma=0.99, lam=0.97, seed=0, training=True, load_model=False, save_freq=1, policy_params=dict(), logger_name="", **kwargs):
    """PPO trainer setup: store hyperparameters, build buffer, policy, logger.

    NOTE(review): `policy_params=dict()` is a mutable default argument,
    shared across instances if ever mutated -- confirm it is read-only.
    The `env` default is the UnityEnv class itself; callers presumably
    pass an instance exposing `EnvInfo` -- confirm.

    :param gamma: discount factor
    :param lam: GAE lambda
    """
    self.env = env
    self.epochs = epochs
    self.steps_per_epoch = steps_per_epoch
    self.max_episode_length = max_episode_length
    self.gamma = gamma
    self.lam = lam
    self.seed = seed
    self.training = training
    self.load_model = load_model
    self.save_freq = save_freq
    self.policy_params = policy_params
    log("Policy Parameters")
    pprint(policy_params, indent=5, width=10)
    # Rollout buffer sized to one epoch of steps, using GAE(gamma, lam).
    self.buffer_ppo = Buffer_PPO(self.steps_per_epoch, self.env.EnvInfo, gamma=self.gamma, lam=self.lam)
    self.agent = Policy_PPO(policy_params=policy_params, env_info=self.env.EnvInfo)
    self.logger = Logger(logger_name)
def inferTagsFromVideo(utags, title, desc, user_language, video_url: str = '', user_urls: [str] = []):
    """Infer tag ids for a video, then translate them to the user's language.

    Resolution order:
      1. If `video_url` resolves to an already-indexed video, reuse the
         non-control tag ids (< 0x80000000) stored on it.
      2. Otherwise run text inference over user tags + title + description.
    Either way, tags matched from the uploader's user space are unioned in.

    NOTE(review): `user_urls: [str] = []` is a mutable default (and a
    non-standard annotation); it appears to be only read here -- confirm.
    """
    log(
        obj={
            'title': title,
            'desc': desc,
            'utags': utags,
            'lang': user_language,
            'video_url': video_url,
            'user_urls': user_urls
        })
    video_url = video_url.strip()
    tagids = []
    if video_url:
        obj, cleanURL = dispatch(video_url)
        if obj is not None:
            uid = obj.unique_id(obj, cleanURL)
            vid_item = db.retrive_item({'item.unique_id': uid})
            if vid_item is not None:
                # Keep only non-control tag ids (< 0x80000000).
                tagids = list(
                    filter(lambda x: x < 0x80000000, vid_item['tags']))
    if not tagids:
        utags = [u.lower() for u in utags]
        utags.append(title)
        utags.append(desc)
        # Marker token keeps the joined fields separable for inference --
        # presumably; confirm in inferTagidsFromText.
        all_text = ' 3e7dT2ibT7dM '.join(utags)
        tagids = inferTagidsFromText(all_text)
    matched_author_records, matched_author_tags = matchUserSpace(user_urls)
    matched_common_ids = itertools.chain.from_iterable(
        [x['common_tagids'] for x in matched_author_records])
    # Union of inferred/reused ids, author tag ids and common author ids.
    tagids = list(
        set(tagids) | set([x['id'] for x in matched_author_tags]) |
        set(matched_common_ids))
    return db.translate_tag_ids_to_user_language(tagids, user_language)[0]
def try_connect(self):
    """Connect to the pool's RPC server, retrying until login or shutdown.

    Validates the pool URL, then loops: (re)create the socket, attempt
    connect+login; on socket errors wait 10 s and retry. On a retry the
    previous failure is surfaced via self._pool_info['error'] and any
    in-flight mining work is cancelled.
    """
    url = urlparse.urlparse(self.url)
    hostname = url.hostname
    try:
        port = int(url.port)
    except:
        self._pool_info['error'] = "Invalid pool port"
        log("Invalid pool port!", LEVEL_ERROR)
        return
    if not hostname:
        self._pool_info['error'] = "Invalid pool URL"
        log("Invalid pool URL", LEVEL_ERROR)
        return
    while not self.exit.is_set():
        if not self._my_sock:
            # First attempt: create a fresh socket.
            log('Connecting to RPC server [%s:%d]...' % (hostname, port), LEVEL_INFO, self._pool_id)
            self._my_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._sock_keep_alive()
        else:
            # A previous attempt failed: report it and reset mining state.
            log(NETWORK_ERROR_MSG, LEVEL_ERROR, self._pool_id)
            self._pool_info['error'] = NETWORK_ERROR_MSG
            # (try to) stop all mining jobs by setting global job_id as None
            self._g_work['job_id'] = None
            # and clear submit works remain in queue if any
            while not self._work_submit_queue.empty():
                _ = self._work_submit_queue.get()
            try:
                self._my_sock.close()
            except:
                pass
            else:
                # Recreate the socket only when the old one closed cleanly.
                self._my_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self._sock_keep_alive()
        try:
            self._my_sock.connect((hostname, port))
            self.connect(self._my_sock)
        except socket.error:
            # wait 10 seconds
            self._wait(10)
        else:
            # Connected: log in, clear any stale error, and stop retrying.
            self._login()
            if 'error' in self._pool_info:
                self._pool_info['error'] = None
            break
def pop_blocks(num=1000):
    """
    Run `ryo-blockchain-import --pop-blocks <num>` and stream its output to
    the log.

    :param num: number of blocks to pop (coerced to int)
    :return: False unconditionally (kept for backward compatibility — callers
             presumably treat any return as completion; TODO confirm).
    """
    num = int(num)
    resources_path = getResourcesPath()
    exe = u'%s/bin/ryo-blockchain-import' % resources_path
    # BUG FIX: the argv list was previously built by splitting a formatted
    # command string on spaces, which breaks when resources_path contains a
    # space. Build the list element-by-element instead.
    fs_encoding = sys.getfilesystemencoding()
    args_array = [a.encode(fs_encoding)
                  for a in (exe, u'--pop-blocks', u'%d' % num)]
    proc = Popen(args_array, shell=False, stdout=PIPE, stderr=STDOUT,
                 stdin=PIPE, creationflags=CREATE_NO_WINDOW)
    proc_name = 'ryo-blockchain-import'
    log("[%s] %s" % (proc_name, u'%s --pop-blocks %d' % (exe, num)), LEVEL_INFO, proc_name)
    sleep(0.1)
    # Relay child output line-by-line until EOF.
    for line in iter(proc.stdout.readline, b''):
        log(">>> " + line.rstrip(), LEVEL_DEBUG, proc_name)
    if not proc.stdout.closed:
        proc.stdout.close()
    return False
def editPlaylist_Move(pid, vid, to_rank, user):
    """
    Move video `vid` to position `to_rank` within playlist `pid`.

    Raises UserError('PLAYLIST_NOT_EXIST') / ('VIDEO_NOT_EXIST') /
    ('OUT_OF_RANGE') on validation failure. `to_rank` above the playlist's
    video count is clamped to the count.
    """
    log(obj={'pid': pid, 'vid': vid, 'to_rank': to_rank})
    # Per-playlist redis lock serializes concurrent edits; the Mongo
    # transaction keeps validation + reorder + metadata update atomic.
    with redis_lock.Lock(rdb, "playlistEdit:" + str(pid)), MongoTransaction(client) as s:
        playlist = db.playlists.find_one({'_id': ObjectId(pid)}, session=s())
        if playlist is None:
            raise UserError('PLAYLIST_NOT_EXIST')
        filterOperation('editPlaylist', user, playlist)
        if tagdb.retrive_item({'_id': ObjectId(vid)}, session=s()) is None:
            raise UserError('VIDEO_NOT_EXIST')
        if to_rank < 0:
            raise UserError('OUT_OF_RANGE')
        # Clamp to the end of the playlist ('videos' is presumably the item
        # count — TODO confirm against the playlist schema).
        if to_rank > playlist['videos']:
            to_rank = int(playlist['videos'])
        # The actual reorder; "LockFree" because the lock is already held here.
        editPlaylist_MoveLockFree(pid, vid, to_rank, session=s())
        # Record who modified the playlist and when.
        db.playlists.update_one({'_id': ObjectId(pid)}, {
            '$set': {
                'meta.modified_by': makeUserMeta(user),
                'meta.modified_at': datetime.now()
            }
        }, session=s())
        s.mark_succeed()
def convertImgurIndirectUrlToImg(submission, imgurAuth, url):
    """
    Resolve an indirect imgur URL to a direct image link via the imgur API.

    :param submission: the reddit submission (recorded on failure)
    :param imgurAuth: object carrying clientId / clientSecret
    :param url: the indirect imgur URL
    :return: the direct image link, or None on any failure
    """
    # Login to imgur
    # This is required since they made NSFW images require login
    imgurClient = imgur.ImgurClient(imgurAuth.clientId, imgurAuth.clientSecret)
    if not checkImgurAPICredits(imgurClient):
        return None
    imageId = imgurIdFromUrl(url)
    if not imageId:
        logger.log("Failed to convert {} to image id".format(url))
        # BUG FIX: previously fell through here and called
        # get_image(None); bail out explicitly after logging.
        return None
    try:
        return imgurClient.get_image(imageId).link
    except Exception as e:
        errorMessage = (
            'Failed to convert imgur to image link: '
            '[ERROR] Exception: Url {} raised exception:\n\t {}'.format(
                url, e))
        logger.log(errorMessage)
        LikedSavedDatabase.db.addUnsupportedSubmission(submission, errorMessage)
        return None
def pathexists(self, path, create=None):
    """
    Report whether `path` exists under self.dbpath, optionally creating it.

    A path with an even number of "/"-separated segments addresses a document
    (stored as <last-segment>.json inside its parent directory); an odd number
    addresses a collection directory.

    :param path: slash-separated path relative to self.dbpath
    :param create: when falsy, only report existence. When truthy and the path
        is missing, create it: a dict written as the document (document path),
        or a {id: doc} mapping written as one file per entry (collection path).
    :return: bool when `create` is falsy; None otherwise (original contract).
    """
    segments = path.split("/")
    targets_document = (len(segments) % 2 == 0)
    if targets_document:
        parent_dir = self.dbpath + "/" + "/".join(segments[:-1])
        target = parent_dir + "/" + segments[-1] + ".json"
        found = os.path.isfile(target)
    else:
        parent_dir = self.dbpath + "/" + path
        target = parent_dir
        found = os.path.isdir(target)
    if not create:
        return found
    if found:
        return
    if FILE_VERBOSE:
        log(f"< < {parent_dir} > does not exist, creating >", "warning")
    createdir(parent_dir)
    if targets_document:
        write_json_to_file(target, create)
    else:
        for doc_id, doc in create.items():
            write_json_to_file(parent_dir + "/" + doc_id + ".json", doc)
def updateUserAccessMode(user_id, mode, user):
    """
    Switch a user's access mode between 'blacklist' and 'whitelist'.

    Updates the Mongo document and mirrors the change into the redis-cached
    user object. Raises UserError('USER_NOT_EXIST') or
    ('INCORRECT_ACCESS_MODE') on validation failure.
    """
    filterOperation('updateUserAccessMode', user, user_id)
    old_user_obj = db.users.find_one({'_id': ObjectId(user_id)})
    if old_user_obj is None:
        raise UserError('USER_NOT_EXIST')
    log(
        obj={
            'user_id': user_id,
            'new_mode': mode,
            'old_mode': old_user_obj['access_control']['access_mode']
        })
    if mode not in ['blacklist', 'whitelist']:
        raise UserError('INCORRECT_ACCESS_MODE')
    db.users.update_one({'_id': ObjectId(user_id)},
                        {'$set': {
                            'access_control.access_mode': mode
                        }})
    def updater(obj):
        # BUG FIX: this wrote obj['access_control']['access_control'],
        # leaving the cached 'access_mode' stale and adding a bogus key.
        # Now mirrors the Mongo '$set' above (cf. updateUserRole, which
        # updates 'status' consistently in both places).
        obj['access_control']['access_mode'] = mode
        return obj
    _updateUserRedisValue(user_id, updater)
def updateUserRole(user_id, role, user):
    """
    Change a user's role to 'normal' or 'admin'.

    Updates the Mongo document and keeps the redis-cached user object in
    sync. Raises UserError('USER_NOT_EXIST') or ('INCORRECT_ROLE') on
    validation failure.
    """
    filterOperation('updateUserRole', user, user_id)
    existing = db.users.find_one({'_id': ObjectId(user_id)})
    if existing is None:
        raise UserError('USER_NOT_EXIST')
    log(obj={
        'user_id': user_id,
        'new_role': role,
        'old_role': existing['access_control']['status']
    })
    if role not in ['normal', 'admin']:
        raise UserError('INCORRECT_ROLE')
    db.users.update_one(
        {'_id': ObjectId(user_id)},
        {'$set': {'access_control.status': role}})
    def _sync_cached_user(cached):
        # Mirror the Mongo change into the redis-cached copy.
        cached['access_control']['status'] = role
        return cached
    _updateUserRedisValue(user_id, _sync_cached_user)
def updateCommonTags(pid, tags, user):
    """
    Replace the set of common tags applied to every video in playlist `pid`.

    Computes the delta against the current common tags and pulls/merges only
    the changed tags across all of the playlist's videos.
    """
    log(obj={'pid': pid, 'tags': tags})
    with MongoTransaction(client) as s:
        playlist_obj = db.playlists.find_one({'_id': ObjectId(pid)})
        if playlist_obj is None:
            raise UserError('PLAYLIST_NOT_EXIST')
        filterOperation('editPlaylist', user, playlist_obj)
        # user is editing video tags, not the playlist itself, no need to lock playlist
        tags = tagdb.filter_and_translate_tags(tags, session=s())
        old_tags = listCommonTagIDs(pid, user)
        log(obj={'old_tags': old_tags})
        previous = set(old_tags)
        requested = set(tags)
        tags_added = list(requested - previous)
        tags_to_remove = list(previous - requested)
        # NOTE(review): this bounds the *net increase* in tag count, not the
        # resulting total — confirm that is the intended limit.
        if len(tags_added) - len(tags_to_remove) > PlaylistConfig.MAX_COMMON_TAGS:
            raise UserError('TOO_MANY_TAGS')
        all_video_ids, _ = listAllPlaylistVideosUnordered(pid)
        if tags_to_remove:
            tagdb.update_many_items_tags_pull(
                all_video_ids, tags_to_remove, makeUserMeta(user), session=s())
        if tags_added:
            tagdb.update_many_items_tags_merge(
                all_video_ids, tags_added, makeUserMeta(user), session=s())
        s.mark_succeed()
def update_gmail_token(request):
    """
    Store a new Gmail access token on the requesting user's social-auth
    profile and mark the user as gmail-readable.

    Expects request.data['token']. Returns a JsonResponse built by
    create_response with a success flag and a ResponseCodes value.
    """
    body = request.data
    token = body['token']
    try:
        user_profile = UserSocialAuth.objects.get(user=request.user)
        user_profile.extra_data['access_token'] = token
        user_profile.save()
        request.user.is_gmail_read_ok = True
        request.user.save()
        success = True
        code = ResponseCodes.success
    except UserSocialAuth.DoesNotExist:
        # BUG FIX: objects.get() raises DoesNotExist instead of returning
        # None, so the old `if user_profile is not None` check could never
        # reach its else branch and a missing profile was misreported as
        # couldnt_update_google_token. Catch DoesNotExist explicitly.
        success = False
        code = ResponseCodes.user_profile_not_found
    except Exception as e:
        log(traceback.format_exception(None, e, e.__traceback__), 'e')
        success = False
        code = ResponseCodes.couldnt_update_google_token
    return JsonResponse(create_response(data=None, success=success, error_code=code), safe=False)
def _in_goal(self, state): assert len(state) == self.state_dim x = state[0] y = state[1] theta = state[2] vel = 0 # not used any more if self.set_additional_goal == 'None': if np.sqrt((x - self.goal_state[0])**2 + (y - self.goal_state[1])**2) <= self.goal_pos_tolerance: logger.log("in goal!!") return True else: return False elif self.set_additional_goal == 'angle': if np.sqrt((x - self.goal_state[0]) ** 2 + (y - self.goal_state[1]) ** 2) <= self.goal_pos_tolerance \ and abs(theta - self.goal_state[2]) < self.goal_theta_tolerance: logger.log("in goal with specific angle!!") logger.log("theta:%f" % theta) logger.log("goal theta range from %f to %f" % ((GOAL_STATE[2] - self.goal_theta_tolerance), (GOAL_STATE[2] + self.goal_theta_tolerance))) return True else: return False elif self.set_additional_goal == 'vel': if np.sqrt((x - self.goal_state[0]) ** 2 + (y - self.goal_state[1]) ** 2) <= self.goal_pos_tolerance \ and abs(vel - self.goal_state[3]) < 0.25: logger.log("in goal with specific velocity!!") return True else: return False else: raise ValueError("invalid param for set_additional_goal!")
def update_email(redis_user_key, user_id, new_email):
    """
    Set a new e-mail address on a user's profile.

    Validates length (UserConfig.MAX_EMAIL_LENGTH) and basic a@b.c shape,
    updates Mongo, then mirrors the change into the redis-cached user.
    Raises UserError('INCORRECT_EMAIL') or ('USER_NOT_EXIST').
    """
    log(obj={
        'redis_user_key': redis_user_key,
        'user_id': user_id,
        'new_email': new_email
    })
    too_long = len(new_email) > UserConfig.MAX_EMAIL_LENGTH
    if too_long or not re.match(r"[^@]+@[^@]+\.[^@]+", new_email):
        raise UserError('INCORRECT_EMAIL')
    user_doc = db.users.find_one({'_id': ObjectId(user_id)})
    if user_doc is None:
        raise UserError('USER_NOT_EXIST')
    log(obj={'old_email': user_doc['profile']['email']})
    db.users.update_one(
        {'_id': ObjectId(user_id)},
        {'$set': {'profile.email': new_email}})
    def _sync_cached_user(cached):
        # Mirror the Mongo change into the redis-cached user object.
        cached['profile']['email'] = new_email
        return cached
    _updateUserRedisValue(user_id, _sync_cached_user)
def updatePlaylistInfo(pid, language, title, desc, cover, user, private=False):
    """
    Update a playlist's per-language title/description, cover and privacy flag.

    Raises UserError for over-length or empty title/desc, over-length cover
    URL, or a missing playlist. Cover is only overwritten when a new value is
    supplied.
    """
    log(obj={'title': title, 'desc': desc, 'cover': cover, 'private': private})
    # Validate user-supplied fields before taking the lock.
    if len(title) > PlaylistConfig.MAX_TITLE_LENGTH:
        raise UserError('TITLE_TOO_LONG')
    if len(desc) > PlaylistConfig.MAX_DESC_LENGTH:
        raise UserError('DESC_TOO_LONG')
    if cover and len(cover) > PlaylistConfig.MAX_COVER_URL_LENGTH:
        raise UserError('URL_TOO_LONG')
    if not title:
        raise UserError('EMPTY_TITLE')
    if not desc:
        raise UserError('EMPTY_DESC')
    # Per-playlist redis lock serializes edits; the Mongo transaction scopes
    # the (up to) two update_one calls below.
    with redis_lock.Lock(rdb, "playlistEdit:" + str(pid)), MongoTransaction(client) as s:
        list_obj = db.playlists.find_one({'_id': ObjectId(pid)})
        log(obj={'playlist': list_obj})
        if list_obj is None:
            raise UserError('PLAYLIST_NOT_EXIST')
        filterOperation('editPlaylist', user, list_obj)
        # Cover is optional: only overwrite when a new URL was provided.
        if cover:
            db.playlists.update_one({'_id': ObjectId(pid)}, {'$set': {
                "cover": cover
            }}, session=s())
        # Title/desc are stored per language ("title.<lang>", "desc.<lang>").
        db.playlists.update_one({'_id': ObjectId(pid)}, {
            '$set': {
                "title.%s" % language: title,
                "desc.%s" % language: desc,
                "private": private,
                'meta.modified_by': makeUserMeta(user),
                'meta.modified_at': datetime.now()
            }
        }, session=s())
        s.mark_succeed()
def traceroute(self):
    """
    Perform a manual traceroute to self.host by pinging once per TTL value
    from self.start_hop to self.max_hops, recording the responding IP and
    reverse-DNS name for each hop. The collected results dict (including any
    error text) is appended to self.results.

    NOTE(review): the parsing relies on str.translate(None, ...), which is
    Python-2-only, and on ping flags ('-c 1', '-t <ttl>', '-W <timeout>')
    whose meaning matches BSD/macOS ping — confirm the target platform.
    """
    results = {
        "url": self.url,
        "host": self.host,
        "max_hops": self.max_hops,
        "start_hop": self.start_hop,
        "timeout": self.timeout
    }
    self.dns_test(results)
    traceroute_results = []  # Contains Dict("string", "string")
    try:
        t = self.start_hop
        finalIp = "Placeholder"  # replaced by the destination IP on hop 1
        logger.log("i", "Conducting traceroute on " + self.host)
        for t in range(self.start_hop, self.max_hops + 1):
            process = ['ping', self.host, '-c 1', '-t ' + str(t), '-W ' + str(self.timeout)]  # Ping in separate process
            response = subprocess.Popen(process, stdout=subprocess.PIPE).communicate()[0]
            # Parse the process output for information on the ping
            if t == 1:
                if response == "":
                    raise Exception("Host not available")
                # First output line names the destination; its IP (third
                # token, parentheses stripped) is the loop's stop marker.
                pingSendInfo = response.splitlines()[0]
                pingSendSplit = pingSendInfo.split()
                finalIp = pingSendSplit[2].translate(None, '()')
            # Second line describes this hop's reply.
            ping_info = response.splitlines()[1]
            split_by_word = str.split(ping_info)
            reverseDns = "Not Found"
            ip = "Not Found"
            for string in split_by_word:
                stripped = string.translate(None, '():')
                if self.isIp(stripped):
                    ip = stripped
                # A dotted, non-IP, non-"key=value" token is taken to be the
                # hop's reverse-DNS name.
                if '=' not in stripped and '.' in stripped and not self.isIp(stripped):
                    reverseDns = stripped
            temp_results = {}  # Results for this hop of the traceroute
            temp_results["hop_number"] = str(t)
            temp_results["ip"] = ip
            temp_results["reverse_dns"] = reverseDns
            traceroute_results.append(temp_results)
            if ip == "Not Found" and reverseDns != "Not Found":
                pass  # May implement something here later to see what happened
            # Stop once the destination answered or hops are exhausted.
            if ip == finalIp or t == self.max_hops:
                logger.log("s", "Finished Traceroute")
                break
        results["total_hops"] = t
        results["traceroute"] = traceroute_results
    except Exception as e:
        logger.log("e", "Error occured in traceroute for " + self.host + ": " + str(e))
        results["error_text"] = str(e)
    self.results.append(results)
def send(self, method="GET", location="/", url_params=None, payload=None): """ Send a request at 'self.address':'self.port', at the URI 'location', with the credentials <'self.username' ; 'self.password'> via HTTPS or HTTP. The request is sent with the HTTP method 'method' and has the payload 'payload', and the URL can have the parameters 'url_params'. Args: method: (String) HTTP method (GET, POST,...) location: (String) Location in the URI (e.g. /, /a, /a/b/c/d,...) url_params: (Dictionary, {'name': value}) Parameters to pass via the URL. payload: (Dictionary, {'name': value}) Payload of the request Returns: (int, String) or (int, Dictionary) int: HTTP status of the response String or Dictionary: Content of the response """ logger.log("[Request] Try to connect at {}".format(self.url(location))) response = None try: if method == "GET": response = requests.get(self.url(location), params=url_params, auth=(self.username, self.password), verify=False) elif method == "PUT": response = requests.put(self.url(location), params=url_params, json=payload, auth=(self.username, self.password), verify=False) elif method == "POST": response = requests.post(self.url(location), params=url_params, json=payload, auth=(self.username, self.password), verify=False) elif method == "DELETE": response = requests.delete(self.url(location), auth=(self.username, self.password), verify=False) else: response = requests.get(self.url(location), auth=(self.username, self.password), verify=False) except Exception as e: logger.log("[Request] Send to {} failed : {}".format(self.url(location), e)) return 504, e.message # If not authenticated if response.status_code == 401: return response.status_code, 'Not authorized' try: json = response.json() except Exception as e: logger.log("[Request] Message extraction from {} failure : {}".format(self.url(location), e)) return response.status_code, e.message return response.status_code, json
def controller():
    """
    Top up the pool of generator tasks.

    Re-dispatches any not-yet-started unfinished generators, then launches
    enough new ones (at INTERVAL_LENGTH strides past the highest known
    min_value) to reach MAX_GENERATORS_COUNT in flight.
    """
    mp = MaxIndex.update()
    log('controller started, %s' % mp)
    pending = GeneratorStatus.objects.filter(status=False)
    not_ready_count = pending.count()
    for job in pending:
        state = AsyncResult(job.task_id).state
        if state == 'STARTED':
            log('%s is not finished yet!' % job)
        else:
            # Task never started (or was lost) — re-dispatch it.
            log('%s in %s' % (job, state))
            generator.delay(job.min_value, True)
    try:
        latest_pending = pending.latest().min_value if pending else 0
        latest_done = GeneratorStatus.objects.filter(status=True).latest().min_value
        mn = max([latest_pending, latest_done])
    except ObjectDoesNotExist:
        # No finished generator yet: start one interval below zero so the
        # first dispatch below lands at min_value 0 * INTERVAL_LENGTH.
        mn = -settings.INTERVAL_LENGTH
    for i in range(1, settings.MAX_GENERATORS_COUNT - not_ready_count + 1):
        generator.delay(mn + i * settings.INTERVAL_LENGTH, False)