def addUrlToQueue(self, **kwargs):
    """Queue every URL passed via keyword arguments for download.

    NOTE(review): iterating **kwargs yields the keyword *names*; callers
    apparently pass URLs as argument names — confirm against call sites.
    """
    for url in kwargs:
        try:
            queueDownload(url)
        except Exception:
            # Original used a bare except and, on any failure, logged *every*
            # URL as un-queueable. Handle per-URL so one failure does not
            # abort (or mis-report) the rest of the batch.
            log("Unable to queue: " + url)
def find(self):
    """Poll the host's serial ports until a new one appears, then open it.

    Returns self so calls can be chained (e.g. ``Device().find()``).

    Raises
    ------
    RuntimeError
        If a device has already been opened on this instance.
    """
    if self.dev:
        raise RuntimeError("Device already found")
    log("Waiting for bootrom")
    old = self.serial_ports()
    while True:
        new = self.serial_ports()
        # port added: a strict superset means exactly the new port is the
        # difference (serial_ports() presumably returns a set — confirm)
        if new > old:
            port = (new - old).pop()
            break
        # port removed: rebase the snapshot and keep waiting
        elif old > new:
            old = new
        time.sleep(0.25)
    log("Found port = {}".format(port))
    # BAUD and TIMEOUT are module-level constants defined elsewhere in the file.
    self.dev = serial.Serial(port, BAUD, timeout=TIMEOUT)
    return self
def swap(version_spec=None):
    """Switch the active node version via nodist.

    Parameters
    ----------
    version_spec : str, optional
        Version to switch to; if None the user is prompted interactively.
    """
    # Idiom fix: compare to None with `is not`, not `!=`.
    if version_spec is not None:
        log('Swapping node version to ~' + version_spec)
    else:
        print('\n\nSwap current node version')
        version_spec = get_input('Enter the version of node you\'d like to swap to ~', vald_pch)
    do_ext('nodist ' + version_spec)
    log('Swapped node version ~' + version_spec)
def send_message_to_user(user_id):
    """Send the newly fetched photos as a VK message attachment to one user."""
    session = __auth_as_group()
    message_params = {
        'user_id': user_id,
        'random_id': set_random_id(),
        'attachment': get_new_photos(),
    }
    session.method('messages.send', message_params)
    log('Messages were sended to user with id {0}'.format(user_id))
def read_pkg(pkg):
    """Read a package manifest; if it pins a node version, swap to it.

    Returns True when a version was found and swapped, False otherwise.
    """
    log('Reading ~\'' + pkg + '\'...')
    version = read_json(pkg)
    # Guard clause: nothing pinned, nothing to do.
    if not version:
        return False
    log('\nFound ~' + version)
    swap(version)
    return True
def dump_brom(device, bootrom__name, word_mode=False):
    """Dump 0x20000 bytes of bootrom from *device* into *bootrom__name*.

    In word mode the device interleaves a garbage word before each payload
    word, so reads happen in 4-byte pairs and the first of each is dropped.
    """
    log("Found send_dword, dumping bootrom to {}".format(bootrom__name))
    with open(bootrom__name, "wb") as out_file:
        if not word_mode:
            # Single bulk read of the whole 128 KiB region.
            out_file.write(device.read(0x20000))
            return
        for _ in range(0x20000 // 4):
            device.read(4)  # discard garbage word preceding each payload word
            out_file.write(device.read(4))
def read_json(file):
    """Return the pinned node version from a JSON manifest, or None.

    Parameters
    ----------
    file : str
        Path to the JSON file to inspect.
    """
    with open(file) as jfile:
        dep = json.load(jfile)
    # Bug fix: original indexed dep['dependencies'] directly and raised
    # KeyError on manifests without a "dependencies" section.
    if 'node' in dep.get('dependencies', {}):
        return transform_dep_readable(dep['dependencies']['node'])
    log('\t~' + file + ' found but no node version specified...\nReturning to main...\n')
    return None
def uploadPicture(self, **kwargs):
    #Takes a binary file and places it in the 'queue' folder for image processing

    def write_uploaded_image_file(location, cherrypyObj):
        # Persist the raw upload body to *location*.
        tempFile = open(location, 'wb')
        tempFile.write(cherrypyObj.file.read())
        tempFile.close()

    def get_duplicate_image_file_count(location, filename):
        # Count existing files whose names contain this file's stem
        # (fuzzy substring match — deliberate, see note below).
        filename_found_count = 0
        filename_without_extension = filename.split('.')[0]
        for image_file in os.listdir(location):
            if filename_without_extension in image_file:
                filename_found_count = filename_found_count + 1
        return (filename_found_count)

    def uploadImage(passedImage):
        #Save each file in the queue_save_location folder as its own filename
        #If a duplicate filename is found append a number to the file and write it anyhow
        if not os.path.isfile( os.path.join(locations.queue_save_location(), passedImage.filename) ):
            file_location = os.path.join(locations.queue_save_location(), passedImage.filename)
            write_uploaded_image_file( file_location, passedImage )
            #Create Thumbnail for queue process page
            pictureConverter.create_queue_thumbnail( file_location,locations.queue_save_location() )
        else:
            #Find out how many files exist with the same name and append a count of the files to the filename - super hacky!!
            #Known "problem" of fuzzy matching between already existing files. Not serious as copy will be made either way.
            count = get_duplicate_image_file_count(locations.queue_save_location(), passedImage.filename)
            filename_with_count = passedImage.filename.replace('.', '-' + str(count) + '.')
            file_location = os.path.join(locations.queue_save_location(), filename_with_count)
            write_uploaded_image_file( file_location, passedImage )
            #Create Thumbnail for queue process page
            pictureConverter.create_queue_thumbnail( file_location, locations.queue_save_location() )

    Headers = cherrypy.request.headers
    if isAllowedInAdminArea(Headers):
        try:
            #Object of each file passed to the server via post
            uploadObj = kwargs.get('file[]')
            # If len() succeeds this is a list of uploads; if it raises,
            # a single file was posted and we upload it directly.
            try:
                imageCount = len(uploadObj)
                for image in uploadObj:
                    uploadImage(image)
            except:
                uploadImage(uploadObj)
        except Exception, err:
            # NOTE(review): Python 2 iterates err.args here; a non-iterable
            # message would itself raise — confirm intended.
            for error in err:
                log("Unable to receive upload - " + str(error), "WEB", "MEDIUM")
def calibrate(camera, laser, filepath, loglines=None):
    """Capture the two laser-dot positions and store them as the calibration
    reference (left/right position and their separation), then log images.
    """
    ((p1, p2), image1, image2, im1_cr, imdiff, raw_blobs, blobs) = \
        capture_to_positions(camera, laser, verbose=True)
    log(loglines, "Vision: calibration is "+str(p1)+" and "+str(p2))
    if p1 == POSERR or p2 == POSERR:
        log(loglines, "CALIBRATION ERROR! Failed to find 2 positions")
    # NOTE(review): execution continues even after the error above, so POSERR
    # values get stored as calibration — confirm this is intended.
    # Left dot = the one with the larger y coordinate; right = the other.
    Calibration.leftpos = p1 if p1[1]>p2[1] else p2
    Calibration.rightpos = p1 if p2 is Calibration.leftpos else p2
    Calibration.separation = _dot_separation(p1,p2)
    log_calibration(filepath, image1, image2, imdiff)
def add_user(self, user_name, user_id):
    """Insert a (user_name, user_id) row into the users table.

    Raises
    ------
    DbNotExists
        If the database file at self.DB_ADDR does not exist.
    """
    # Guard clause instead of wrapping the whole body in the if.
    if not os.path.exists(self.DB_ADDR):
        raise DbNotExists
    conn = sqlite3.connect(self.DB_ADDR)
    try:
        cursor = conn.cursor()
        cursor.execute("INSERT INTO users VALUES (? , ?)", (user_name, user_id))
        conn.commit()
    finally:
        # Bug fix: the original leaked the connection (never closed).
        conn.close()
    log('User {0} with id {1} were added'.format(user_name, user_id))
def m_find_json(pkg):
    """Search for *pkg* on disk; log the outcome and return True when found."""
    log('\n\nSearching for ~\'' + pkg + '\' file...')
    found = bool(find_file(pkg))
    status = 'found' if found else 'not found'
    log('\t~\'' + pkg + '\' ' + status + ' ...')
    return found
def startServer():
    # Bootstraps the web app: ensures storage folders exist, seeds test data
    # on an effectively-empty database, then blocks in cherrypy's main loop.
    if database.verify_database_existence():
        try:
            #If the images /thumbnails / queue folders don't exist create them.
            database.verify_folder_existence()
            # <=1 tag means a fresh/empty DB — seed it with test content.
            if len( database.get_tags() ) <= 1:
                database.create_test_data()
            cherrypy.quickstart(main_site(), config=conf)
        except Exception, err:
            # NOTE(review): Python 2 iterates err.args here; also the message
            # lacks a separator before str(error) — confirm intended.
            for error in err:
                log("Unable to start Webserver" + str(error), "WEB", "SEVERE")
def create_buffer(data, delta, steps, mode="avg"):
    """Reduce raw audio *data* into *steps* normalized sample chunks.

    Parameters
    ----------
    data : sequence
        Raw sample data.
    delta : int
        Samples per chunk (area width handed to the aggregators).
    steps : int
        Number of chunks to produce.
    mode : str
        "avg" or "max" chunk aggregation.

    Raises
    ------
    ValueError
        If *mode* is not "avg" or "max".
    """
    samples = []
    log("Creating chunks", steps=steps, mode=mode)
    for j in range(steps):
        if mode == "avg":
            samples.append(avg_in_area(data, j, delta))
        elif mode == "max":
            samples.append(max_in_area(data, j, delta))
        else:
            # Bug fix: `raise "Unsupported mode"` raises a string, which is a
            # TypeError on Python 3; raise a proper exception instead.
            raise ValueError("Unsupported mode")
    log("Finished sample chunk creation", length=len(samples))
    return normalize(samples)
def eventHandler(server, topicList): """ @Server is a mqtt connection @TopicList is an array of topics to subscribe to The eventHandler will subscribe to all topics in the list And it will listen for MQTT connections """ # TODO: send updated values from here while True: server.start() # start time listner t0 = time.time() # receive actuator data for topic in topicList: server.subscribe(config.MQTT_ENDPOINT_PREFIX + server.id + topic) logger.log( "Subscribe event: " + config.MQTT_ENDPOINT_PREFIX + server.id + topic, logger.LOG_DEBUG) # send sensor data for data in sensordata.readAll(): server.send(data.payload, config.MQTT_ENDPOINT_PREFIX + server.id + data.topic) logger.log("Uploading to {} with data {}".format( config.MQTT_ENDPOINT_PREFIX + server.id + data.topic, data.payload)) t1 = time.time() time.sleep(config.interval) logger.log( "Event loop finished {} seconds and slept for {} seconds".format( t1 - t0, config.interval), logger.LOG_DEBUG) server.end() logger.log("EventHandler has stopped", logger.LOG_ERROR)
def draw(output_file, samples, step_width, step_height, color="white", gap=1, rounded=0, align="bottom"):
    """Render normalized *samples* as an SVG bar chart, save it, and convert
    the SVG to a PNG alongside it.

    align: "bottom" anchors bars to the baseline; "center" centers them
    vertically. Any other value logs an error and aborts without saving.
    """
    dwg = svgwrite.Drawing(output_file + ".svg")
    log("Start drawing boxes", boxes=len(samples), color=color,
        rounded_radius=rounded, align=align, gap=gap)
    for i in range(0, len(samples)):
        if align == "bottom":
            # y offset leaves (1 - value) of the column empty above the bar
            pos = (i * (step_width), (1 - samples[i]) * step_height)
        elif align == "center":
            # center the bar around the vertical midpoint
            pos = (i * (step_width), (0.5 * step_height) - (0.5 * samples[i] * step_height))
        else:
            log("Found unsupported alignment", align=align)
            return
        dwg.add(dwg.rect(pos, (step_width - gap, samples[i] * step_height),
                         rounded, rounded, fill=color))  # assuming top-left corner to bottom-right growth
    dwg.save()
    log("Saved SVG to file", output=output_file,
        width=((step_width + gap) * len(samples)), height=step_height)
    cairosvg.svg2png(
        url=output_file + '.svg',
        write_to=output_file + '.png',
        parent_width=len(samples) * (step_width),
        parent_height=step_height,
        dpi=600,
        scale=1
    )
    log("Converted SVG to PNG", output=output_file,
        width=((step_width + gap) * len(samples)), height=step_height)
def receive(payload, pin):
    """ Convert the received data to a pwm signal on the pin """
    val = payload
    # String payloads arrive as a percentage; parse to int via float.
    if type(payload) is str:
        val = int(float("".join(payload)))
    logger.log("Value in percent {}".format(val), logger.LOG_DEBUG)
    # Scale percent (0-100) to a duty-cycle byte and clamp to [0, 255].
    val = int(val * 2.55)
    val = max(0, min(val, 255))
    logger.log("Value as a byte {}".format(val), logger.LOG_DEBUG)
    pi.set_PWM_dutycycle(pin, val)
def build(self, train='eng.train', dev='eng.testa', test='eng.testb'):
    """Load the train/dev/test splits from self.path onto this instance."""
    logger.log(f'Reading data from {self.path}')
    # Load each split the same way; only the attribute and label differ.
    splits = (('train', train, 'Train'), ('dev', dev, 'Dev'), ('test', test, 'Test'))
    for attr, fname, label in splits:
        setattr(self, attr, self.get(fname))
        logger.log(f'{label} {self.path+"/"+fname}')
def _train_one_epoch_normal(self, train_loader, embedding_storage_mode, print_every_batch):
    """Run one standard training epoch and return the mean loss per batch."""
    epoch_loss = 0
    for i, batch in enumerate(train_loader):
        self.optimizer.zero_grad()
        batch_loss = self.model.forward_loss(batch)
        batch_loss.backward()
        # Clip gradients at L2-norm 5 to stabilise training.
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5)
        self.optimizer.step()
        # batch[0] presumably holds the sentences/tokens — confirm loader format.
        store_embeddings(batch[0], embedding_storage_mode)
        epoch_loss += batch_loss.item()
        # Periodic progress line; epoch_loss/(i+1) is the running mean so far.
        if (i % print_every_batch == 0 or i + 1 == len(train_loader)):
            logger.log(
                f' Batch ({i+1}/{len(train_loader)}), loss: {epoch_loss/(i+1):.4f}'
            )
    return epoch_loss / len(train_loader)
def detect_file_name(self) -> None:
    """ Confirm the executable file if there is multiple files
    that are runnable in current folder

    Raises
    ------
    RuntimeError
        If no executable code detected, or an invalid menu option is chosen
    """
    acceptable_file_ext: Dict[str, PLanguage] = {}
    existed_templates = self.load_kt_config()
    for k in existed_templates.keys():
        acceptable_file_ext[
            MAP_TEMPLATE_TO_PLANG[k].extension] = MAP_TEMPLATE_TO_PLANG[k]
    # Collect files whose extension matches a configured template.
    runnable_files: List[Path] = [
        f for f in self.cwd.iterdir()
        if f.is_file() and f.suffix[1:] in acceptable_file_ext
    ]
    if not runnable_files:
        raise RuntimeError('Not executable code file detected')
    opt = 0
    if len(runnable_files) > 1:
        log_cyan('Choose a file to run')
        for i, f in enumerate(runnable_files):
            # Bug fix: pathlib.Path has no `.file_name` attribute — use `.name`.
            log(f' {i}: {f.name}')
        opt = int(input())
        # Bug fix: was an `assert`, which disappears under `python -O`.
        if not 0 <= opt < len(runnable_files):
            raise RuntimeError('Invalid option chosen')
    self.file_name = runnable_files[opt]
    ext = runnable_files[opt].suffix[1:]
    alias = acceptable_file_ext[ext].alias
    self.lang = acceptable_file_ext[ext].full_name
    file_name = self.file_name.stem
    template = existed_templates.get(alias, {})
    # Robustness: default missing script entries to '' — the original's
    # .get(...) could return None and crash on .replace.
    self.pre_script = template.get('pre_script', '').replace('$%file%$', file_name)
    self.script = template.get('script', '').replace('$%file%$', file_name)
    self.post_script = template.get('post_script', '').replace('$%file%$', file_name)
def on_message(self, client, userdata, msg):
    """MQTT callback: route an actuator payload to the matching motor control."""
    topic = msg.topic
    payload = float(msg.payload.decode("utf-8"))
    prefix = config.MQTT_ENDPOINT_PREFIX + self.id
    logger.log("Payload in percent {}".format(payload), logger.LOG_DEBUG)
    # Ordered dispatch table: (full topic, debug message, handler).
    routes = (
        (prefix + config.subscribe[0], "send to light", motor.controlLight),
        (prefix + config.subscribe[1], "send to flowpump", motor.controlFood),
        (prefix + config.subscribe[2], "sending to foodpump", motor.controlWaterPump),
    )
    for expected_topic, debug_msg, handler in routes:
        if topic == expected_topic:
            logger.log(debug_msg, logger.LOG_DEBUG)
            handler(payload)
            return
    logger.log("Unknown topic", logger.LOG_ERROR)
def main():
    """Parse CLI options (-p/--pool dns|http, -h/--help) and poll forever."""
    t = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hp:", ["help", "pool="])
    except getopt.GetoptError as err:
        # Bug fix: the original silently swallowed bad options (`pass`),
        # leaving the user with no feedback. Report and show usage.
        log(0, str(err))
        usage()
    else:
        for o, a in opts:
            if o in ("-p", "--pool"):
                t = PoolingLogs.TYPE_DNS if a == "dns" else PoolingLogs.TYPE_HTTP
            elif o in ("-h", "--help"):
                usage()
    timeout = 2
    while True:
        # select with no fds acts as a portable sleep of `timeout` seconds.
        select.select([], [], [], timeout)
        log(0, "pooling...")
        pooling(t)
def updateBlogData(self, *arguments, **kwargs):
    # Admin-only endpoint: dispatch blog update/insert/delete based on the
    # posted 'postType' and report success or failure as a plain string.
    Headers = cherrypy.request.headers
    if isAllowedInAdminArea(Headers):
        try:
            if kwargs['postType'] == "update":
                succesfulUpdate = database.update_blog(kwargs)
            elif kwargs['postType'] == "insert":
                succesfulUpdate = database.insert_blog(kwargs)
            elif kwargs['postType'] == "delete":
                succesfulUpdate = database.delete_blog_by_id(kwargs['blog_id'])
            # NOTE(review): an unrecognised postType leaves succesfulUpdate
            # unbound — the NameError below falls into the except handler.
            if succesfulUpdate:
                return ('UPDATED')
            else:
                return ("ERROR - UNABLE TO UPDATE")
        except Exception, err:
            # NOTE(review): Python 2 iterates err.args here — confirm intended.
            for error in err:
                log("Couldn't update blog - " + str(error), "DATABASE", "SEVERE")
            return ("Error updating blog - contact admin")
def manageBlogs(self, *arguments, **kwargs): Headers = cherrypy.request.headers if isAllowedInAdminArea(Headers): #Dirty double try :( -REFACTOR try: #If blog_id doesn't exist then a new entry is being created so no blog db values try: blog = database.get_blogs("id", kwargs['blog_id']) except: blog = "" perform_action = kwargs['perform_action'] mako_template = Template(filename=os.path.join(locations.current_folder(),'static/templates/manage_blogs.tmpl') ) self.mako_template_render = mako_template.render(blog = blog, perform_action = perform_action) return (self.mako_template_render) except Exception, err: return ( self.default() ) for error in err: log("Unable to Manage Blog - " + str(error), "WEB", "LOW")
def read_config_from_file(self) -> None: """ kttool deals with 2 config files: - kattisrc: provided by official kattis website, provide domain name and general urls - ktconfig: handle templates by kttool Raises ------ RuntimeError If config file is invalid ConfigError If config file is invalid """ # Initialize ktconfig file if file doesnt exist if not self.kt_config.is_file(): with open(self.kt_config, 'w') as f: f.write('{}\n') self.cfg = ConfigParser() if not self.config_path.is_file(): raise RuntimeError(f'No valid config file at {self.config_path}. ' f'Please download it at {KATTIS_RC_URL}') self.cfg.read(self.config_path) username = self.cfg.get('user', 'username') password = token = None try: password = self.cfg.get('user', 'password') except NoOptionError: pass try: token = self.cfg.get('user', 'token') except NoOptionError: pass if password is None and token is None: raise ConfigError('''\ Your .kattisrc file appears corrupted. It must provide a token (or a KATTIS password). Please download a new .kattisrc file''') log(f'Username: {color_green(username)}')
def readReal():
    """ Used to read real sensor data """
    # pH: raw analog level scaled by the probe constant 18.214.
    phValue = str(float(readLevel(config.sensorPins[-1])) / 18.214)
    # NOTE(review): this overwrites the real pH reading with a random value
    # in [7, 8) — looks like leftover debug/stub code; confirm before removing.
    phValue = str(random.random() + 7)
    try:
        waterTemp = str(readTemperature(getOneWireSensor()))
        logger.log("Read from w1 the water temp {} C".format(waterTemp),
                   logger.LOG_DEBUG)
    except Exception as e:
        # Sensor unavailable: report -1 rather than failing the whole cycle.
        waterTemp = "-1"
        logger.log("Count not read temperature from w1", logger.LOG_ERROR)
    light = readlight()
    humidity, humidityTemp = readHumidity()
    # NOTE(review): real humidity is likewise overwritten with a random value
    # in [40, 60) — confirm whether this debug stub should be removed.
    humidity = str((random.random() * 20) + 40)
    return [
        Payload(phValue, "/sensor/waterph"),
        Payload(waterTemp, "/sensor/watertemp"),
        Payload(light, "/sensor/lightstr"),
        Payload(humidity, "/sensor/airhumidity"),
        # NOTE(review): air temp reuses the *water* temperature — confirm.
        Payload(waterTemp, "/sensor/airtemp")
    ]
def test_dep():
    """Check every entry in DEPENDENCIES is on PATH; abort on the first miss."""
    print('\n\nTesting dependencies...')
    for dep in DEPENDENCIES:
        # Idiom fix: compare to None with `is`, not `==`.
        if which(dep) is None:
            log('Dependency ' + dep + ' not installed...\nPlease install and try again...')
            return
        log(dep + ' OK')
    log('Dependencies OK')
def main():
    """Log the proxy configuration and start the HTTP server."""
    log(0, "Loading config: {}".format(conf.get()))
    log(0, "Proxy {} is up and running.".format(conf['proxy.name']))
    log(0, "There are {} nodes around.".format(Node.select().count()))
    server_opts = {
        'host': conf['proxy.listen.ip'],
        'port': conf['proxy.listen.port'],
        'debug': conf['proxy.debug'],
    }
    app.run(**server_opts)
def startWebServer():
    # Verifies the database, starts the download-queue daemon thread,
    # registers a watchdog monitor, then blocks in cherrypy's main loop.
    global manager
    if database.verifyDatabaseExistence() and database.tableIntegrityCheck():
        try:
            manager = threading.Thread(target=queueManager)
            # Don't wait for thread to close on cherrypy exit stop method will
            # set the serverShuttingDown global to True
            # then the thread will exit
            manager.daemon = True
            manager.start()
            # Register cherrypy monitor - runs every 30 seconds
            # The monitor will check to make sure the download manager
            # Is still active. If it is not and a shutdown has been requested
            # It will terminate the daemon, else it will start a manager.
            EventScheduler = Monitor(cherrypy.engine, checkManager, 30,
                                     'EventScheduler')
            EventScheduler.start()
            cherrypy.quickstart(webServer(), config=conf)
        except Exception, err:
            # NOTE(review): Python 2 iterates err.args here — confirm intended.
            for error in err:
                log("Unable to start Web Server - " + str(error))
def pooling(t):
    """Poll the proxy for queued commands, submitting at most one pending
    response per cycle; queue screenshot results for the next cycle."""
    # FIXME: dns is not yet ready.
    global toSubmit
    log(0, "Pooling using: " + PoolingLogs.poolingName(t))
    d = None
    if len(toSubmit) > 0:
        # Piggy-back one queued command response onto this poll request.
        d = toSubmit.pop()
        print("Submitting: " + str(d))
    res = request('/pool/' + Configuration()['node.id'], data=d)
    j = json.loads(res)
    for i in j:
        if 'command' in i:
            try:
                # The command field is itself JSON-encoded.
                jj = json.loads(i['command'])
                if 'name' in jj and jj['name'] == "screenshot":
                    log(0, "Taking screenshot...")
                    d = {"cmd": i['id'], "data": take_screenshot()}
                    toSubmit.append(d)
                    print("adding to the list...")
            except Exception as e:
                # Malformed command JSON: report and continue with the rest.
                print(str(e))
                pass
        else:
            print(str(i))
def _train_one_epoch_mixup(self, train_loader, n_passes, embedding_storage_mode, print_every_batch):
    """Run one mixup training epoch (n_passes passes over the loader);
    each big batch is split in half and the halves are mixed in forward_loss.
    Returns the mean loss per batch across all passes."""
    epoch_loss = 0
    for j in range(n_passes):
        logger.log(f' Pass ({j+1}/{n_passes})')
        pass_loss = 0
        for i, big_batch in enumerate(train_loader):
            # Split the batch into two equal halves to mix together;
            # a trailing odd element is dropped.
            sz = len(big_batch[0]) // 2
            batch1 = (big_batch[0][:sz], big_batch[1][:sz])
            batch2 = (big_batch[0][sz:sz + sz], big_batch[1][sz:sz + sz])
            self.optimizer.zero_grad()
            batch_loss = self.model.forward_loss(batch1, batch2)
            batch_loss.backward()
            # Clip gradients at L2-norm 5 to stabilise training.
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5)
            self.optimizer.step()
            store_embeddings(batch1[0], embedding_storage_mode)
            store_embeddings(batch2[0], embedding_storage_mode)
            pass_loss += batch_loss.item()
            # Periodic progress line; pass_loss/(i+1) is the running mean.
            if (i % print_every_batch == 0 or i + 1 == len(train_loader)):
                logger.log(
                    f' Batch ({i+1}/{len(train_loader)}), loss: {pass_loss/(i+1):.4f}'
                )
        epoch_loss += pass_loss
    return epoch_loss / (len(train_loader) * n_passes)
def crash_preloader(device, config):
    """Deliberately crash the preloader using the configured method so the
    device re-enumerates (presumably in bootrom mode — confirm), then
    re-discovers and returns the new device handle."""
    print("")
    log("Found device in preloader mode, trying to crash...")
    print("")
    if config.crash_method == 0:
        try:
            # Small ARM stub plus padding; the jump is expected to fault,
            # hence the RuntimeError handler below.
            payload = b'\x00\x01\x9F\xE5\x10\xFF\x2F\xE1' + b'\x00' * 0x110
            device.send_da(0, len(payload), 0, payload)
            device.jump_da(0)
        except RuntimeError as e:
            log(e)
            print("")
    elif config.crash_method == 1:
        # All-zero payload with a nonzero signature length argument.
        payload = b'\x00' * 0x100
        device.send_da(0, len(payload), 0x100, payload)
        device.jump_da(0)
    elif config.crash_method == 2:
        # Reading address 0 — presumably faults the preloader; confirm.
        device.read32(0)
    # Close the dead port and wait for the device to come back.
    device.dev.close()
    device = Device().find()
    return device
def get_input(message, validator):
    """Prompt repeatedly until *validator* accepts the input; return it.

    (Iterative form of the original tail-recursive retry loop.)
    """
    while True:
        u_in = input(message)
        log('Validating ~' + u_in)
        if validator(u_in):
            log('\n~' + u_in + ' valid')
            return u_in
        log('Invalid input ~' + u_in + ' try again...')
manager.daemon = True manager.start() # Register cherrypy monitor - runs every 30 seconds # The monitor will check to make sure the download manager # Is still active. If it is not and a shutdown has been requested # It will terminate the daemon, else it will start a manager. EventScheduler = Monitor(cherrypy.engine, checkManager, 30, 'EventScheduler') EventScheduler.start() cherrypy.quickstart(webServer(), config=conf) except Exception, err: for error in err: log("Unable to start Web Server - " + str(error)) else: try: # Database doesn't exist, create it then recursively try again database.createFreshTables() if CURRENT_STATE is "debug": database.debugCreateTestData() startWebServer() except Exception, err: for error in err: log("Unable to create DB: " + error) log("Your database may be corrupt, \ delete .database.db and try again") if __name__ == "__main__": startWebServer()
def main():
    """Create every database table used by the proxy."""
    log(0, "Creating tables... " + str(Configuration().get()))
    # Same creation order as before, expressed as a single loop.
    for model in (Node, CommandQueue, PoolingLogs, CommandResponse):
        model.create_table()
def is_dropoff(pos1, pos2, verbose=False, loglines=None):
    """Return True if the two current dot positions look like a dropoff:
    either dot moved past its threshold, or the pair separated too far
    relative to the stored calibration."""
    bad_signs = 0
    # Order the dots to match the calibration convention (larger y == left).
    (L, R) = (pos1, pos2) if pos1[1]>pos2[1] else (pos2, pos1)
    if _dot_separation(L, Calibration.leftpos) > movement_threshold_left:
        if verbose:
            log(loglines, "Vision: left dot looks like a dropoff")
        bad_signs += 1
    elif verbose:
        log(loglines, "Vision: left dot is okay")
    if _dot_separation(R, Calibration.rightpos) > movement_threshold_right:
        if verbose:
            log(loglines, "Vision: right dot looks like a dropoff")
        bad_signs += 1
    elif verbose:
        log(loglines, "Vision: right dot is okay")
    if _dot_separation(L,R) - Calibration.separation > separation_threshold:
        if verbose:
            log(loglines, \
                "Vision: separation distance looks like a dropoff")
        bad_signs += 1
    elif verbose:
        log(loglines, "Vision: separation distance is okay")
    # Any single bad sign is enough to report a dropoff.
    return bad_signs > 0
def image_process(imon, imoff, verbose=False, loglines=None):
    """Locate the two laser-dot positions from an on/off image pair.

    Returns (pos1, pos2) — POSERR for any dot not found — or, when verbose,
    a tuple also carrying the intermediate images and blob lists.
    """
    if verbose:
        (imdiff, imon_cr, imoff_cr) = differentiate_images(imon, imoff, True)
    else:
        imdiff = differentiate_images(imon, imoff)
    raw_blobs = find_connected_components(imdiff)
    # Bug fix: filter() is a lazy iterator on Python 3; len(blobs) and the
    # repeated iteration below require a concrete list.
    blobs = list(filter(_blob_size_filter, raw_blobs))
    if verbose:
        log(loglines, "Vision: "+str(len(raw_blobs))+" raw blobs")
        log(loglines, "Vision: "+str(len(blobs))+" correctly sized blobs")
    if len(blobs) <= 2:
        good_positions = [b.avg_position() for b in blobs]
    else:
        # More than two candidates: score every pair and keep the cheapest.
        combos = list(combinations(blobs, 2))
        pos_costs = []
        for b1, b2 in combos:
            pos1, pos2 = b1.avg_position(), b2.avg_position()
            theta = _get_angle(pos1, pos2)
            if not _angle_filter(theta):
                continue
            # size factor = worst-sized blob in the pairing; expected < 10
            size_factor = max(abs(b1.size()-blob_size_ideal), \
                              abs(b2.size()-blob_size_ideal))
            # angle factor = angular distance from desired; expected < 0.8
            angle_factor = min(abs(theta-ideal_angle_1), \
                               abs(theta-ideal_angle_2))
            cost = size_factor*size_weight + angle_factor*angle_weight
            pos_costs.append( ((pos1,pos2), cost) )
        if verbose:
            log(loglines, "Vision: "+str(len(combos))+" pairs of blobs")
            log(loglines, "Vision: "+str(len(pos_costs))+" at correct angles")
        if len(pos_costs) > 0:
            good_positions = min(pos_costs, key=lambda pc: pc[1])[0]
        else:
            good_positions = []
    log(loglines, "positions: " + str(good_positions))
    log(loglines, "calibration: "+str((Calibration.leftpos, Calibration.rightpos)))
    out_positions = ( good_positions[0] if len(good_positions)>0 else POSERR, \
                      good_positions[1] if len(good_positions)>1 else POSERR )
    if verbose:
        return (out_positions, imon, imoff, imon_cr, imdiff, raw_blobs, blobs)
    else:
        return out_positions
def main():
    """CLI entry point: parse arguments, chunk the audio, render the waveform
    SVG/PNG, and write a base64 copy of the PNG."""
    parser = argparse.ArgumentParser(description="Creates cool-lookin' audio waveform visualisations to use as assets in players and videos.")
    parser.add_argument('--input', help='Input file path. Required', required=True, type=str)
    parser.add_argument('--steps', help='The total number of steps done. [Default 200]', default=200, type=int)
    parser.add_argument('--totalwidth', help='The total width of the image. [Default 2000]', default=2000, type=int)
    parser.add_argument('--stepwidth', help='Width of each step. Can derive the total width by providing --steps and --stepwidth. [Default 10]', default=10, type=float)
    parser.add_argument('--height', help='The total height of the image. [Default 128]', default=128, type=int)
    parser.add_argument('--output', help='Output file path. [Default $input]', type=str)
    parser.add_argument('--color', help="The fill color for the bars. [Default 'black']", default="black", type=str)
    parser.add_argument('--rounded', help="Rounded corner radius. [Default 0]", default=0, type=float)
    parser.add_argument('--mode', help="Sample visualization mode. Either 'avg' or 'max' [Default 'avg']", default="avg", type=str, choices=["avg", "max"])
    parser.add_argument('--align', help="Vertical bar alignment. Either 'center' or 'bottom' [Default 'bottom']", default="bottom", type=str, choices=["bottom", "center"])
    args = parser.parse_args()
    # NOTE(review): argparse already applies the defaults declared above, so
    # the `if args.X:` fallbacks below are mostly redundant — and an explicit
    # 0 (e.g. --rounded 0) is falsy and takes the fallback path anyway.
    if args.input:
        filename = args.input
    else:
        log("Missing input file")
        return
    if args.output:
        output = args.output
    else:
        output = './' + filename
    if args.steps:
        steps = int(args.steps)
    else:
        steps = 200
    if args.stepwidth:
        step_width = int(args.stepwidth)
    else:
        # Derive the per-step width from the requested total width.
        if args.totalwidth:
            step_width = int(args.totalwidth) / steps
        else:
            step_width = 2000 / steps
    if args.rounded:
        rounded = int(args.rounded)
    else:
        rounded = 0
    if args.color:
        color = args.color
    else:
        color = "black"
    # Fixed gap of a quarter step between bars.
    x_gap = 0.25 * step_width
    if args.height:
        step_height = int(args.height)
    else:
        step_height = 128
    if args.mode:
        if args.mode == "avg" or args.mode == "max":
            mode = args.mode
        else:
            log("Found unsupported transformation mode. Only supports 'avg' and 'max'", mode=args.mode)
            mode = "avg"
    else:
        mode = "avg"
    if args.align:
        if args.align == "bottom" or args.align == "center":
            align = args.align
        else:
            log("Found unsupported alignment. Only supports 'center' and 'bottom'", align=args.align)
            align = "bottom"
    else:
        align = "bottom"
    start = time.time()
    log("Loading audio file into RAM", sampling_rate="48000", mono=True)
    # NOTE(review): positional sr/mono arguments are deprecated in newer
    # librosa releases (keyword-only there) — confirm the pinned version.
    y, _ = librosa.load(filename, 48000, True)
    delta_t = len(y) // steps  # delta is the ratio of desired steps (hence no. of bars) to the total sample count
    samples = create_buffer(y, delta_t, steps, mode=mode)
    draw(
        output_file=output,
        samples=samples,
        step_width=step_width,
        step_height=step_height,
        color=color,
        gap=x_gap,
        rounded=rounded,
        align=align
    )
    with open(f"{output}_base64.txt", "wb") as f:
        f.write(encode_as_base64(f"{output}.png"))
    end = time.time()
    log("Finished processing the audio file", src=filename, target=output, time_taken=f"{round(end - start)} seconds")
def p(self, *args, **kwargs):
    # Renders the index_data template at one of three zoom levels, selected
    # by how many positional args were supplied. Each level is attempted in
    # turn; an IndexError (missing arg) falls through to the broader view:
    #   args = (main, sub, event[, offset]) -> "Event" view
    #   args = (main, sub)                  -> "Sub" view
    #   args = (main,)                      -> "Main" view
    mako_template = Template(filename=os.path.join(locations.current_folder(),'static/templates/index_data.tmpl') )
    blog = database.get_blogs()
    main_tag = args[0]
    try:
        event_tag = args[2]
        sub_tag = args[1]
        imageIDList = []
        #SQL Offset number for DB Query
        try:
            offset = int(args[3])
        except:
            offset = 0
        # "Misc" is a pseudo event tag: query with event filter disabled.
        if event_tag == "Misc":
            images = database.get_latest_12_images_by_tag(main_tag, sub_tag, False, offset)
        else:
            images = database.get_latest_12_images_by_tag(main_tag, sub_tag, event_tag, offset)
        for image in images:
            #To be used by the front-end JS to scroll through pictures
            imageIDList.append(image[0])
        if len(images) > 0:
            self.mako_template_render = mako_template.render(event_main_tag = main_tag, event_sub_tag = sub_tag, imageIDList = imageIDList, event_tag = event_tag, images = images, display_type = "Event", offset = offset, blog = blog)
            return self.mako_template_render
        else:
            log("Not enough images")
            return ( self.default() )
    except:
        # No event tag given (or Event view failed) — try the Sub view.
        try:
            sub_tag = args[1]
            event_tags = database.get_event_tags(sub_tag)
            images = database.get_image_for_each_event_tag(sub_tag)
            misc_images = database.get_image_for_misc_sub_tag(main_tag, sub_tag)
            latest_uploads = database.get_latest_images(10)
            upcoming = database.manage_upcoming_alerts()
            #To be used by the front-end JS to scroll through pictures
            imageIDList = []
            for image in latest_uploads:
                imageIDList.append(image[0])
            if len(images) > 0 or len(misc_images) > 0:
                self.mako_template_render = mako_template.render(parent_main_tag = main_tag, parent_sub_tag = sub_tag, event_tags = event_tags, images = images, misc_images = misc_images, display_type = "Sub", blog = blog, imageIDList = imageIDList, latest_uploads = latest_uploads, upcoming = upcoming)
                return self.mako_template_render
            else:
                return ( self.default() )
        except:
            # No sub tag either — fall back to the Main (top-level) view.
            try:
                sub_tags = database.get_sub_tags(main_tag)
                images = database.get_image_for_each_sub_tag(main_tag)
                latest_uploads = database.get_latest_images(10)
                upcoming = database.manage_upcoming_alerts()
                #To be used by the front-end JS to scroll through pictures
                imageIDList = []
                for image in latest_uploads:
                    imageIDList.append(image[0])
                if len(images) > 0:
                    self.mako_template_render = mako_template.render(main_tag = main_tag, sub_tags = sub_tags, images = images, display_type = "Main", blog = blog, imageIDList = imageIDList, latest_uploads = latest_uploads, upcoming = upcoming)
                    return self.mako_template_render
                else:
                    return ( self.default() )
            except Exception, err:
                # NOTE(review): Python 2 iterates err.args — confirm intended.
                for error in err:
                    log("Unable to build Template: " + str(error) )