def main(project_name=None):
    # Fall back to a looked-up project name when none is given.
    if not project_name:
        project_name = get_project_name()
    # Run the three pipeline stages in order.
    calibrate.main(project_name=project_name)
    quantify.main(project_name=project_name)
    normalize.main(project_name=project_name)
    # Ring the terminal bell to signal completion.
    print('\a')
    return 0
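# A minimal sketch of a command-line entry point for the pipeline above.
# Assumptions (not shown in the original source): this function lives in a
# module that is run directly, and get_project_name() supplies a default
# when no argument is passed.
if __name__ == '__main__':
    import sys
    # Pass an optional project name as the first CLI argument.
    sys.exit(main(sys.argv[1] if len(sys.argv) > 1 else None))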
def process_feed(args):
    try:
        # Attempt to enrich the URL.
        res = enrich_url(args)
        # Treat anything outside the 2xx range as an error.
        if not 200 <= res.status_code < 300:
            log.error('Error enriching {}'.format(args[0]))
            log.error(res.json())
            log.error('Failed on HTTP code {}: {}'.format(res.status_code, args[1]))
            return 'error'
        # Normalize the document to set correct field types.
        res = normalize.main(res.json())
        # Add the label field.
        res['label'] = args[7]
        # Write to file.
        write(res, args[2])
        # Insert to elasticsearch.
        #_ = es.insert(args[5], args[3], args[4], res, args[2])
        # Add to dedup database.
        sql.insert(args[6], today(), args[0])
        return res
    except Exception:
        # Avoid a bare except; log the failure so it is not silently swallowed.
        log.exception('process_feed failed for {}'.format(args[0]))
        return False
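# Hypothetical driver for process_feed. The single packed `args` tuple
# suggests a Pool.map-style fan-out, but the actual caller is not shown in
# the original; the names run_feeds and feed_args are illustrative only.
from multiprocessing import Pool

def run_feeds(feed_args, workers=4):
    # Each element of feed_args is the 8-field tuple process_feed expects.
    with Pool(workers) as pool:
        return pool.map(process_feed, feed_args)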
# Requires module-level imports of librosa and time, plus the project's
# config, fetch, and frame helpers.
def get(self):
    # Look up the uploaded wave and its metadata for this device.
    device_id = self.get_argument('device_id')
    wave, label, wave_id = fetch(device_id)
    print('wave', wave)
    # main() is expected to produce normalized-temp.wav, loaded next.
    main()
    y, _ = librosa.load('normalized-temp.wav', sr=config.samplerate)
    frames = frame(y)  # framed signal; unused below but kept from the original
    # Decode the audio with the CTC model and return the result as JSON.
    result, output = self.runner.predict_ctc(y)
    self.write({
        'result': result,
        'label': label,
        'time': time.strftime('%Y-%m-%d %A %X %Z', time.localtime(time.time())),
        'wave_id': wave_id,
        'output': output,
    })
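# A minimal sketch of serving the handler above with Tornado. Assumptions:
# the get() method belongs to a RequestHandler subclass (called PredictHandler
# here, a hypothetical name) and self.runner is injected via initialize().
import tornado.ioloop
import tornado.web

class PredictHandler(tornado.web.RequestHandler):
    def initialize(self, runner):
        # Tornado passes the dict from the route table into initialize().
        self.runner = runner

    # ... get() as defined above ...

if __name__ == '__main__':
    app = tornado.web.Application([
        (r'/predict', PredictHandler, dict(runner=None)),  # supply a real CTC runner
    ])
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()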
import scrapper_jsonld
import merge_data
import normalize

if __name__ == "__main__":
    print('Scraping started')
    scrapper_jsonld.main()
    print('Scraping finished')

    print('Normalization started')
    normalize.main()
    print('Normalization finished')

    print('Merge started')
    merge_data.main()
    print('Merge finished')
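# An alternative sketch of the same runner that stops cleanly when a stage
# fails, instead of letting a raw traceback interleave with the progress
# messages. The run_stage helper and the stage list are illustrative
# additions, not part of the original script.
import sys

def run_stage(name, fn):
    print('{} started'.format(name))
    try:
        fn()
    except Exception as exc:
        print('{} failed: {}'.format(name, exc))
        sys.exit(1)
    print('{} finished'.format(name))

if __name__ == '__main__':
    for name, fn in [('Scraping', scrapper_jsonld.main),
                     ('Normalization', normalize.main),
                     ('Merge', merge_data.main)]:
        run_stage(name, fn)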