import os
import sys
from multiprocessing import Process, Manager
# settings, date_nid, objectStore, manage_status, now() and the run() worker
# come from the repo's own modules.

def main():
    """Ingest one night of ZTF alerts using a pool of Kafka consumer processes."""
    if len(sys.argv) > 1:
        nid = int(sys.argv[1])
    else:
        nid = date_nid.nid_now()
    date = date_nid.nid_to_date(nid)
    topic = 'ztf_' + date + '_programid1'

    maxalert = settings.KAFKA_MAXALERTS
    nprocess = settings.KAFKA_PROCESSES
    objectdir = settings.OBJECTJSON
    fitsdir = settings.IMAGEFITS
    group_id = settings.KAFKA_GROUPID
    topicout = settings.KAFKA_TOPIC_OUT

    print('INGEST ----------', now())

    conf = {
        'bootstrap.servers': '%s' % settings.KAFKA_INPUT,
        'group.id': group_id,
        'client.id': 'client-1',
        'enable.auto.commit': True,
        'session.timeout.ms': 6000,
        'default.topic.config': {'auto.offset.reset': 'smallest'}
    }

    if objectdir and len(objectdir) > 0:
        json_store = objectStore.objectStore(suffix='json', fileroot=objectdir)
    else:
        print('ERROR in ingest/ingestBatch: No object directory found for file storage')
        sys.stdout.flush()
        json_store = None

    if fitsdir and len(fitsdir) > 0:
        image_store = objectStore.objectStore(suffix='fits', fileroot=fitsdir)
    else:
        print('ERROR in ingest/ingestBatch: No image directory found for file storage')
        sys.stdout.flush()
        image_store = None

#    print('Configuration = %s' % str(conf))
    print('Processes = %d' % nprocess)
    sys.stdout.flush()

    # fork the worker processes and wait for all of them to finish
    process_list = []
    manager = Manager()
    return_dict = manager.dict()
    for t in range(nprocess):
        runarg = {
            'processID': t,
            'topic': topic,
            'maxalert': maxalert,
            'topicout': topicout,
            'json_store': json_store,
            'image_store': image_store,
            'conf': conf,
        }
        p = Process(target=run, args=(runarg, return_dict))
        process_list.append(p)
        p.start()
    for p in process_list:
        p.join()

    # gather the per-process counts
    r = return_dict.values()
    nalert = ncandidate = 0
    for t in range(nprocess):
        nalert += r[t]['nalert']
        ncandidate += r[t]['ncandidate']
    print('%d alerts and %d candidates' % (nalert, ncandidate))
    sys.stdout.flush()
    os.system('date')

    ms = manage_status('nid', settings.SYSTEM_STATUS)
    nid = date_nid.nid_now()
    ms.add({'today_alert': nalert, 'today_candidate': ncandidate}, nid)

    if nalert > 0:
        return 1
    else:
        return 0
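
# --- A minimal sketch of the run() worker that each Process above targets. ---
# The real implementation lives elsewhere in the repo; this sketch only
# illustrates the expected contract: consume up to maxalert messages from the
# input topic and report per-process counts through the shared return_dict.
# The confluent_kafka calls are standard; the elided body and the way
# candidates are counted are assumptions, not the author's code.
from confluent_kafka import Consumer

def run_sketch(runarg, return_dict):
    consumer = Consumer(runarg['conf'])
    consumer.subscribe([runarg['topic']])
    nalert = ncandidate = 0
    while nalert < runarg['maxalert']:
        msg = consumer.poll(timeout=5.0)
        if msg is None:
            break          # queue drained for now
        if msg.error():
            continue       # skip partition-EOF / malformed messages
        # ... decode the Avro alert, write JSON via runarg['json_store'],
        #     FITS cutouts via runarg['image_store'], forward to topicout ...
        nalert += 1
    consumer.close()
    return_dict[runarg['processID']] = {'nalert': nalert, 'ncandidate': ncandidate}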
        except:
            dir_timestamp = 0
        newer = area_timestamp - dir_timestamp
        # if the area from the database is newer than the cache, rebuild it
        if newer > 0:
            get.append(row['ar_id'])    # areas which will have their caches rebuilt
        else:
            keep.append(row['ar_id'])
    return {'keep': keep, 'get': get}

if __name__ == "__main__":
    import settings
    nid = date_nid.nid_now()
    date = date_nid.nid_to_date(nid)
    logfile = open('/mnt/cephfs/roy/services_log/' + date + '.log', 'a')
    now = datetime.now()
    logfile.write('\n-- make_area_files at %s\n' % now.strftime("%d/%m/%Y %H:%M:%S"))
    msl = mysql.connector.connect(
        user=settings.DB_USER_READ,
        password=settings.DB_PASS_READ,
        host=settings.DB_HOST,
        database='ztf')
    cache_dir = settings.AREA_MOCS
    new_cache_dir = cache_dir + '_new'
    os.system('mkdir %s' % new_cache_dir)
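
# --- A self-contained sketch of the keep/get split performed above. ---
# The try body feeding dir_timestamp is elided in this section; the guess
# below is that it stats the cached MOC file with os.path.getmtime. The
# one-file-per-ar_id layout and the 'timestamp' column name are assumptions
# for illustration only; the keep/get logic itself mirrors the loop above.
import os

def split_areas_sketch(rows, cache_dir):
    keep, get = [], []
    for row in rows:
        area_timestamp = row['timestamp']               # assumed column name
        path = '%s/%s.fits' % (cache_dir, row['ar_id'])  # assumed file layout
        try:
            dir_timestamp = os.path.getmtime(path)
        except OSError:
            dir_timestamp = 0                            # no cached file yet
        if area_timestamp - dir_timestamp > 0:
            get.append(row['ar_id'])    # database copy is newer: rebuild cache
        else:
            keep.append(row['ar_id'])   # cache is current: keep as-is
    return {'keep': keep, 'get': get}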
def main():
    """Run the alert filter stage across a pool of Kafka consumer processes."""
    args = parse_args()

    # Configure consumer connection to Kafka broker
    conf = {
        'bootstrap.servers': '%s' % args.host,
        'default.topic.config': {'auto.offset.reset': 'smallest'}
    }
    if args.group:
        conf['group.id'] = args.group
    else:
        conf['group.id'] = 'LASAIR'
    print('Configuration = %s' % str(conf))

    # How many processes
    if args.nprocess:
        nprocess = args.nprocess
    else:
        nprocess = 1
    print('Processes = %d' % nprocess)
    sys.stdout.flush()

    # fork the worker processes and wait for all of them to finish
    process_list = []
    manager = Manager()
    return_dict = manager.dict()
    for t in range(nprocess):
        runarg = {
            'processID': t,
            'args': args,
            'conf': conf,
        }
        p = Process(target=run, args=(runarg, return_dict))
        process_list.append(p)
        p.start()
    for p in process_list:
        p.join()

    # gather the per-process counts
    r = return_dict.values()
    nalert_in = nalert_out = nalert_ss = 0
    for t in range(nprocess):
        nalert_in += r[t]['nalert_in']
        nalert_out += r[t]['nalert_out']
        nalert_ss += r[t]['nalert_ss']
    print('INGEST finished %d in, %d out, %d solar system' % (nalert_in, nalert_out, nalert_ss))
    sys.stdout.flush()

    ms = manage_status('nid', settings.SYSTEM_STATUS)
    nid = date_nid.nid_now()
    ms.add({'today_filter': nalert_in,
            'today_filter_out': nalert_out,
            'today_filter_ss': nalert_ss}, nid)

    if nalert_in > 0:
        return 1
    else:
        return 0
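
# --- A hedged sketch of the parse_args() referenced above. ---
# The real version lives elsewhere in the repo; only the three attributes
# that main() reads (host, group, nprocess) are included here, and the flag
# names and help strings are assumptions.
import argparse

def parse_args_sketch():
    parser = argparse.ArgumentParser(description='Lasair alert filter stage')
    parser.add_argument('--host', required=True,
                        help='Kafka bootstrap server, e.g. localhost:9092')
    parser.add_argument('--group',
                        help='Kafka consumer group id (defaults to LASAIR)')
    parser.add_argument('--nprocess', type=int,
                        help='number of worker processes (defaults to 1)')
    return parser.parse_args()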