def connect(server="eodas_db", db="remote_sensing", user="******", passwd="acri%dc4", cursor_factory=False):
    try:
        conn = connection(server=server, db=db, user=user, passwd=passwd)
        if cursor_factory:
            return conn, conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor), \
                {'code': 0, 'msg': get_cur_time() + ': connected to db'}
        else:
            return conn, conn.cursor(), {'code': 0, 'msg': get_cur_time() + ': connected to db'}
    except psycopg2.Error:
        err = {'code': 400}
        err['msg'] = get_cur_time() + ' Error while connecting to the db'
        return None, None, err
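# Minimal usage sketch for connect() (not part of the original source): the function
# returns a (connection, cursor, status) triple, so callers should check status['code']
# before using the cursor. The query below is only a placeholder illustration.
conn, cur, status = connect(cursor_factory=True)
if status['code'] != 0:
    print(status['msg'])
else:
    cur.execute("SELECT 1")  # placeholder query
    print(cur.fetchone())
    conn.close()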
def user_phone_manage_modify(self, user_info):
    uid = user_info.get('uid', '')
    user = user_info.get('user', '')
    uname = user.get('name', '')
    phones = user.get('phones', '')
    phones_list = []
    mails_list = []
    for phone in phones:
        pid = phone.get('pid', '')
        phone = phone.get('phone', '')
        if pid:
            # update
            vars = dict(pid=pid)
            intime_new = utils.get_cur_time()
            db.update(utils.DB_TABLE_PHONE, where="pid=$pid", phone=phone, intime=intime_new, vars=vars)
            phone_dic = dict(pid=pid, phone=phone, intime=intime_new)
            phones_list.append(phone_dic)
        else:
            # insert
            pid_new = utils.get_uuid()
            intime_new = utils.get_cur_time()
            db.insert(utils.DB_TABLE_PHONE, uid=uid, phone=phone, pid=pid_new, intime=intime_new)
            phone_dic = dict(pid=pid_new, phone=phone, intime=intime_new)
            phones_list.append(phone_dic)
    mails = user.get('mails', '')
    for mail in mails:
        mid = mail.get('mid', '')
        mail_str = mail.get('mail', '')
        if mid:
            # update
            vars = dict(mid=mid)
            intime_new = utils.get_cur_time()
            db.update(utils.DB_TABLE_MAIL, where="mid=$mid", mail=mail_str, intime=intime_new, vars=vars)
            mail_dic = dict(mid=mid, mail=mail_str, intime=intime_new)
            mails_list.append(mail_dic)
        else:
            # insert
            mid_new = utils.get_uuid()
            intime_new = utils.get_cur_time()
            db.insert(utils.DB_TABLE_MAIL, uid=uid, mail=mail_str, mid=mid_new, intime=intime_new)
            mail_dic = dict(mid=mid_new, mail=mail_str, intime=intime_new)
            mails_list.append(mail_dic)
    user_dic = dict(uid=uid, name=uname, phones=phones_list, mails=mails_list)
    return (0, 'success', uid, user_dic)
def user_mail_manage(self, user_info):
    uid = user_info.get('uid', '')
    action = user_info.get('action', '')
    mail_info = user_info.get('mail_info', '')
    if action == 0:
        # add
        mail = mail_info.get('mail', '')
        mid = utils.get_uuid()
        mintime = utils.get_cur_time()
        db.insert(utils.DB_TABLE_MAIL, uid=uid, mid=mid, mail=mail, intime=mintime)
        mail_dic = dict(mail=mail, mid=mid, intime=mintime)
        return (0, 'success', uid, mail_dic)
    elif action == 1:
        # delete
        mid = mail_info.get('mid')
        vars = dict(mid=mid)
        db.delete(utils.DB_TABLE_MAIL, where="mid=$mid", vars=vars)
        mail_dic = dict(mid=mid)
        return (0, 'success', uid, mail_dic)
def user_phone_manage(self, user_info):
    uid = user_info.get('uid', '')
    action = user_info.get('action', '')
    phone_info = user_info.get('phone_info', '')
    if action == 0:
        # add
        phone = phone_info.get('phone', '')
        pid = utils.get_uuid()
        pintime = utils.get_cur_time()
        db.insert(utils.DB_TABLE_PHONE, uid=uid, pid=pid, phone=phone, intime=pintime)
        phone_dic = dict(phone=phone, pid=pid, intime=pintime)
        return (0, 'success', uid, phone_dic)
    elif action == 1:
        # delete
        pid = phone_info.get('pid')
        vars = dict(pid=pid)
        db.delete(utils.DB_TABLE_PHONE, where="pid=$pid", vars=vars)
        phone_dic = dict(pid=pid)
        return (0, 'success', uid, phone_dic)
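# Illustrative call sketch (not from the original source): `manager` stands for an
# instance of whatever class holds these methods, and the dict keys mirror the fields
# user_phone_manage() reads ('uid', 'action', 'phone_info'); action 0 adds, 1 deletes.
code, msg, uid, phone_dic = manager.user_phone_manage({
    'uid': 'some-user-uuid',               # hypothetical id
    'action': 0,                           # 0 = add a phone
    'phone_info': {'phone': '13800000000'},
})
code, msg, uid, phone_dic = manager.user_phone_manage({
    'uid': 'some-user-uuid',
    'action': 1,                           # 1 = delete a phone
    'phone_info': {'pid': phone_dic['pid']},
})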
def user_phone_manage_add(self, user_info):
    rid = user_info.get('rid', '')
    users = user_info.get('users', '')
    user_list = []
    for user in users:
        uname = user.get('name', '')
        phone_list = []
        mail_list = []
        if uname.strip() == '':
            return (-1, 'failed')
        phones = user.get('phones', '')
        uid = utils.get_uuid()
        curtime = utils.get_cur_time()
        ret = db.insert(utils.DB_TABLE_USER, uid=uid, rid=rid, name=uname, intime=curtime)
        print('ret:', ret)
        for phone in phones:
            # insert into t_phone
            p = phone.get('phone', '')
            if p.strip() != '':
                pid = utils.get_uuid()
                db.insert(utils.DB_TABLE_PHONE, pid=pid, uid=uid, phone=p, intime=curtime)
                phone_dic = dict(pid=pid, uid=uid, phone=p, intime=curtime)
                phone_list.append(phone_dic)
        mails = user.get('mails', '')
        for mail in mails:
            mail_str = mail.get('mail', '')
            if mail_str.strip() != '':
                mid = utils.get_uuid()
                db.insert(utils.DB_TABLE_MAIL, mid=mid, uid=uid, mail=mail_str, intime=curtime)
                mail_dic = dict(mid=mid, uid=uid, mail=mail_str, intime=curtime)
                mail_list.append(mail_dic)
        user_dic = dict(uid=uid, name=uname, phones=phone_list, mails=mail_list, intime=curtime)
        user_list.append(user_dic)
    return (0, 'success', rid, user_list)
def user_register(self, user_info):
    rphone = user_info.get('phone', '')
    if not rphone.strip():
        return (-1, 'phone empty')
    var = dict(phone=rphone)
    results = db.select(utils.DB_TABLE_REG, vars=var, where="phone=$phone")
    rowcount = len(list(results))
    if not rowcount:
        # phone not registered yet
        rid = utils.get_uuid()
        rintime = utils.get_cur_time()
        rname = user_info.get('name', '')
        remail = user_info.get('email', rphone)
        rpassword = user_info.get('password', '')
        rpassword = utils.get_md5(rpassword)
        ret = db.insert(utils.DB_TABLE_REG, rid=rid, phone=rphone, password=rpassword,
                        name=rname, email=remail, intime=rintime)
        print('ret:', ret)
        if not ret:
            return (0, 'success', rid)
        else:
            return (-1, 'register error')
    else:
        return (-1, 'user already registered')
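# Illustrative call sketch (not from the original source): `manager` is again an
# assumed instance of the class these methods belong to; the dict keys match what
# user_register() reads ('phone', 'name', 'email', 'password').
result = manager.user_register({
    'phone': '13800000000',              # hypothetical phone number
    'name': 'Alice',
    'email': 'alice@example.com',
    'password': 'plain-text-password',   # hashed with utils.get_md5 inside
})
if result[0] == 0:
    print('registered, rid =', result[2])
else:
    print('registration failed:', result[1])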
def run(self):
    timeout = 1
    while not self.stopped_thread():
        with self.lock_serials:
            serial_filenos = []
            for _serial in self.serials:
                try:
                    serial_filenos.append(_serial.fileno())
                except serial.SerialException as e:
                    print('exception in append fileno')
        if serial_filenos:
            # Wait until at least one serial port is readable (or the timeout expires)
            readable, _, exceptional = select.select(
                serial_filenos, [], serial_filenos, timeout)
            for serial_fileno in readable:
                with self.lock_serials:
                    try:
                        _serial = next(
                            _serial for _serial in self.serials
                            if _serial.fileno() == serial_fileno)
                    except StopIteration as e:
                        print('EXC StopIteration =', e)
                        continue
                try:
                    if _serial.in_waiting > 0:
                        locked = self.serials_locks[_serial.port].acquire(timeout=0.1)
                        if locked:
                            serial_data = _serial.read(1024)
                            self.serial_ports_buffer[_serial.port] += serial_data
                            pck = self.serial_ports_buffer[_serial.port]
                            self.serials_locks[_serial.port].release()
                        else:
                            break
                        # Look for a complete frame in the buffer: delimiter, 2-byte size
                        # field, then node id (1 byte) + payload + 2-byte checksum
                        start_pck_pos = pck.find(utils.DELIMITER)
                        if start_pck_pos > -1:
                            if len(pck) > (start_pck_pos + 2):
                                frame_size = utils.get_serial_frame_size(
                                    pck[start_pck_pos + 1:start_pck_pos + 3])
                                start_frame_pos = start_pck_pos + 3
                                if len(pck) >= (start_frame_pos + frame_size):
                                    fmt = '!B' + str(frame_size - 3) + 's' + '2s'
                                    (_node_id, _data, _cs) = utils.get_serial_data(
                                        pck[start_frame_pos:
                                            start_frame_pos + frame_size], fmt)
                                    _node_id = str(_node_id)
                                    cs = utils.fletcher16_checksum(
                                        pck[start_frame_pos:
                                            start_frame_pos + frame_size - 2])
                                    if (cs[0] == _cs[0]) and (cs[1] == _cs[1]):
                                        timestamp = utils.get_cur_time()
                                        data = [timestamp, _node_id, _data.decode()]
                                        self.serial_q.put(data)
                                        self.serial_ports_buffer[_serial.port] = b''
                                    else:
                                        print('CHECKSUM BAD')
                                        # Drop the corrupted frame, keep the rest of the buffer
                                        self.serial_ports_buffer[_serial.port] = \
                                            self.serial_ports_buffer[_serial.port][:start_pck_pos] + \
                                            self.serial_ports_buffer[_serial.port][start_pck_pos + frame_size + 3:]
                except serial.SerialException as e:
                    pass  # print('EXC SerialException =', e)
                except OSError as e:
                    pass  # print('EXC OSError =', e)
                except TypeError as e:
                    pass  # print('EXC TypeError =', e)
                finally:
                    # _serial.close()
                    break
        else:
            time.sleep(timeout)
    print('Exiting from serial_handler. . .')
def train_model(optimizer,
                model_tf,
                baseline,
                validation_dataset,
                samples=1280000,
                batch=128,
                val_batch_size=1000,
                start_epoch=0,
                end_epoch=5,
                from_checkpoint=False,
                grad_norm_clipping=1.0,
                batch_verbose=1000,
                graph_size=20,
                filename=None):

    if filename is None:
        filename = 'VRP_{}_{}'.format(graph_size, strftime("%Y-%m-%d", gmtime()))

    def rein_loss(model, inputs, baseline, num_batch):
        """Calculate loss for REINFORCE algorithm
        """
        # Evaluate model, get costs and log probabilities
        cost, log_likelihood = model(inputs)

        # Evaluate baseline
        # For first wp_n_epochs we take the combination of baseline and ema for previous batches
        # after that we take a slice of precomputed baseline values
        bl_val = bl_vals[num_batch] if bl_vals is not None else baseline.eval(inputs, cost)
        bl_val = tf.stop_gradient(bl_val)

        # Calculate loss
        reinforce_loss = tf.reduce_mean((cost - bl_val) * log_likelihood)
        return reinforce_loss, tf.reduce_mean(cost)

    def grad(model, inputs, baseline, num_batch):
        """Calculate gradients
        """
        with tf.GradientTape() as tape:
            loss, cost = rein_loss(model, inputs, baseline, num_batch)
        return loss, cost, tape.gradient(loss, model.trainable_variables)

    # For plotting
    train_loss_results = []
    train_cost_results = []
    val_cost_avg = []

    # Training loop
    for epoch in range(start_epoch, end_epoch):

        # Create dataset on current epoch
        data = generate_data_onfly(num_samples=samples, graph_size=graph_size)

        epoch_loss_avg = tf.keras.metrics.Mean()
        epoch_cost_avg = tf.keras.metrics.Mean()

        # Skip warm-up stage when we continue training from checkpoint
        if from_checkpoint and baseline.alpha != 1.0:
            print('Skipping warm-up mode')
            baseline.alpha = 1.0

        # If epoch > wp_n_epochs then precompute baseline values for the whole dataset else None
        bl_vals = baseline.eval_all(data)  # (samples, ) or None
        bl_vals = tf.reshape(bl_vals, (-1, batch)) if bl_vals is not None else None  # (n_batches, batch) or None

        print("Current decode type: {}".format(model_tf.decode_type))

        for num_batch, x_batch in tqdm(enumerate(data.batch(batch)),
                                       desc="batch calculation at epoch {}".format(epoch)):

            # Optimize the model
            loss_value, cost_val, grads = grad(model_tf, x_batch, baseline, num_batch)

            # Clip gradients by grad_norm_clipping
            init_global_norm = tf.linalg.global_norm(grads)
            grads, _ = tf.clip_by_global_norm(grads, grad_norm_clipping)
            global_norm = tf.linalg.global_norm(grads)

            if num_batch % batch_verbose == 0:
                print("grad_global_norm = {}, clipped_norm = {}".format(
                    init_global_norm.numpy(), global_norm.numpy()))

            optimizer.apply_gradients(zip(grads, model_tf.trainable_variables))

            # Track progress
            epoch_loss_avg.update_state(loss_value)
            epoch_cost_avg.update_state(cost_val)

            if num_batch % batch_verbose == 0:
                print("Epoch {} (batch = {}): Loss: {}: Cost: {}".format(
                    epoch, num_batch, epoch_loss_avg.result(), epoch_cost_avg.result()))

        # Update baseline if the candidate model is good enough.
        # In this case also create new baseline dataset
        baseline.epoch_callback(model_tf, epoch)
        set_decode_type(model_tf, "sampling")

        # Save model weights
        model_tf.save_weights('model_checkpoint_epoch_{}_{}.h5'.format(epoch, filename),
                              save_format='h5')

        # Validate current model
        val_cost = validate(validation_dataset, model_tf, val_batch_size)
        val_cost_avg.append(val_cost)

        train_loss_results.append(epoch_loss_avg.result())
        train_cost_results.append(epoch_cost_avg.result())

        pd.DataFrame(data={
            'epochs': list(range(start_epoch, epoch + 1)),
            'train_loss': [x.numpy() for x in train_loss_results],
            'train_cost': [x.numpy() for x in train_cost_results],
            'val_cost': [x.numpy() for x in val_cost_avg]
        }).to_csv('backup_results_' + filename + '.csv', index=False)

        print(get_cur_time(),
              "Epoch {}: Loss: {}: Cost: {}".format(epoch, epoch_loss_avg.result(),
                                                    epoch_cost_avg.result()))

    # Make plots and save results
    filename_for_results = filename + '_start={}, end={}'.format(start_epoch, end_epoch)
    get_results([x.numpy() for x in train_loss_results],
                [x.numpy() for x in train_cost_results],
                [x.numpy() for x in val_cost_avg],
                save_results=True,
                filename=filename_for_results,
                plots=True)
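# Minimal call sketch for train_model() (not from the original source): `model`,
# `baseline` and `val_data` are assumed to be constructed elsewhere in the project,
# and the Adam learning rate is an assumed value, not taken from the original code.
import tensorflow as tf

optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
train_model(optimizer,
            model_tf=model,               # hypothetical: attention model instance
            baseline=baseline,            # hypothetical: rollout baseline instance
            validation_dataset=val_data,  # hypothetical: pre-generated validation set
            samples=1280000,
            batch=128,
            graph_size=20,
            start_epoch=0,
            end_epoch=5)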
def update_prod_info(media, args, cursor, conn, log_file, max_attempt=3, exit_on_error=False,
                     proc_id='Unknown', info=None):
    err_msg = 'Media {0}: '.format(media)
    err_msg += '{}'
    CMD = 'update-prod-info-list'
    print('{0} - Media {1}: start query hsm'.format(get_cur_time(), media))
    if info is None:
        products, err = get_file_sample(media, args, 100, log_file, max_attempt=max_attempt,
                                        exit_on_error=exit_on_error, proc_id=proc_id)
    else:
        products, err, info_out = get_file_sample(media, args, 100, log_file, max_attempt=max_attempt,
                                                  exit_on_error=exit_on_error, proc_id=proc_id, info=info)
    print('{0} - Media {1}: end query hsm'.format(get_cur_time(), media))
    if err != 0:
        if info is None:
            return {'code': err}
        else:
            return {'code': err}, {}

    product_names = [get_prod_name(prod) for prod in products]
    prod_names = "('" + "','".join(product_names) + "')"

    CMD = 'update-prod-info-check'
    # print('{0} - Media {1}: start status check'.format(get_cur_time(), media))
    query = db_query().count_prod_not_in_tape(prod_names)
    err = submit_query(query, cursor, conn=conn)
    check_error(proc_id, err['code'], CMD, log_file, arg_err=err_msg.format(err['msg']))
    print('{0} - Media {1}: end status check'.format(get_cur_time(), media))
    if err['code'] != 0:
        if info:
            return err, {}
        else:
            return err

    n_not_tape = cursor.fetchone()[0]
    print('{0} - Media {1}: {2} files do not have tape status'.format(get_cur_time(), media, n_not_tape))
    if n_not_tape > 0:
        err = {'code': 314,
               'msg': '{} out of {} products are not in tape status'.format(n_not_tape, len(product_names))}
        check_error(proc_id, err['code'], CMD, log_file, arg_err=err_msg.format(err['msg']))
        query = db_query().get_prod_not_in_tape(prod_names)
        err_0 = submit_query(query, cursor, conn=conn)
        check_error(proc_id, err_0['code'], CMD, log_file, arg_err=err_msg.format(err['msg']))
        for c in cursor:
            msg = '{0} - Media {1}: {2}, {3}, {4}'.format(' ' * 19, media, c[0], c[1], c[2])
            print(msg)
            check_error(proc_id, 314, CMD, log_file, arg_err=msg)
        if info:
            return err, {}
        else:
            return err

    CMD = 'update-prod-info'
    print('{0} - Media {1}: start status update'.format(get_cur_time(), media))
    query = db_query().update_prod_status(prod_names)
    err = submit_query(query, cursor, conn=conn, commit=True)
    check_error(proc_id, err['code'], CMD, log_file, arg_err=err_msg.format(err['msg']))
    print('{0} - Media {1}: end status update'.format(get_cur_time(), media))
    if info is None:
        return err
    else:
        return err, info_out