def load_data(data_dir, data_list):
    """Load cached Inception_v3 feature maps and labels for every event in data_list."""
    speedup_dir = os.path.join(data_dir, 'speedup')
    train_data = data_factory('TRI', data_dir=data_dir, data_list=data_list, quiet=True)
    batch_features = []
    batch_label = []
    while True:
        evt_data, restart, evt_index, _ = train_data.next(shuffle=True, unit='evt')
        event_np = os.path.join(speedup_dir, '%d_%s_map.npy' % (evt_index, 'Inception_v3'))
        # Skip event 21.
        if evt_index in [21]:
            print(event_np)
            continue
        if os.path.isfile(event_np):
            batch_data = np.load(event_np).item()
            print(evt_index)
            batch, ctime, cimg_width, cimg_height, cchannel = np.squeeze(
                batch_data['data'], axis=1).shape
            print(ctime, cimg_width, cimg_height, cchannel)
            batch_features.extend(np.squeeze(batch_data['data'], axis=1))
            batch_label.extend(batch_data['label'])
        if restart:
            break
    batch_features = np.stack(batch_features)
    # batch_label = np.stack(batch_label)
    return batch_features, batch_label
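# Illustrative helper (not in the original file): load the cached feature maps
# for a couple of sessions and report their shape. The data_dir and data_list
# values simply reuse the defaults that appear elsewhere in these scripts.
def _inspect_cached_maps(data_dir=r'..\..\Out',
                         data_list=['ID001_T001', 'ID001_T002']):
    features, labels = load_data(data_dir, data_list)
    # features is a stacked array of per-sequence feature maps,
    # labels a list with one class label per sequence.
    print(features.shape, len(labels))
    return features, labels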
def easy_train(data_list=['ID001_T001', 'ID001_T002', 'ID001_T003',
                          'ID001_T004', 'ID001_T009', 'ID001_T010'],
               data_dir=r'..\..\Out',
               im_shape=[299, 299, 3],
               time_step=31):
    """Train the attention LSTM decoder on pre-extracted per-event features."""
    train_data = data_factory('TRI', data_dir=data_dir, data_list=data_list, quiet=False)
    with tf.Session() as sess:
        img_str, frame = data_process(im_shape)
        encoder = encoder_factory('Inception_v3')
        decoder = decoder_factory('ATT_LSTM')
        _input, _feature, _train = encoder.last_feature(
            [None, im_shape[0], im_shape[1], im_shape[2]])
        _decoder_input = tf.placeholder(dtype=tf.float32, name='decoder_input',
                                        shape=[None, time_step, 2048])
        _truth, _loss, _train_op, _pred = decoder(
            _decoder_input, n_hidden=50, n_class=5, learning_rate=1e-3)
        sess.run(tf.global_variables_initializer())
        encoder.load_model(sess)
        saver = tf.train.Saver()
        for epoch in range(2000):
            saver.save(sess, r'..\..\Backup\M81ckpt', global_step=epoch)
            batch_features = []
            batch_label = []
            # Accumulate up to 100 cached events into one training batch.
            for i in range(100):
                evt_data, restart, evt_index = train_data.next(shuffle=True, unit='evt')
                event_np = os.path.join(data_dir, '%d.npy' % evt_index)
                # Skip events 71 and 96.
                if evt_index in [71, 96]:
                    continue
                if os.path.isfile(event_np):
                    batch_data = np.load(event_np).item()
                    batch_features.extend(batch_data['data'])
                    batch_label.extend(batch_data['label'])
            loss, pred, _ = sess.run((_loss, _pred, _train_op),
                                     feed_dict={_decoder_input: batch_features,
                                                _truth: batch_label})
            total_correct = len([x for x, y in zip(pred, batch_label) if x == y])
            print('%d: mean loss : %f, accuracy: %f'
                  % (epoch, loss, total_correct / len(batch_label)))
def event_eve(data_dir, data_list, sess, _inputs, _pred, _istrain, score_threshold):
    """Event-level evaluation: compare the earliest confident prediction of an
    event with the event's most frequent ground-truth label."""
    speedup_dir = os.path.join(data_dir, 'speedup')
    train_data = data_factory('TRI', data_dir=data_dir, data_list=data_list, quiet=True)
    correct = 0.0
    incorrect = 0.0
    while True:
        evt_data, restart, evt_index, _ = train_data.next(shuffle=True, unit='evt')
        event_np = os.path.join(speedup_dir, '%d_%s_vec.npy' % (evt_index, 'Inception_v3'))
        if os.path.isfile(event_np):
            event_data_label = np.load(event_np).item()
            event_data = event_data_label['data']
            event_label = event_data_label['label']
            event_max_oc_label = max(event_label, key=event_label.count)
            pred = sess.run(_pred, feed_dict={_inputs: event_data, _istrain: False})
            # Keep only sequences whose top score passes the threshold and whose
            # predicted class is not class 0.
            seq_index = [(index, np.argmax(score)) for index, score in enumerate(pred)
                         if np.max(score) >= score_threshold and np.argmax(score) > 0]
            # The event counts as correct when the earliest confident prediction
            # matches the event's most frequent label.
            if seq_index and seq_index[0][1] == event_max_oc_label:
                correct = correct + 1
            else:
                incorrect = incorrect + 1
        if restart:
            break
    print('Event accuracy : %f' % (correct / (correct + incorrect)))
def train(data_list=['ID001_T001', 'ID001_T002', 'ID001_T003',
                     'ID001_T004', 'ID001_T009', 'ID001_T010'],
          data_dir=r'..\..\Out',
          im_shape=[640, 480, 3],
          time_step=31):
    """Extract and cache per-event Inception_v3 feature maps in the speedup directory."""
    speedup_dir = os.path.join(data_dir, 'speedup')
    train_data = data_factory('TRI', data_dir=data_dir, data_list=data_list, quiet=False)
    with tf.Session() as sess:
        img_str, frame = data_process(im_shape)
        encoder = encoder_factory('Inception_v3')
        # The original snippet never built the encoder tensors it feeds below.
        # last_map is assumed here because the cache file is named *_map.npy
        # (same API as feature_extraction uses for the 'map' suffix).
        _input, _feature, _train = encoder.last_map(
            [None, im_shape[0], im_shape[1], im_shape[2]])
        sess.run(tf.global_variables_initializer())
        encoder.load_model(sess)
        while True:
            evt_data, restart, evt_index = train_data.next(shuffle=True, unit='evt')
            event_np = os.path.join(speedup_dir, '%d_Inception_v3_map.npy' % evt_index)
            print(evt_data[0]['mat_path'])
            batch_label = []
            batch_features = []
            for seq_data in tqdm(evt_data):
                label = seq_data['label']
                batch_label.extend([label])
                seq_features = []
                for fimg_path in seq_data['gimg']:
                    fimg = open(fimg_path, 'rb').read()
                    fimg = sess.run(frame, feed_dict={img_str: fimg})
                    feature = sess.run(_feature,
                                       feed_dict={_input: [fimg], _train: False})
                    seq_features.append(feature)
                batch_features.append(np.stack(seq_features))
            np.save(event_np, {'data': batch_features, 'label': batch_label})
            # Stop after one full pass over the data (missing in the original snippet).
            if restart:
                break
def train(data_list=['ID001_T012', 'ID001_T013', 'ID001_T014', 'ID001_T015',
                     'ID001_T016', 'ID001_T017', 'ID001_T018', 'ID001_T019'],
          data_dir=r'..\..\Out',
          im_shape=[299, 299, 3],
          time_step=31):
    """Train the LSTM decoder on Inception_v3 feature vectors, caching the
    per-event features in the speedup directory on first use."""
    train_data = data_factory('TRI', data_dir=data_dir, data_list=data_list, quiet=False)
    speedup_dir = os.path.join(data_dir, 'speedup')
    if os.path.isdir(speedup_dir) is False:
        os.makedirs(speedup_dir)
    with tf.Session() as sess:
        img_str, frame = data_process(im_shape)
        encoder = encoder_factory('Inception_v3')
        decoder = decoder_factory('LSTM')
        _input, _feature, _train = encoder.last_feature(
            [None, im_shape[0], im_shape[1], im_shape[2]])
        _decoder_input = tf.placeholder(dtype=tf.float32, name='decoder_input',
                                        shape=[None, time_step, 2048])
        _truth, _loss, _train_op, _pred = decoder(
            _decoder_input, n_hidden=100, n_class=5, learning_rate=1e-2)
        sess.run(tf.global_variables_initializer())
        encoder.load_model(sess)
        saver = tf.train.Saver()
        for epoch in range(100):
            saver.save(sess, r'..\..\Backup\M802.ckpt', global_step=epoch)
            total_loss = 0.0
            total_num = 0.0
            total_correct = 0.0
            total_seq = 0.0
            while True:
                evt_data, restart, evt_index = train_data.next(shuffle=True, unit='evt')
                batch_features = []
                batch_label = []
                event_np = os.path.join(speedup_dir,
                                        '%d_%s_vec.npy' % (evt_index, encoder.model_id))
                if os.path.isfile(event_np):
                    # Reuse the cached feature vectors for this event.
                    batch_data = np.load(event_np).item()
                    batch_features = batch_data['data']
                    batch_label = batch_data['label']
                else:
                    # First pass: run every frame through the encoder and cache the result.
                    print(evt_data[0]['mat_path'])
                    for seq_data in tqdm(evt_data):
                        label = seq_data['label']
                        batch_label.extend([label])
                        seq_features = []
                        for fimg_path in seq_data['fimg']:
                            fimg = open(fimg_path, 'rb').read()
                            fimg = sess.run(frame, feed_dict={img_str: fimg})
                            feature = sess.run(_feature,
                                               feed_dict={_input: [fimg], _train: False})
                            seq_features.append(np.squeeze(feature))
                        batch_features.append(np.stack(seq_features))
                    np.save(event_np, {'data': batch_features, 'label': batch_label})
                # batch_features = np.asanyarray(batch_features).reshape(batch, time_step, -1)
                loss, pred, _ = sess.run((_loss, _pred, _train_op),
                                         feed_dict={_decoder_input: batch_features,
                                                    _truth: batch_label})
                total_loss = total_loss + loss
                total_num = total_num + 1
                total_seq = total_seq + len(batch_label)
                total_correct = total_correct + len(
                    [x for x, y in zip(pred, batch_label) if x == y])
                if restart:
                    break
            print('%d: mean loss : %f, accuracy: %f'
                  % (epoch, total_loss / total_num, total_correct / total_seq))
def test():
    """Evaluate the attention model and write attention-overlay images."""
    data_list = ['ID001_T001', 'ID001_T002']
    data_dir = r'..\..\Out'
    im_shape = [480, 640, 3]
    train_data = data_factory('TRI', data_dir=data_dir, data_list=data_list, quiet=True)
    model_factory = Model_Zoo()
    with tf.Session() as sess:
        img_str, frame = data_process(im_shape)
        encoder, decoder = model_factory('Inception_v3')
        T = 31
        input_layer, feature_layer, is_train = encoder.back_bone(
            [1, im_shape[0], im_shape[1], im_shape[2]])
        [_, w, h, d] = feature_layer.shape
        feature_input_layer = tf.placeholder(tf.float32, shape=[T, w, h, d],
                                             name='CNN_Features')
        pred_layer, gtruth_layer, attention_layer = decoder.build(
            feature_input_layer, n_hidenn=100, n_class=5)
        saver = tf.train.Saver()
        saver.restore(sess, r'..\..\Backup_2\M726-38')
        correct = 0
        total = 0
        while True:
            seq_data, restart = train_data.next(unit='event', shuffle=False)
            label_correct = []
            for i, seq in tqdm.tqdm(enumerate(seq_data)):
                label = seq['label']
                seq_fimg = []
                for fimg, dimg in zip(seq['fimg'], seq['dimg']):
                    [fimg, dimg] = list(map(lambda f: open(f, 'rb').read(), [fimg, dimg]))
                    fimg = sess.run(frame, feed_dict={img_str: fimg})
                    ffeature = sess.run(feature_layer,
                                        feed_dict={input_layer: [fimg], is_train: False})
                    seq_fimg.extend(ffeature)
                seq_fimg = np.stack(seq_fimg)
                predict, attention = sess.run((pred_layer, attention_layer),
                                              feed_dict={feature_input_layer: seq_fimg})
                # attention = np.asanyarray(attention)
                [_, f_row, f_col, _] = ffeature.shape
                # Overlay each frame's attention map on the original image and save it.
                for index, f_path in enumerate(seq['fimg']):
                    fimg = cv2.imread(f_path)
                    att_map = attention[index].reshape(f_row, f_col)
                    att_map = cv2.resize(att_map, (im_shape[1], im_shape[0]))
                    # att_mask = att_map - 0.5
                    att_mask = att_map * 0.5
                    out_img = 0.5 * fimg + np.expand_dims(att_mask, -1).astype(np.float) * fimg
                    out_img = out_img.astype(np.uint8)
                    cv2.imwrite(f_path.replace('Out', 'Att'), out_img)
                predict = np.argmax(predict)
                total = total + 1
                if predict == label:
                    correct = correct + 1
                    label_correct.extend([1])
                else:
                    label_correct.extend([0])
            print(label_correct)
            if restart:
                break
        print('accurate: %f' % (correct / total))
def test(data_list=['ID001_T001', 'ID001_T002', 'ID001_T003',
                    'ID001_T004', 'ID001_T009', 'ID001_T010'],
         data_dir=r'..\..\Out',
         im_shape=[299, 299, 3],
         time_step=31):
    """Evaluate the trained LSTM decoder on cached (or freshly extracted) features."""
    train_data = data_factory('TRI', data_dir=data_dir, data_list=data_list, quiet=True)
    with tf.Session() as sess:
        img_str, frame = data_process(im_shape)
        encoder = encoder_factory('Inception_v3')
        decoder = decoder_factory('LSTM')
        _input, _feature, _train = encoder.last_feature(
            [None, im_shape[0], im_shape[1], im_shape[2]])
        _decoder_input = tf.placeholder(dtype=tf.float32, name='decoder_input',
                                        shape=[None, time_step, 2048])
        _truth, _loss, _train_op, _pred = decoder(
            _decoder_input, n_hidden=100, n_class=5)
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess, r'..\..\Backup\M802.ckpt-24')
        total_correct = 0.0
        total_num = 0.0
        while True:
            evt_data, restart, evt_index = train_data.next(shuffle=False, unit='evt')
            batch_features = []
            batch_label = []
            event_np = os.path.join(data_dir, '%d.npy' % evt_index)
            # print(evt_index)
            # Skip events 71 and 96.
            if evt_index in [71, 96]:
                continue
            if os.path.isfile(event_np):
                batch_data = np.load(event_np).item()
                batch_features = batch_data['data']
                batch_label = batch_data['label']
            else:
                print(evt_data[0]['mat_path'])
                for seq_data in tqdm(evt_data):
                    label = seq_data['label']
                    batch_label.extend([label])
                    seq_features = []
                    for fimg_path in seq_data['fimg']:
                        fimg = open(fimg_path, 'rb').read()
                        fimg = sess.run(frame, feed_dict={img_str: fimg})
                        feature = sess.run(_feature,
                                           feed_dict={_input: [fimg], _train: False})
                        seq_features.append(np.squeeze(feature))
                    batch_features.append(np.stack(seq_features))
                np.save(event_np, {'data': batch_features, 'label': batch_label})
            pred = sess.run(_pred, feed_dict={_decoder_input: batch_features})
            correct_num = len([x for x, y in zip(pred, batch_label) if x == y])
            total_correct = total_correct + correct_num
            total_num = total_num + len(batch_label)
            if restart:
                break
        print('accurate : %f' % (total_correct / total_num))
def train():
    """Train the attention decoder on frozen Inception_v3 features, one sequence at a time."""
    data_list = ['ID001_T001', 'ID001_T002']
    data_dir = r'..\..\Out'
    im_shape = [480, 640, 3]
    train_data = data_factory('TRI', data_dir=data_dir, data_list=data_list, quiet=True)
    model_factory = Model_Zoo()
    with tf.Session() as sess:
        img_str, frame = data_process(im_shape)
        encoder, decoder = model_factory('Inception_v3')
        T = 31
        input_layer, feature_layer, is_train = encoder.back_bone(
            [1, im_shape[0], im_shape[1], im_shape[2]])
        [_, w, h, d] = feature_layer.shape
        feature_input_layer = tf.placeholder(tf.float32, shape=[T, w, h, d],
                                             name='CNN_Features')
        pred_layer, gtruth_layer, attention_layer = decoder.build(
            feature_input_layer, n_hidenn=100, n_class=5)
        loss, train_op = Train_Op().build('Ssoftmax', 'AdamOptimizer', 1e-3,
                                          pred_layer, gtruth_layer)
        sess.run(tf.global_variables_initializer())
        encoder.load_model(sess)
        saver = tf.train.Saver()
        for epoch in range(1):
            saver.save(sess, r'..\..\Backup\M730.ckpt', global_step=epoch)
            mean_loss = 0.0
            total_case = 0.0
            while True:
                seq_data, restart = train_data.next(shuffle=False)
                seq_data = seq_data[0]
                label = seq_data['label']
                seq_fimg = []
                for fimg, dimg in zip(seq_data['fimg'], seq_data['dimg']):
                    [fimg, dimg] = list(map(lambda f: open(f, 'rb').read(), [fimg, dimg]))
                    fimg = sess.run(frame, feed_dict={img_str: fimg})
                    # dimg = sess.run(frame, feed_dict={img_str: dimg})
                    fimg = sess.run(feature_layer,
                                    feed_dict={input_layer: [fimg], is_train: False})
                    seq_fimg.extend(fimg)
                seq_fimg = np.stack(seq_fimg)
                _loss, _ = sess.run((loss, train_op),
                                    feed_dict={feature_input_layer: seq_fimg,
                                               gtruth_layer: [label]})
                mean_loss = mean_loss + _loss
                total_case = total_case + 1
                if restart:
                    break
            print('mean loss : %f' % (mean_loss / total_case))
def load_data(data_dir, data_list):
    """Load cached Inception_v3 feature vectors and labels for every event in data_list."""
    speedup_dir = os.path.join(data_dir, 'speedup')
    train_data = data_factory('TRI', data_dir=data_dir, data_list=data_list, quiet=True)
    batch_features = []
    batch_label = []
    while True:
        evt_data, restart, evt_index = train_data.next(shuffle=True, unit='evt')
        event_np = os.path.join(speedup_dir, '%d_%s_vec.npy' % (evt_index, 'Inception_v3'))
        if os.path.isfile(event_np):
            batch_data = np.load(event_np).item()
            batch_features.extend(batch_data['data'])
            batch_label.extend(batch_data['label'])
        if restart:
            break
    return batch_features, batch_label
def test(SVM_classer,
         data_list=['ID001_T012', 'ID001_T013', 'ID001_T014', 'ID001_T015',
                    'ID001_T016', 'ID001_T017', 'ID001_T018', 'ID001_T019'],
         data_dir=r'..\..\Out',
         im_shape=[299, 299, 3],
         time_step=31):
    """Evaluate an SVM classifier on the last time-step feature of each sequence."""
    speedup_dir = os.path.join(data_dir, 'speedup')
    train_data = data_factory('TRI', data_dir=data_dir, data_list=data_list, quiet=True)
    batch_features = []
    batch_label = []
    while True:
        evt_data, restart, evt_index = train_data.next(shuffle=True, unit='evt')
        event_np = os.path.join(speedup_dir, '%d_%s_vec.npy' % (evt_index, 'Inception_v3'))
        if os.path.isfile(event_np):
            batch_data = np.load(event_np).item()
            batch_features.extend(batch_data['data'])
            batch_label.extend(batch_data['label'])
        if restart:
            break
    # Classify each sequence by the feature vector of its last frame only.
    last_feature = [frame_feature[-1] for frame_feature in batch_features]
    y_ = SVM_classer.predict(last_feature)
    correct_num = len([x for x, y in zip(y_, batch_label) if x == y])
    print('test accuracy: %f' % (correct_num / len(batch_label)))
    return SVM_classer
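# A minimal sketch (an assumption, not part of the original code) of how the
# SVM_classer passed into test() above could be fitted. It assumes
# scikit-learn's sklearn.svm.SVC and reuses the load_data helper above; only
# the last time-step feature of each sequence is used, mirroring what test()
# does at prediction time. The data_list split is illustrative.
def train_svm(data_dir=r'..\..\Out',
              data_list=['ID001_T001', 'ID001_T002', 'ID001_T003',
                         'ID001_T004', 'ID001_T009', 'ID001_T010']):
    from sklearn.svm import SVC  # assumed dependency, not imported by the original script
    batch_features, batch_label = load_data(data_dir, data_list)
    # Keep only the feature vector of the final frame of every sequence.
    last_feature = [frame_feature[-1] for frame_feature in batch_features]
    svm = SVC(kernel='linear')  # kernel choice is an assumption
    svm.fit(last_feature, batch_label)
    return svm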
def feature_extraction(data_list, data_dir=r'..\..\Out', im_shape=[640, 480, 3],
                       sufix='map', subset='train'):
    """Extract Inception_v3 features (spatial maps or vectors) for every event,
    caching per-frame features next to the images and per-event bundles in
    speedup2/<subset>."""
    speedup_dir = os.path.join(data_dir, 'speedup2', subset)
    if os.path.isdir(speedup_dir) is False:
        os.makedirs(speedup_dir)
    train_data = data_factory('TRI', data_dir=data_dir, data_list=data_list, quiet=False)
    with tf.Session() as sess:
        _img_str, _frame = data_process(im_shape)
        encoder = encoder_factory('Inception_v3')
        if sufix == 'map':
            _input, _feature, _train = encoder.last_map(
                [None, im_shape[0], im_shape[1], im_shape[2]])
        else:
            _input, _feature, _train = encoder.last_feature(
                [None, im_shape[0], im_shape[1], im_shape[2]])
        sess.run(tf.global_variables_initializer())
        encoder.load_model(sess)
        while True:
            evt_data, restart, evt_index, unique_id = train_data.next(
                shuffle=False, unit='evt')
            event_np = os.path.join(
                speedup_dir, '%s_%s_%s.npy' % (unique_id, encoder.model_id, sufix))
            print('%d: %s' % (evt_index, evt_data[0]['id']))
            event_front_features = []
            event_driver_features = []
            event_signal = []
            seq_label = []
            early_time = []
            early_distance = []
            front_img_path = []
            driver_img_path = []
            event_id = []
            evt_label = -1
            for seq_data in tqdm(evt_data):
                seq_f_features = []
                seq_d_features = []
                # Process the front-camera and driver-camera images of the sequence.
                for fimg_path, dimg_path in zip(seq_data['fimg'], seq_data['dimg']):
                    img_dir = os.path.dirname(fimg_path)
                    mat_path = os.path.join(img_dir, '%s_%s' % (encoder.model_id, sufix))
                    if os.path.isdir(mat_path) is False:
                        os.makedirs(mat_path)
                    fmat_path, dmat_path = list(map(
                        lambda x: os.path.join(mat_path,
                                               '%s.npy' % os.path.basename(x)[0:-4]),
                        [fimg_path, dimg_path]))
                    feature = gene_or_load_feature(fimg_path, fmat_path, sess, _img_str,
                                                   _frame, _input, _feature, _train)
                    seq_f_features.append(feature)
                    feature = gene_or_load_feature(dimg_path, dmat_path, sess, _img_str,
                                                   _frame, _input, _feature, _train)
                    seq_d_features.append(feature)
                event_front_features.append(np.stack(seq_f_features))
                event_driver_features.append(np.stack(seq_d_features))
                # Collect the remaining per-sequence metadata.
                seq_label.append(seq_data['seq_label'])
                early_time.append(seq_data['early_time'])
                early_distance.append(seq_data['early_distance'])
                front_img_path.append(seq_data['fimg'])
                driver_img_path.append(seq_data['dimg'])
                event_id.append(seq_data['id'])
                event_signal.append(seq_data['signal'])
                # Every sequence of an event must share the same event label.
                if evt_label == -1:
                    evt_label = seq_data['evt_label']
                else:
                    assert evt_label == seq_data['evt_label']
            np.save(event_np,
                    {'id': event_id,
                     'front_feature': event_front_features,
                     'driver_features': event_driver_features,
                     'signal': event_signal,
                     'seq_label': seq_label,
                     'early_time': early_time,
                     'early_distance': early_distance,
                     'event_label': evt_label,
                     'front_img_path': front_img_path,
                     'driver_img_path': driver_img_path})
            # Stop after one full pass over the data (missing in the original snippet).
            if restart:
                break
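# gene_or_load_feature is called by feature_extraction above but is not defined
# in this file. A minimal sketch of what it is assumed to do, based on the
# caching pattern used in the other training scripts: return the cached feature
# from mat_path if present, otherwise run the image through the preprocessing
# and encoder graphs and cache the result.
def gene_or_load_feature(img_path, mat_path, sess, _img_str, _frame,
                         _input, _feature, _train):
    if os.path.isfile(mat_path):
        # Reuse the per-frame feature cached on a previous run.
        return np.load(mat_path)
    img = open(img_path, 'rb').read()
    img = sess.run(_frame, feed_dict={_img_str: img})
    feature = sess.run(_feature, feed_dict={_input: [img], _train: False})
    # The batch dimension is squeezed here, following the pattern used in the
    # LSTM training script; this is an assumption about the cached layout.
    feature = np.squeeze(feature)
    np.save(mat_path, feature)
    return feature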