def process(lst):
    file_list = os.listdir(detail_path)
    test_time = time.time()
    startCount = 0
    for file in file_list:
        startCount += 1
        if startCount < args.start or file == '_frame_num.txt' or file not in lst:
            continue
        file_to_read_fid = open(detail_path + file, 'r')
        fid = file_to_read_fid.read()
        fid = [int(i) for i in fid.split(' ')]
        file_to_read_fid.close()
        file_to_write_x = open(x_path + file, 'w')
        file_to_write_y = open(y_path + file, 'w')
        video_to_capture = file.split('.')[0] + '.avi'
        vc = cv2.VideoCapture(video_path + video_to_capture)  # open the video file
        if not vc.isOpened():
            print('Open failure! exit')
            exit(0)
        rval = True
        count = 0
        index = 0
        x_matrix = np.zeros((len(fid), joints_keys))
        y_matrix = np.zeros((len(fid), joints_keys))
        while rval and index < len(fid):  # read video frames in a loop
            rval, frame = vc.read()
            if count == fid[index]:
                image = preprocess(frame, args.input_width, args.input_height)
                joints = run(image)
                for key in sorted(joints.keys()):
                    value = joints[key]
                    x_matrix[index][key] = value[0]
                    y_matrix[index][key] = value[1]
                index += 1
            count += 1
            cv2.waitKey(1)
        vc.release()
        file_to_write_x.write(','.join(
            str(i) for i in x_matrix.reshape(len(fid) * joints_keys).tolist()))
        file_to_write_y.write(','.join(
            str(i) for i in y_matrix.reshape(len(fid) * joints_keys).tolist()))
        if startCount % 100 == 0:
            print(startCount, ": ", time.time() - test_time)
        file_to_write_x.close()
        file_to_write_y.close()
    print('total time: ', time.time() - test_time)
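# Hedged companion sketch (not part of the original script): reads back one of
# the flattened joint-coordinate files that process() writes above. Assumes the
# same joints_keys value that was used at write time; the function name is
# illustrative.
import numpy as np

def load_joint_matrix(path, joints_keys):
    # Each file holds a single comma-joined list of num_frames * joints_keys
    # floats, so reshaping with -1 recovers the per-frame rows.
    with open(path, 'r') as f:
        values = [float(v) for v in f.read().split(',')]
    return np.array(values).reshape(-1, joints_keys)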
def get_prediction(img):
    preprocessed = preprocess(img, input_width, input_height)
    pafMat, heatMat = sess.run([
        net.get_output(name=last_layer.format(stage=stage_level, aux=1)),
        net.get_output(name=last_layer.format(stage=stage_level, aux=2))
    ], feed_dict={'image:0': [preprocessed]})
    heatMat, pafMat = heatMat[0], pafMat[0]
    humans = estimate_pose(heatMat, pafMat)
    return humans
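# Hedged usage sketch: get_prediction() depends on module-level globals
# (sess, net, last_layer, stage_level, input_width, input_height) being set up
# by the surrounding script. Given that, a single frame can be scored like
# this; the image path and the helper name are placeholders.
def demo_get_prediction(image_path='example.jpg'):
    frame = cv2.imread(image_path)
    if frame is None:
        raise IOError('could not read %s' % image_path)
    humans = get_prediction(frame)
    print('detected %d humans' % len(humans))
    return humans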
def tf(text):
    article = preprocess(text)
    frequency, terms = calculateTermFrequency(article)
    frequencySum = np.sum(frequency, axis=0)

    # Calculate sentence weights
    weightArray = []
    for i in range(0, len(article)):
        s = article[i]
        weight = calculateSentenceWeight(s, frequencySum, terms)
        weightArray.append((i, weight))
    weightArray.sort(key=lambda a: a[1])
    weightArray.reverse()
    return weightArray
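# Hedged companion sketch: tf() returns (sentence_index, weight) pairs sorted
# from heaviest to lightest, so an extractive summary can keep the top-k
# indices and re-emit those sentences in document order. summarize() is
# illustrative, not part of the original module, and assumes preprocess(text)
# yields the sentences as strings.
def summarize(text, k=3):
    weights = tf(text)
    top_indices = sorted(i for i, _ in weights[:k])  # restore document order
    sentences = preprocess(text)
    return ' '.join(sentences[i] for i in top_indices)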
def getHumans(self, img):
    preprocessed = preprocess(img, self.imgW, self.imgH)
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    pafMat, heatMat = self.sess.run(
        [
            self.net.get_output(
                name=self.last_layer.format(stage=self.stage_level, aux=1)),
            self.net.get_output(
                name=self.last_layer.format(stage=self.stage_level, aux=2))
        ],
        # Feed the preprocessed image (the original fed the raw img, leaving
        # `preprocessed` unused)
        feed_dict={'image:0': [preprocessed]},
        options=run_options,
        run_metadata=run_metadata)
    heatMat, pafMat = heatMat[0], pafMat[0]
    humans = estimate_pose(heatMat, pafMat)
    return humans
def get_eval_fn(model):
    """Return an evaluation function for server-side evaluation."""

    # Load test data here to avoid the overhead of doing it in `evaluate` itself
    _, test = tf.keras.datasets.mnist.load_data()
    test_data, test_labels = test

    # preprocessing
    test_data, test_labels = common.preprocess(test_data, test_labels)

    # The `evaluate` function will be called after every round
    def evaluate(weights: fl.common.Weights):
        model.set_weights(weights)  # Update model with the latest parameters
        loss, accuracy = model.evaluate(test_data, test_labels)
        return loss, {"accuracy": accuracy}

    return evaluate
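# Hedged usage sketch: with the pre-1.0 Flower API implied by fl.common.Weights
# above, the returned closure is typically passed to a strategy as eval_fn so
# the server evaluates the global model after every round. serve() is
# illustrative; the round count is a placeholder.
def serve(model, num_rounds=3):
    strategy = fl.server.strategy.FedAvg(eval_fn=get_eval_fn(model))
    fl.server.start_server(config={"num_rounds": num_rounds}, strategy=strategy)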
def main():
    data = np.loadtxt(args.input, delimiter=',')
    X = common.preprocess(data[:, 1:-1])
    y = data[:, -1].astype(np.uint8)

    model = ng3.Ng3Model()
    model.fit(X, y, monotonicity=[ng3.Ng3Model.NON_INCREASING,
                                  ng3.Ng3Model.NON_INCREASING])
    df = model.predict(X)

    # Plot each per-feature component of the model
    for i, model_part in enumerate(model._models):
        plt.figure()
        plt.plot(model_part._xs, model_part._ys)
        plt.title('Feature %d' % i)
        plt.savefig('feature_%d.png' % i)

    # Plot the decision surface over a grid spanning both features
    xx, yy = np.meshgrid(
        np.linspace(min(X[:, 0]), max(X[:, 0]), 1000),
        np.linspace(min(X[:, 1]), max(X[:, 1]), 1000))
    Z = model.predict(np.hstack([xx.ravel().reshape(-1, 1),
                                 yy.ravel().reshape(-1, 1)]))
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.contourf(xx, yy, Z)
    plt.title('Decision surface')
    plt.savefig('decision.png')
    plt.scatter(X[y == 0, 0], X[y == 0, 1], c='b', marker='+')
    plt.scatter(X[y == 1, 0], X[y == 1, 1], c='r', marker='+')
    plt.savefig('decision_with_points.png')

    # Pick the score threshold that achieves the target recall on positives
    thresh = np.percentile(df[y == 1], 100 * (1 - args.recall))
    p, r = common.pr(df > thresh, y)
    print('Threshold = %f' % thresh)
    print('  p = %f' % p)
    print('  r = %f' % r)

    with open(args.output, 'wb') as output_file:
        pickle.dump({
            'model': model,
            'thresh': thresh,
        }, output_file)
def load_data_to_db(dir_prefix, db):
    tweet_files = glob.glob(os.path.join(dir_prefix, '*.*'))

    with open('../../anonymized_user_info_by_chunk_training.csv', 'r') as f:
        reader = csv.reader(f)
        next(reader, None)  # skip the header row
        users = {
            row[0]: {
                'age': row[1],
                'num_tweets': row[2],
                'gender': row[3],
                'condition': row[4]
            }
            for row in reader
        }

    # user_records = [(un, float(users[un]['age']), int(users[un]['num_tweets']),
    #                  users[un]['gender'], LABEL_IDS[users[un]['condition']])
    #                 for un in users.keys()]
    #
    # c = db.cursor()
    # c.executemany('INSERT INTO users VALUES (?,?,?,?,?)', user_records)
    # db.commit()

    tweet_records = []
    for file in tweet_files:
        username = os.path.splitext(os.path.basename(file))[0]
        print(username)
        label = LABEL_IDS[users[username]['condition']]
        tweet_file = open(file, 'r')
        i = 0
        for line in tweet_file:
            tweet = json.loads(line)
            i += 1
            if valid_tweet(tweet):
                t = preprocess(tweet['text'])
                tweet_records.append((username, t))
        tweet_file.close()
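# Hedged sketch: load_data_to_db() collects tweet_records but never stores
# them. Mirroring the commented-out users insert above, the rows could be
# written like this; the `tweets` table name and its two-column schema are
# assumptions, not part of the original code.
def save_tweet_records(db, tweet_records):
    c = db.cursor()
    c.executemany('INSERT INTO tweets VALUES (?,?)', tweet_records)
    db.commit()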
                                interpolation=cv2.INTER_LINEAR)
        dx = (canvas.shape[1] - img_scaled.shape[1]) // 2
        dy = (canvas.shape[0] - img_scaled.shape[0]) // 2
        canvas[dy:dy + img_scaled.shape[0], dx:dx + img_scaled.shape[1]] = img_scaled
        img = canvas
    elif args.zoom > 1.0:
        img_scaled = cv2.resize(img, None, fx=args.zoom, fy=args.zoom,
                                interpolation=cv2.INTER_LINEAR)
        dx = (img_scaled.shape[1] - img.shape[1]) // 2
        dy = (img_scaled.shape[0] - img.shape[0]) // 2
        # Center-crop back to the original size; the offsets must be added to
        # both slice bounds (the original sliced [dy:img.shape[0]], which
        # produces a crop that is too small)
        img = img_scaled[dy:dy + img.shape[0], dx:dx + img.shape[1]]

    preprocessed = preprocess(img, args.input_width, args.input_height)

    logging.debug('video process+')
    pafMat, heatMat = sess.run([
        net.get_output(name=last_layer.format(stage=args.stage_level, aux=1)),
        net.get_output(name=last_layer.format(stage=args.stage_level, aux=2))
    ], feed_dict={'image:0': [preprocessed]})
    heatMat, pafMat = heatMat[0], pafMat[0]

    logging.debug('video postprocess+')
    t = time.time()
    humans = estimate_pose(heatMat, pafMat)
        images.extend(filenames)
        break

if not path.exists(new_images_folder):
    makedirs(new_images_folder)

counter = 0
img_data_arr = []
for filename in images:
    rotation = generateRandomRotation()
    img = Image.open(original_images_folder + '/' + filename)
    # if (len(common.face_detect(img)) == 0):
    img = img.rotate(rotation)
    rotations_output_file.write(str(rotation) + '\n')
    # NOTE: common.preprocess() returns only hog_1 here; hog_2 is assumed to
    # be defined elsewhere in the original script.
    hog_1, color_histogram, img_face_rotation_data, guessed_rotation_by_faces = \
        common.preprocess(img)
    img_data = np.concatenate((hog_1, hog_2, color_histogram,
                               img_face_rotation_data,
                               [guessed_rotation_by_faces])).ravel()
    img_data_arr.append(img_data)
    img.save(new_images_folder + "/" + filename)
    counter += 1
    print filename
    print img_face_rotation_data
    print guessed_rotation_by_faces
    print >> sys.stderr, str(counter)
    # else:
    #     print filename + " has a face"
    # if counter > 500:
    #     break
file_to_write = open(save_joints_path + save_file_name, 'w')
vc = cv2.VideoCapture(source_folder + file)  # open the video file
if not vc.isOpened():
    print('Open failure! exit')
    exit(0)
total = vc.get(cv2.CAP_PROP_FRAME_COUNT)
record_count = [0] * 19
record_sort = []
body = ""
rval = True
while rval:  # read video frames in a loop
    rval, frame = vc.read()
    joints = []
    if frame is not None:
        image = preprocess(frame, args.input_width, args.input_height)
        joints = run(image, show=False, trg_len=15)
        # print(len(joints), joints)
        for key in sorted(joints.keys()):
            value = joints[key]
            body += (str(key) + ':')
            body += (str(value[0]) + ',' + str(value[1]) + ' ')
        body += '\n'
    record_count[len(joints)] += 1
    record_sort.append(len(joints))
    cv2.waitKey(1)
vc.release()
title = ' '.join(str(i) for i in record_count) + '\n'
title += (' '.join(str(i) for i in record_sort) + '\n')
file_to_write.write(title)
file_to_write.write(body)
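# Hedged companion sketch: a reader for the file written above, with the
# layout inferred from the writer: line 1 is the joint-count histogram, line 2
# the per-frame joint counts, and each later line holds "key:x,y" tokens for
# one frame. Not part of the original script.
def read_joints_file(path):
    with open(path, 'r') as f:
        lines = f.read().splitlines()
    record_count = [int(v) for v in lines[0].split()]
    record_sort = [int(v) for v in lines[1].split()]
    frames = []
    for line in lines[2:]:
        joints = {}
        for token in line.split():
            key, coords = token.split(':')
            x, y = coords.split(',')
            joints[int(key)] = (float(x), float(y))
        frames.append(joints)
    return record_count, record_sort, frames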
heatmaps_tensor = tf.get_default_graph().get_tensor_by_name('Mconv7_stage6_L2/BiasAdd:0')
pafs_tensor = tf.get_default_graph().get_tensor_by_name('Mconv7_stage6_L1/BiasAdd:0')

t3 = time.time()
print("Time {} for tensor creation".format(t3 - t2))

with tf.Session() as sess:
    while cam.isOpened():
        success, image = cam.read()
        t4 = time.time()
        print("Time {} for reading cam".format(t4 - t3))
        if not success:
            continue

        image = preprocess(image, 656, 368)
        t5 = time.time()
        print("Time {} for preprocessed image".format(t5 - t4))

        # The session runner is really slow
        heatMat, pafMat = sess.run([heatmaps_tensor, pafs_tensor],
                                   feed_dict={inputs: image})
        t6 = time.time()
        print("Time {} for session runner".format(t6 - t5))

        heatMat, pafMat = heatMat[0], pafMat[0]
        humans = estimate_pose(heatMat, pafMat)
def evaluate(X, model, min_support=DEFAULT_MIN_SUPPORT):
    y_hat = model['model'].predict(common.preprocess(X)) > model['thresh']
    return common.convolve_left(y_hat, np.ones(10).astype(np.uint8)) > min_support
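# Hedged usage sketch: evaluate() expects the {'model': ..., 'thresh': ...}
# dict that main() pickles above, and the same CSV column layout main() reads
# (features in columns 1..n-1). run_saved_model() is illustrative; the
# argument names are placeholders.
import pickle
import numpy as np

def run_saved_model(model_path, data_path):
    with open(model_path, 'rb') as f:
        saved = pickle.load(f)
    data = np.loadtxt(data_path, delimiter=',')
    return evaluate(data[:, 1:-1], saved)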