def createJHMDB2(db_settings, logger):
    frame_format = db_settings['frame_format']
    action_name = db_settings['action_name']
    video_name = db_settings['video_name']
    annotation_path = db_settings['annotation_path']
    segmented_path = db_settings['segmented_path']
    orig_path = db_settings['orig_path']
    level = db_settings['level']
    frame = db_settings['frame']
    pickle_path = db_settings['pickle_path']
    neighbor_num = db_settings['number_of_neighbors']  #TODO add this to db_settings in experimentSetup
    database_path = db_settings['database_path']
    database_list_path = db_settings['database_list_path']
    features_path = db_settings['features_path']
    feature_type = db_settings['feature_type']
    #TODO: maybe we should save them separately
    #TODO: write a merge segment function?
    segmentors = {}
    logger.log('*** Segment parsing ***')
    for action in action_name:
        segmentors[action] = {}
        for video in video_name[action]:
            logger.log('Processing action:`{action}`, video:`{video}`:'.format(
                action=action, video=video))
            try:
                annotator = JA(
                    annotation_path.format(action_name=action, video_name=video))
                segmentor = MySegmentation(
                    orig_path.format(action_name=action, video_name=video, level=level) + frame_format,
                    segmented_path.format(action_name=action, video_name=video, level=level) + frame_format,
                    features_path.format(action_name=action, video_name=video, level=level),
                    annotator)
                segmentor.setFeatureType(feature_type)
                for i in xrange(frame):
                    logger.log('frame {0}'.format(i + 1))
                    segmentor.processNewFrame()
                segmentor.doneProcessing()
                logger.log("Total number of supervoxels: {0}".format(
                    len(segmentor.supervoxels)))
                segmentors[action][video] = segmentor
            except Exception as e:
                logger.log(
                    '============================\n ERROR: video: "{0}" has problems...: {1}\n==========================='
                    .format(video, str(e)))

    logger.log('*** Pickling ***')
    s = time.time()
    for action in action_name:
        for video in video_name[action]:
            logger.log('Pickling action:`{action}`, video:`{video}` ...'.format(
                action=action, video=video))
            pickle.dump(
                segmentors[action][video],
                open(pickle_path.format(action_name=action, video_name=video, level=level), 'w'))
    logger.log('Elapsed time: {0}'.format(time.time() - s))

    s = time.time()
    logger.log('*** Collecting features / Creating databases ***')
    keys = ['target', 'negative'] + ['neighbor{0}'.format(i) for i in range(neighbor_num)]
    feats = []
    # feats = [features]
    #logger.log('video 1 done!')
    #with open(database_list_path, 'w') as db_list:
    for action in action_name:
        for video in video_name[action]:
            db_path = database_path.format(action_name=action, video_name=video, level=level)
            database = DB(db_path)
            features = segmentors[action][video].getFeatures(
                neighbor_num, feature_type=feature_type)
            for name, data in features.iteritems():
                database.save(data, name)
            database.close()
            # db_list.write(db_path)
    write_db_list(db_settings, logger)
    logger.log('done!')
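
# Illustrative sketch only: a minimal `db_settings` dict covering the keys read
# by createJHMDB2 above. The key names come from the function itself; every
# value below (and the name `_EXAMPLE_DB_SETTINGS`) is a hypothetical
# placeholder, not the project's real configuration, which is assumed to be
# built in experimentSetup (see the TODO above).
_EXAMPLE_DB_SETTINGS = {
    'frame_format': '{0:05d}.ppm',
    'action_name': ['catch'],                       # list of action names
    'video_name': {'catch': ['example_video']},     # action -> list of video names
    'annotation_path': '/path/to/{action_name}/{video_name}/mask.csv',
    'segmented_path': '/path/to/{action_name}/{video_name}/seg/{level:02d}/',
    'orig_path': '/path/to/{action_name}/{video_name}/',
    'features_path': '/path/to/{action_name}/{video_name}/features_{level}',
    'pickle_path': '/path/to/{action_name}/{video_name}/segment_{level}.p',
    'database_path': '/path/to/{action_name}/{video_name}/db_{level}.h5',
    'database_list_path': '/path/to/db_list.txt',
    'level': 2,                                     # supervoxel hierarchy level
    'frame': 31,                                    # number of frames to process
    'number_of_neighbors': 6,
    'feature_type': None,                           # placeholder; a FeatureType value in the real settings
}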
def create_dbs():
    configs = getConfigs()
    frame_format = configs.frame_format
    seg_path = configs.seg_path
    orig_path = configs.orig_path
    first_output = configs.first_output
    output_path = configs.output_path
    dataset_path = configs.dataset_path
    annotation_path = configs.annotation_path
    feature_name = '256bin'
    level = 2
    segmentors = []
    vid_num = 2
    frames_per_video = 31
    if 1 == 1:
        for dd in range(vid_num):
            d = dd + 1
            print 'b{0}'.format(d)
            annotator = JA(annotation_path.format(name='b' + str(d)))
            segmentor = MySegmentation(
                orig_path.format(d) + frame_format,
                seg_path.format(d, level) + frame_format,
                annotator)
            for i in range(1, frames_per_video):
                print "processing frame {i}".format(i=i)
                segmentor.processNewFrame()
            segmentor.doneProcessing()
            segmentors.append(segmentor)
            print "Total number of supervoxels: {0}".format(len(segmentor.supervoxels))
            print
        try:
            mkdirs(dataset_path)
        except:
            pass
        print 'Pickling ...'
        t = time.time()
        for i in range(vid_num):
            pickle.dump(
                segmentors[i],
                open(dataset_path.format(name='segment_{0}.p'.format(i + 1)), 'w'))
            print '{0}-th done. time elapsed: {1}'.format(i + 1, time.time() - t)
            t = time.time()
        #TODO create database
    else:
        for i in range(vid_num):
            segmentors.append(
                pickle.load(
                    open(dataset_path.format(name='segment_{0}.p'.format(i + 1)), 'r')))
    database = DB(dataset_path.format(name='videos{v}_feature{f}_lvl{l}.h5'.format(
        v='_'.join(map(str, range(1, vid_num))), f=feature_name, l=level)))
    print 'Collecting features ...'
    neighbor_num = 6
    keys = ['target', 'negative'] + ['neighbor{0}'.format(i) for i in range(neighbor_num)]
    features = segmentors[0].getFeatures(neighbor_num)
    print 'shape features', features['target'].shape
    feats = [features]
    print 'video 1 done!'
    for i in range(1, len(segmentors)):
        tmp = segmentors[i].getFeatures(neighbor_num)
        #feats.append(tmp)
        for key in keys:
            features[key] = np.append(features[key], tmp[key], axis=0)
        print 'video {0} done!'.format(i + 1)
    #print data
    #database_path = '
    print 'saving to database ...'
    for name, data in features.iteritems():
        database.save(data, name)
    #database.save(dataset)
    database.close()
    print 'done!'
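
# Illustration (not executed): the per-video feature dicts merged in create_dbs
# above all share the keys in `keys` ('target', 'negative', 'neighbor0', ...,
# 'neighbor5'), each mapping to an (n_i, d) array for video i, and videos are
# stacked row-wise with np.append(..., axis=0). The shapes below are made up.
#
#   a = {'target': np.zeros((10, 256))}   # video 1: 10 supervoxel samples
#   b = {'target': np.ones((7, 256))}     # video 2: 7 supervoxel samples
#   merged = np.append(a['target'], b['target'], axis=0)   # shape (17, 256)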
def createJHMDB(db_settings, logger):
    frame_format = db_settings['frame_format']
    action_name = db_settings['action_name']
    video_name = db_settings['video_name']
    annotation_path = db_settings['annotation_path']
    segmented_path = db_settings['segmented_path']
    orig_path = db_settings['orig_path']
    level = db_settings['level']
    frame = db_settings['frame']
    n_neg = db_settings['number_of_negatives']
    pickle_path = db_settings['pickle_path']
    neighbor_num = db_settings['number_of_neighbors']  #TODO add this to db_settings in experimentSetup
    database_path = db_settings['database_path']
    database_list_path = db_settings['database_list_path']
    features_path = db_settings['features_path']
    feature_type = db_settings['feature_type']
    labelledlevelvideo_path = db_settings['voxellabelledlevelvideo_path']
    optical_flow_path = db_settings['optical_flow_path']
    fcn_path = db_settings['fcn_path']
    #TODO: maybe we should save them separately
    #TODO: write a merge segment function?
    logger.log('*** Segment parsing ***')
    keys = ['target', 'negative'] + ['neighbor{0}'.format(i) for i in range(neighbor_num)]
    # from multiprocessing import Pool
    # pool = Pool()
    # parallelProcess = lambda seg: seg[1].processNewFrame(seg[0])
    for action in action_name:
        for video in video_name[action]:
            logger.log('Processing action:`{action}`, video:`{video}`:'.format(
                action=action, video=video))
            try:
                annotator = JA(annotation_path.format(action_name=action, video_name=video))
            except:
                annotator = None
            segmentor = MySegmentation(
                orig_path.format(action_name=action, video_name=video, level=level) + frame_format,
                segmented_path.format(action_name=action, video_name=video, level=level) + frame_format,
                features_path.format(action_name=action, video_name=video, level=level),
                annotator,
                None,
                labelledlevelvideo_path.format(action_name=action, video_name=video, level=level),
                optical_flow_path.format(action_name=action, video_name=video, level=level) + frame_format,
                negative_neighbors=n_neg,
                fcn_path=fcn_path.format(action_name=action, video_name=video, level=level) + frame_format)
            segmentor.setFeatureType(feature_type)
            # segmentor_list = []
            # for i in xrange(frames_per_video):
            #     segmentor_list.append((i, MySegmentation(orig_path.format(d)+frame_format, seg_path.format(d,level)+frame_format, annotator)))
            # parallelized_segmentor_list = pool.map(parallelProcess, segmentor_list)
            for i in xrange(frame):
                logger.log('frame {0}'.format(i + 1))
                segmentor.processNewFrame()
            segmentor.doneProcessing()
            logger.log("Total number of supervoxels: {0}".format(len(segmentor.supervoxels)))

            logger.log('*** Pickling ***')
            logger.log('Pickling action:`{action}`, video:`{video}` ...'.format(
                action=action, video=video))
            s = time.time()
            pickle.dump(
                segmentor,
                open(pickle_path.format(action_name=action, video_name=video, level=level), 'w'))
            logger.log('Elapsed time: {0}'.format(time.time() - s))

            logger.log('*** Collecting features / Creating databases ***')
            db_path = database_path.format(action_name=action, video_name=video, level=level)
            database = DB(db_path)
            features = segmentor.getFeatures(neighbor_num, feature_type=feature_type)
            if type(feature_type) is list:
                # getFeatures returns (features_dict, feat_size) when several
                # feature types are requested; split the concatenated matrix
                # back into one dataset per feature type.
                feat_size = features[-1]
                features = features[0]
                for _id, feature_type_i in enumerate(feature_type):
                    idx1 = sum(feat_size[:_id])
                    idx2 = sum(feat_size[:(_id + 1)])
                    for name, data in features.iteritems():
                        database.save(data[..., idx1:idx2], feature_type_i.name + '_' + name)
            else:
                for name, data in features.iteritems():
                    database.save(data, name)
            database.close()
        logger.log("Segment {0} Done!\n".format(action))
    write_db_list(db_settings, logger)
    logger.log('done!')
def main():
    frame_format = '{0:05d}.ppm'
    seg_path = '/cs/vml3/mkhodaba/cvpr16/dataset/b{0}/seg/{1:02d}/'  #+ frame_format
    orig_path = '/cs/vml3/mkhodaba/cvpr16/dataset/b{0}/'  #+ frame_format
    first_output = '/cs/vml3/mkhodaba/cvpr16/dataset/b{0}/mymethod/{1:02d}/first/'  #.format(level)
    output_path = '/cs/vml3/mkhodaba/cvpr16/dataset/b{0}/mymethod/{1:02d}/output/'  #.format(level)
    dataset_path = '/cs/vml3/mkhodaba/cvpr16/code/embedding_segmentation/dataset/{name}'
    annotation_path = '/cs/vml3/mkhodaba/cvpr16/dataset/{name}_mask/mask.csv'
    # Preparing data for
    #segmentor = Segmentation(orig_path, seg_path+frame_format)
    level = 1
    segmentors = []
    vid_num = 4
    frames_per_video = 31
    for d in range(1, vid_num):
        print 'b{0}'.format(d)
        annotator = JA(annotation_path.format(name='b' + str(d)))
        segmentor = MySegmentation(
            orig_path.format(d) + frame_format,
            seg_path.format(d, level) + frame_format,
            annotator)
        for i in range(1, frames_per_video):
            print "processing frame {i}".format(i=i)
            segmentor.processNewFrame()
        segmentor.doneProcessing()
        segmentors.append(segmentor)
        print "Total number of supervoxels: {0}".format(len(segmentor.supervoxels))
        print
        #sv = segmentor.getSupervoxelAt(27, 127, 20)
        #print sv
        #supervoxels = segmentor.getKNearestSupervoxelsOf(sv, 6)
        #supervoxels.remove(sv)
        #for s in supervoxels:
        #    print s
        #TODO check if features are correct
        ##for sv in segmentor.supervoxels_list:
        ##    print sv.getFeature()
        ##    print "ID: {0}".format(sv.ID)
        #R_hist = [0 for i in xrange(13)]
        #G_hist = [0 for i in xrange(13)]
        #B_hist = [0 for i in xrange(13)]
        #R_hist[int(sv.ID[0]/20)] += 1
        #G_hist[int(sv.ID[1]/20)] += 1
        #B_hist[int(sv.ID[2]/20)] += 1
        #print R_hist+G_hist+B_hist
        #print sum(sv.getFeature())/3
        #print "Num pixels: {0}".format(sv.number_of_pixels)
    pickle.dump(segmentors[0], open(dataset_path.format(name='segment_1.p'), 'w'))
    pickle.dump(segmentors[1], open(dataset_path.format(name='segment_2.p'), 'w'))
    pickle.dump(segmentors[2], open(dataset_path.format(name='segment_3.p'), 'w'))
    '''
    #TODO create database
    mkdirs(dataset_path)
    database = DB(dataset_path.format(name='b1b2_train_16bins_lvl{0}.h5'.format(level)))
    print 'Collecting features ...'
    neighbor_num = 6
    keys = ['target', 'negative'] + ['neighbor{0}'.format(i) for i in range(neighbor_num)]
    features = segmentors[0].getFeatures(neighbor_num)
    print 'shape features', features['target'].shape
    feats = [features]
    print 'video 1 done!'
    for i in range(1, len(segmentors)-1):
        tmp = segmentors[i].getFeatures(neighbor_num)
        feats.append(tmp)
        for key in keys:
            features[key] = np.append(features[key], tmp[key], axis=0)
        print 'video {0} done!'.format(i+1)
    #print data
    #database_path = '
    print 'saving to database ...'
    for name, data in features.iteritems():
        database.save(data, name)
    #database.save(dataset)
    database.close()

    database = DB(dataset_path.format(name='b3_test_16bins_lvl{0}.h5'.format(level)))
    print 'Collecting features ...'
    neighbor_num = 6
    features = segmentors[-1].getFeatures(neighbor_num)
    print 'shape features', features['target'].shape
    feats = [features]
    print 'video 3 done!'
    #print data
    #database_path = '
    print 'saving to database ...'
    for name, data in features.iteritems():
        database.save(data, name)
    #database.save(dataset)
    database.close()
    '''
    print 'done!'
def createJHMDBParallel(db_settings, logger):
    frame_format = db_settings['frame_format']
    action_name = db_settings['action_name']
    video_name = db_settings['video_name']
    annotation_path = db_settings['annotation_path']
    segmented_path = db_settings['segmented_path']
    orig_path = db_settings['orig_path']
    level = int(db_settings['level'])
    frame = db_settings['frame']
    n_neg = db_settings['number_of_negatives']
    pickle_path = db_settings['pickle_path']
    neighbor_num = db_settings['number_of_neighbors']  #TODO add this to db_settings in experimentSetup
    database_path = db_settings['database_path']
    database_list_path = db_settings['database_list_path']
    features_path = db_settings['features_path']
    feature_type = db_settings['feature_type']
    if type(feature_type) is str:
        feature_type = getattr(FeatureType, feature_type)
    print feature_type
    if (type(feature_type) is list) and type(feature_type[0]) is str:
        feature_type = map(lambda x: getattr(FeatureType, x), feature_type)
    labelledlevelvideo_path = db_settings['voxellabelledlevelvideo_path']
    optical_flow_path = db_settings['optical_flow_path']
    output_path = db_settings['output_path']
    print 'output_path is:', output_path
    compute_segment = db_settings['compute_segment']
    fcn_path = db_settings['fcn_path']
    #TODO: maybe we should save them separately
    #TODO: write a merge segment function?
    logger.log('*** Segment parsing ***')
    keys = ['target', 'negative'] + ['neighbor{0}'.format(i) for i in range(neighbor_num)]
    for action in action_name:
        for video in video_name[action]:
            logger.log('Processing action:`{action}`, video:`{video}`:'.format(
                action=action, video=video))
            try:
                annotator = JA(annotation_path.format(action_name=action, video_name=video))
            except:
                annotator = None
            segmentor_list = []
            if compute_segment:
                for i in xrange(frame):
                    print 'segment of frame:', i
                    segmentor = MySegmentation(
                        orig_path.format(action_name=action, video_name=video, level=level) + frame_format,
                        segmented_path.format(action_name=action, video_name=video, level=level) + frame_format,
                        '',  # features_path.format(action_name=action, video_name=video, level=level),
                        annotator,
                        None,
                        labelledlevelvideo_path.format(action_name=action, video_name=video, level=level),
                        optical_flow_path.format(action_name=action, video_name=video, level=level) + frame_format,
                        negative_neighbors=n_neg,
                        fcn_path=fcn_path.format(action_name=action, video_name=video, level=level) + frame_format,
                        output_path=output_path)
                    segmentor.setFeatureType(feature_type)
                    segmentor_list.append((i, segmentor))
                    # segmentor_list.append((i, MySegmentation(orig_path.format(d)+frame_format, seg_path.format(d,level)+frame_format, annotator)))
                # parallelProcess = lambda pair: pair[1].processNewFrame(pair[0])  #pair = (frame_number, segment)
                print 'frame number:', frame
                s = time.time()
                print 'parallelizing begins', 'Elapsed time:', time.time() - s
                parallel = True
                if parallel:
                    from multiprocessing import Pool
                    print 'create pool'
                    #this is the real one
                    pool = Pool(8)
                    print 'defining function'
                    parallelized_segmentor_list = pool.map(parallelProcess, segmentor_list)
                    pool.close()
                    pool.join()
                else:
                    parallelized_segmentor_list = []
                    for segment_frame_chunk_small_bad in segmentor_list:
                        parallelized_segmentor_list.append(parallelProcess(segment_frame_chunk_small_bad))
                del segmentor_list
                # for segg in parallelized_segmentor_list:
                #     segg.processNewFramePar(i+1)
                print 'Parallelizing DONE.', 'Elapsed time: ', time.time() - s
                print 'Merging begins'
                s = time.time()
                segmentor = parallelized_segmentor_list[-1]
                print len(parallelized_segmentor_list)
                for i in xrange(len(parallelized_segmentor_list) - 1):
                    try:
                        segmentor.merge(parallelized_segmentor_list[i])
                    except:
                        print 'Bad Segment', i
                        raise
                    parallelized_segmentor_list[i] = 0
                # segmentor.current_frame = len(parallelized_segmentor_list)
                print 'Merging DONE. Elapsed time: ', time.time() - s
                # for i in xrange(frame):
                #     logger.log('frame {0}'.format(i+1))
                #     segmentor.processNewFrame()
                segmentor.doneProcessing()
                getFeatFrom = lambda sv, feat_name: getattr(sv, 'get%s' % feat_name)()
                logger.log("Total number of supervoxels: {0}".format(len(segmentor.supervoxels)))
                data = {}
                for feat_type in feature_type:
                    feature_name = feat_type.name
                    feature_len = getFeatFrom(segmentor.supervoxels_list[0], feature_name).shape[1]
                    features = np.zeros((len(segmentor.supervoxels_list), feature_len))
                    for i, sv in enumerate(segmentor.supervoxels_list):
                        features[i] = getFeatFrom(sv, feature_name)
                        if feature_name == 'FCN':
                            features[i] = _scale(features[i])
                    data[feature_name] = features
                    # np.savez(features_path.format(action_name=action_name, feature_name=feature_name), **{feature_name: features})
                centers = np.zeros((len(segmentor.supervoxels), 3))
                colors = np.zeros((len(segmentor.supervoxels), 3), dtype=np.int8)
                for i, sv in enumerate(segmentor.supervoxels_list):
                    centers[i] = sv.center()
                    colors[i] = sv.ID
                data['centers'] = centers
                data['colors'] = colors
                del segmentor
                logger.log('Saving data')
                s = time.time()
                np.savez(features_path.format(action_name=action, feature_name='features', level=level), **data)
                # logger.log('*** Pickling ***')
                # s = time.time()
                # logger.log('Elapsed time: {0}'.format(time.time()-s))
                # pickle.dump(segmentor, open(pickle_path.format(action_name=action, video_name=video, level=level), 'w'))
                # s = time.time()
                # logger.log('Pickling action:`{action}`, video:`{video}` ...'.format(action=action, video=video))
                # logger.log('*** Collecting features / Creating databases ***')
            else:
                # Don't compute segments; reuse the saved feature archive.
                logger.log('No need to compute segments')
                logger.log('loading features')
                data = np.load(features_path.format(action_name=action, feature_name='features', level=level))
                # segmentor = pickle.load(open(pickle_path.format(action_name=action, video_name=video, level=level), 'r'))
                # segmentor.output_path = output_path
                # segmentor.__class__ = MySegmentation
            createVSB100Database(data, db_settings, logger)
        logger.log("Segment {0} Done!\n".format(action))
    write_db_list(db_settings, logger)
    logger.log('done!')
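
# Note: `parallelProcess` is not defined in this section; it is assumed to be a
# module-level function (multiprocessing.Pool.map cannot pickle lambdas),
# consistent with the commented hint inside createJHMDBParallel above. A
# minimal sketch of what it is assumed to do (illustration only):
#
#   def parallelProcess(pair):                 # pair = (frame_number, segmentor)
#       frame_number, segmentor = pair
#       return segmentor.processNewFrame(frame_number)   # assumed to return the processed segmentor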
def createJHMDBParallel_old(db_settings, logger):
    frame_format = db_settings['frame_format']
    action_name = db_settings['action_name']
    video_name = db_settings['video_name']
    annotation_path = db_settings['annotation_path']
    segmented_path = db_settings['segmented_path']
    orig_path = db_settings['orig_path']
    level = int(db_settings['level'])
    frame = db_settings['frame']
    n_neg = db_settings['number_of_negatives']
    pickle_path = db_settings['pickle_path']
    neighbor_num = db_settings['number_of_neighbors']  #TODO add this to db_settings in experimentSetup
    database_path = db_settings['database_path']
    database_list_path = db_settings['database_list_path']
    features_path = db_settings['features_path']
    feature_type = db_settings['feature_type']
    labelledlevelvideo_path = db_settings['voxellabelledlevelvideo_path']
    optical_flow_path = db_settings['optical_flow_path']
    output_path = db_settings['output_path']
    print 'output_path is:', output_path
    compute_segment = db_settings['compute_segment']
    fcn_path = db_settings['fcn_path']
    #TODO: maybe we should save them separately
    #TODO: write a merge segment function?
    logger.log('*** Segment parsing ***')
    keys = ['target', 'negative'] + ['neighbor{0}'.format(i) for i in range(neighbor_num)]
    for action in action_name:
        for video in video_name[action]:
            logger.log('Processing action:`{action}`, video:`{video}`:'.format(
                action=action, video=video))
            try:
                annotator = JA(annotation_path.format(action_name=action, video_name=video))
            except:
                annotator = None
            segmentor_list = []
            if compute_segment:
                for i in xrange(frame):
                    print 'segment of frame:', i
                    segmentor = MySegmentation(
                        orig_path.format(action_name=action, video_name=video, level=level) + frame_format,
                        segmented_path.format(action_name=action, video_name=video, level=level) + frame_format,
                        features_path.format(action_name=action, video_name=video, level=level),
                        annotator,
                        None,
                        labelledlevelvideo_path.format(action_name=action, video_name=video, level=level),
                        optical_flow_path.format(action_name=action, video_name=video, level=level) + frame_format,
                        negative_neighbors=n_neg,
                        fcn_path=fcn_path.format(action_name=action, video_name=video, level=level) + frame_format,
                        output_path=output_path)
                    segmentor.setFeatureType(feature_type)
                    segmentor_list.append((i, segmentor))
                    # segmentor_list.append((i, MySegmentation(orig_path.format(d)+frame_format, seg_path.format(d,level)+frame_format, annotator)))
                # parallelProcess = lambda pair: pair[1].processNewFrame(pair[0])  #pair = (frame_number, segment)
                from multiprocessing import Pool
                print 'create pool'
                pool = Pool()
                print 'defining function'
                # pair[1].processNewFrame(pair[0])  #pair = (frame_number, segment)
                s = time.time()
                print 'parallelizing begins', 'Elapsed time:', time.time() - s
                s = time.time()
                parallelized_segmentor_list = pool.map(parallelProcess, segmentor_list)
                del segmentor_list
                # for segg in parallelized_segmentor_list:
                #     segg.processNewFramePar(i+1)
                print 'Parallelizing DONE.', 'Elapsed time: ', time.time() - s
                print 'Merging begins'
                s = time.time()
                segmentor = parallelized_segmentor_list[-1]
                for i in xrange(len(parallelized_segmentor_list) - 1):
                    try:
                        segmentor.merge(parallelized_segmentor_list[i])
                    except:
                        print 'Bad Segment', i
                        raise
                    parallelized_segmentor_list[i] = 0
                # segmentor.current_frame = len(parallelized_segmentor_list)
                print 'Merging DONE. Elapsed time: ', time.time() - s
                # for i in xrange(frame):
                #     logger.log('frame {0}'.format(i+1))
                #     segmentor.processNewFrame()
                segmentor.doneProcessing()
                logger.log("Total number of supervoxels: {0}".format(len(segmentor.supervoxels)))
                logger.log('*** Pickling ***')
                logger.log('Pickling action:`{action}`, video:`{video}` ...'.format(
                    action=action, video=video))
                s = time.time()
                pickle.dump(
                    segmentor,
                    open(pickle_path.format(action_name=action, video_name=video, level=level), 'w'))
                logger.log('Elapsed time: {0}'.format(time.time() - s))
                logger.log('*** Collecting features / Creating databases ***')
            else:
                # Don't compute segments; reload the pickled segmentor instead.
                logger.log('No need to compute segments')
                logger.log('loading segments')
                segmentor = pickle.load(
                    open(pickle_path.format(action_name=action, video_name=video, level=level), 'r'))
                segmentor.output_path = output_path
                segmentor.__class__ = MySegmentation
            db_path = database_path.format(action_name=action, video_name=video, level=level)
            database = DB(db_path)
            features = segmentor.getFeatures(neighbor_num, feature_type=feature_type)
            n_data = features['target'].shape[0]
            for name, data in features.iteritems():
                database.save(data, name)
            database.save(np.ones((n_data, db_settings['inner_product_output'])), 'data_weights')
            database.close()
        logger.log("Segment {0} Done!\n".format(action))
    write_db_list(db_settings, logger)
    logger.log('done!')
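
# Hypothetical driver sketch (not part of the original module): the creators
# above only require a `db_settings` dict (assumed to come from experimentSetup,
# per the TODOs) and a logger object exposing a single log(msg) method.
# `PrintLogger` is an illustrative stub, not the project's real logger.
class PrintLogger(object):
    def log(self, msg):
        print msg

# Example invocation, commented out so importing this module has no side effects:
# if __name__ == '__main__':
#     db_settings = ...  # build or load the settings dict
#     createJHMDBParallel(db_settings, PrintLogger())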