def main():
    """Score every feature file of a database with a trained MLP or LinearMachine.

    Loads the machine from an HDF5 file, runs it over each object's feature
    matrix and saves one score per frame, keeping NaN rows (invalid frames)
    aligned in the output.
    """

    basedir = os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))

    INPUTDIR = os.path.join(basedir, 'quantities')
    MACHINE = os.path.join(basedir, 'mlp/mlp.hdf5')
    OUTPUTDIR = os.path.join(basedir, 'scores')

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument(
        'inputdir', metavar='DIR', type=str, default=INPUTDIR, nargs='?',
        help='Base directory containing the feature vectors to be inspected by this procedure (defaults to "%(default)s")')
    parser.add_argument(
        'machine', metavar='FILE', type=str, default=MACHINE, nargs='?',
        help='Name of the machine containing the trained MLP or Linear Machine that will be used to produce the scores (defaults to "%(default)s").')
    parser.add_argument(
        'outputdir', metavar='DIR', type=str, default=OUTPUTDIR, nargs='?',
        help='Base directory that will be used to save the results (defaults to "%(default)s").')
    parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
        default=False, help='Increases this script verbosity')

    # Adds database support using the common infrastructure
    # N.B.: Only databases with 'video' support
    import antispoofing.utils.db
    antispoofing.utils.db.Database.create_parser(parser, 'video')

    args = parser.parse_args()

    if not os.path.exists(args.inputdir):
        parser.error("input directory `%s' does not exist" % args.inputdir)

    if not os.path.exists(args.machine):
        parser.error("Machine file `%s' does not exist" % args.machine)

    if not os.path.exists(args.outputdir):
        if args.verbose:
            print("Creating output directory `%s'..." % args.outputdir)
        bob.db.utils.makedirs_safe(args.outputdir)

    # Creates an instance of the database
    db = args.cls(args)

    real, attack = db.get_all_data()
    process = real + attack

    macfile = bob.io.HDF5File(args.machine)

    # Try an MLP first, then fall back to a LinearMachine.  The original used
    # bare `except:`; narrowed to `except Exception:` so KeyboardInterrupt and
    # SystemExit are not swallowed.  If neither works, re-raise so the user
    # sees the underlying loading error.
    try:
        machine = bob.machine.MLP(macfile)
    except Exception:
        try:
            machine = bob.machine.LinearMachine(macfile)
        except Exception:
            print("Cannot load Linear or MLP machine from file %s" % args.machine)
            raise

    for counter, obj in enumerate(process, 1):

        if args.verbose:
            filename = obj.make_path(args.inputdir, extension='.hdf5')
            sys.stdout.write("Processing file %s [%d/%d] " %
                             (filename, counter, len(process)))

        # renamed from `input` to avoid shadowing the builtin
        features = obj.load(args.inputdir, '.hdf5')

        # Rows containing any NaN (invalid frames) are skipped by the machine
        # and written back as NaN scores so output stays frame-aligned.
        valid_index = ~numpy.isnan(features.sum(axis=1))
        valid_data = features[valid_index, :]

        valid_output = machine(valid_data)

        output = numpy.ndarray((len(features), 1), dtype='float64')
        output[valid_index] = valid_output
        output[~valid_index] = numpy.NaN

        obj.save(output, args.outputdir, '.hdf5')

        if args.verbose:
            sys.stdout.write('Saving results to "%s"...\n' % args.outputdir)
            sys.stdout.flush()

    if args.verbose:
        print("All done, bye!")
def main():
    """Cluster per-frame face/background differences into windowed quantities.

    For every object in the database, loads the two-column frame-difference
    matrix, runs ``cluster_5quantities`` over each column with the requested
    window size and overlap, and saves the horizontally-stacked result.
    """

    import bob
    import numpy

    basedir = os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))

    INPUTDIR = os.path.join(basedir, 'framediff')
    OUTPUTDIR = os.path.join(basedir, 'clustered')

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument(
        'inputdir', metavar='DIR', type=str, default=INPUTDIR, nargs='?',
        help='Base directory containing the frame differences to be treated by this procedure (defaults to "%(default)s")')
    parser.add_argument(
        'outputdir', metavar='DIR', type=str, default=OUTPUTDIR, nargs='?',
        help='Base output directory for every file created by this procedure (defaults to "%(default)s")')
    # NOTE: the original had stray trailing commas after the next two calls,
    # creating throw-away 1-tuples; they are removed here (no behavior change).
    parser.add_argument(
        '-n', '--window-size', dest="window_size", default=20, type=int,
        help="determines the window size to be used when clustering frame-difference observations (defaults to %(default)s)")
    parser.add_argument(
        '-o', '--overlap', dest="overlap", default=0, type=int,
        help="determines the window overlapping; this number has to be between 0 (no overlapping) and 'window-size'-1 (defaults to %(default)s)")

    # The next option just returns the total number of cases we will be running
    # It can be used to set jman --array option. To avoid user confusion, this
    # option is suppressed from the --help menu
    parser.add_argument('--grid-count', dest='grid_count', action='store_true',
        default=False, help=argparse.SUPPRESS)

    # Adds database support using the common infrastructure
    # N.B.: Only databases with 'video' support
    import antispoofing.utils.db
    antispoofing.utils.db.Database.create_parser(parser, 'video')

    args = parser.parse_args()

    # checks window size and overlap
    if args.window_size <= 0:
        parser.error("window-size has to be greater than 0")

    if args.overlap >= args.window_size or args.overlap < 0:
        parser.error("overlap has to be smaller than window-size and greater or equal zero")

    from .. import cluster_5quantities

    # Creates an instance of the database
    db = args.cls(args)

    real, attack = db.get_all_data()
    process = real + attack

    if args.grid_count:
        print(len(process))
        sys.exit(0)

    # if we are on a grid environment, just find what I have to process.
    # (`has_key` was removed in Python 3; `in` works on both)
    if 'SGE_TASK_ID' in os.environ:
        pos = int(os.environ['SGE_TASK_ID']) - 1
        if pos >= len(process):
            raise RuntimeError("Grid request for job %d on a setup with %d jobs" %
                               (pos, len(process)))
        process = [process[pos]]

    sys.stdout.write('Processing %d file(s)\n' % len(process))
    sys.stdout.flush()

    for counter, obj in enumerate(process, 1):

        sys.stdout.write(
            "Processing file %s [%d/%d] " %
            (obj.make_path(args.inputdir, '.hdf5'), counter, len(process)))

        # renamed from `input` to avoid shadowing the builtin
        framediffs = obj.load(args.inputdir, '.hdf5')

        # column 0 = face differences, column 1 = background differences
        # (matches the writer in the frame-difference extraction script)
        d_face = cluster_5quantities(framediffs[:, 0], args.window_size, args.overlap)
        d_bg = cluster_5quantities(framediffs[:, 1], args.window_size, args.overlap)

        sys.stdout.write('Saving results to "%s"...\n' % args.outputdir)
        obj.save(numpy.hstack((d_face, d_bg)), args.outputdir, '.hdf5')
        sys.stdout.flush()

    sys.stdout.write('\n')
    sys.stdout.flush()

    return 0
def main():
    """Compute per-frame face and background gray-level differences for videos.

    For each video in the database, converts every frame to gray scale and
    evaluates the difference between consecutive frames inside the detected
    face bounding box and in the background, writing a two-column (face, bg)
    float64 matrix per object.  Frames without a valid detection get NaN.
    """

    import bob
    import numpy

    basedir = os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))

    INPUTDIR = os.path.join(basedir, 'database')
    OUTPUTDIR = os.path.join(basedir, 'framediff')

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument(
        '-m', '--mininum-face-size', dest='min_face_size', metavar='PIXELS',
        type=int, default=0,
        help='Minimum face size to be considered when pre-processing and extrapolating detections (defaults to %(default)s)')
    parser.add_argument(
        'inputdir', metavar='DIR', type=str, default=INPUTDIR, nargs='?',
        help='Base directory containing the videos to be treated by this procedure (defaults to "%(default)s")')
    parser.add_argument(
        'outputdir', metavar='DIR', type=str, default=OUTPUTDIR, nargs='?',
        help='Base output directory for every file created by this procedure defaults to "%(default)s")')

    # The next option just returns the total number of cases we will be running
    # It can be used to set jman --array option. To avoid user confusion, this
    # option is suppressed from the --help menu
    parser.add_argument('--grid-count', dest='grid_count', action='store_true',
        default=False, help=argparse.SUPPRESS)

    # Adds database support using the common infrastructure
    # N.B.: Only databases with 'video' support
    import antispoofing.utils.db
    antispoofing.utils.db.Database.create_parser(parser, 'video')

    args = parser.parse_args()

    # checks face sizes
    if args.min_face_size < 0:
        parser.error('Face size cannot be less than zero. Give a value in pixels')

    from antispoofing.utils.faceloc import preprocess_detections
    from .. import eval_face_differences, eval_background_differences

    # Creates an instance of the database
    db = args.cls(args)

    real, attack = db.get_all_data()
    process = real + attack

    if args.grid_count:
        print(len(process))
        sys.exit(0)

    # if we are on a grid environment, just find what I have to process.
    # (`has_key` was removed in Python 3; `in` works on both)
    if 'SGE_TASK_ID' in os.environ:
        pos = int(os.environ['SGE_TASK_ID']) - 1
        if pos >= len(process):
            raise RuntimeError("Grid request for job %d on a setup with %d jobs" %
                               (pos, len(process)))
        process = [process[pos]]

    for counter, obj in enumerate(process, 1):

        filename = obj.videofile(args.inputdir)
        # renamed from `input` to avoid shadowing the builtin
        video = bob.io.VideoReader(filename)

        facefile = obj.facefile(args.inputdir)
        locations = preprocess_detections(
            facefile, len(video), facesize_filter=args.min_face_size)

        sys.stdout.write(
            "Processing file %s (%d frames) [%d/%d]..." %
            (filename, video.number_of_frames, counter, len(process)))

        # start the work here...
        vin = video.load()  # load all in one shot.

        prev = bob.ip.rgb_to_gray(vin[0, :, :, :])
        curr = numpy.empty_like(prev)

        data = numpy.zeros((len(vin), 2), dtype='float64')
        data[0] = (numpy.NaN, numpy.NaN)  # frame 0 has no predecessor

        for k in range(1, vin.shape[0]):
            bob.ip.rgb_to_gray(vin[k, :, :, :], curr)

            if locations[k] and locations[k].is_valid():
                sys.stdout.write('.')
                data[k][0] = eval_face_differences(prev, curr, locations[k])
                data[k][1] = eval_background_differences(prev, curr,
                                                         locations[k], None)
            else:
                # no (valid) face detection on this frame
                sys.stdout.write('x')
                data[k] = (numpy.NaN, numpy.NaN)

            sys.stdout.flush()

            # swap buffers: curr <=> prev
            prev, curr = curr, prev

        obj.save(data, args.outputdir, '.hdf5')

        sys.stdout.write('\n')
        sys.stdout.flush()

    return 0
def main():
    """Compute per-frame face/background differences over LBP-transformed frames.

    Variant of the plain frame-difference script: each gray frame is passed
    through ``compute_lbp`` before the face and background differences are
    evaluated.  Writes a two-column (face, bg) float64 matrix per object;
    frames without a valid detection get NaN.
    """

    import bob
    import numpy

    basedir = os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))

    INPUTDIR = os.path.join(basedir, 'database')
    OUTPUTDIR = os.path.join(basedir, 'framediff')

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument(
        '-m', '--mininum-face-size', dest='min_face_size', metavar='PIXELS',
        type=int, default=0,
        help='Minimum face size to be considered when pre-processing and extrapolating detections (defaults to %(default)s)')
    parser.add_argument(
        'inputdir', metavar='DIR', type=str, default=INPUTDIR, nargs='?',
        help='Base directory containing the videos to be treated by this procedure (defaults to "%(default)s")')
    parser.add_argument(
        'outputdir', metavar='DIR', type=str, default=OUTPUTDIR, nargs='?',
        help='Base output directory for every file created by this procedure defaults to "%(default)s")')

    # The next option just returns the total number of cases we will be running
    # It can be used to set jman --array option. To avoid user confusion, this
    # option is suppressed from the --help menu
    parser.add_argument('--grid-count', dest='grid_count', action='store_true',
        default=False, help=argparse.SUPPRESS)

    # Adds database support using the common infrastructure
    # N.B.: Only databases with 'video' support
    import antispoofing.utils.db
    antispoofing.utils.db.Database.create_parser(parser, 'video')

    args = parser.parse_args()

    # checks face sizes
    if args.min_face_size < 0:
        parser.error('Face size cannot be less than zero. Give a value in pixels')

    from antispoofing.utils.faceloc import preprocess_detections
    from .. import eval_face_differences, eval_background_differences

    # Creates an instance of the database
    db = args.cls(args)

    real, attack = db.get_all_data()
    process = real + attack

    if args.grid_count:
        print(len(process))
        sys.exit(0)

    # if we are on a grid environment, just find what I have to process.
    # (`has_key` was removed in Python 3; `in` works on both)
    if 'SGE_TASK_ID' in os.environ:
        pos = int(os.environ['SGE_TASK_ID']) - 1
        if pos >= len(process):
            raise RuntimeError("Grid request for job %d on a setup with %d jobs" %
                               (pos, len(process)))
        process = [process[pos]]

    for counter, obj in enumerate(process, 1):

        filename = obj.videofile(args.inputdir)
        # renamed from `input` to avoid shadowing the builtin
        video = bob.io.VideoReader(filename)

        facefile = obj.facefile(args.inputdir)
        locations = preprocess_detections(
            facefile, len(video), facesize_filter=args.min_face_size)

        sys.stdout.write(
            "Processing file %s (%d frames) [%d/%d]..." %
            (filename, video.number_of_frames, counter, len(process)))

        # start the work here...
        vin = video.load()  # load all in one shot.

        prev = bob.ip.rgb_to_gray(vin[0, :, :, :])
        curr = numpy.empty_like(prev)

        data = numpy.zeros((len(vin), 2), dtype='float64')
        data[0] = (numpy.NaN, numpy.NaN)  # frame 0 has no predecessor

        for k in range(1, vin.shape[0]):
            bob.ip.rgb_to_gray(vin[k, :, :, :], curr)

            if locations[k] and locations[k].is_valid():
                sys.stdout.write('.')
                # NOTE(review): `compute_lbp` is not imported in this view —
                # confirm it is defined/imported at module level.
                # Hoisted: the original computed compute_lbp(prev)/(curr)
                # twice per frame (once for face, once for background).
                lbp_prev = compute_lbp(prev)
                lbp_curr = compute_lbp(curr)
                data[k][0] = eval_face_differences(lbp_prev, lbp_curr,
                                                   locations[k])
                data[k][1] = eval_background_differences(lbp_prev, lbp_curr,
                                                         locations[k], None)
            else:
                # no (valid) face detection on this frame
                sys.stdout.write('x')
                data[k] = (numpy.NaN, numpy.NaN)

            sys.stdout.flush()

            # swap buffers: curr <=> prev
            prev, curr = curr, prev

        obj.save(data, args.outputdir, '.hdf5')

        sys.stdout.write('\n')
        sys.stdout.flush()

    return 0
def main():
    """Cluster per-frame face/background differences into windowed quantities.

    Duplicate of the other clustering entry point in this file: loads each
    object's two-column frame-difference matrix, runs ``cluster_5quantities``
    over each column with the requested window size/overlap, and saves the
    horizontally-stacked result.
    """

    import bob
    import numpy

    basedir = os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))

    INPUTDIR = os.path.join(basedir, 'framediff')
    OUTPUTDIR = os.path.join(basedir, 'clustered')

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument(
        'inputdir', metavar='DIR', type=str, default=INPUTDIR, nargs='?',
        help='Base directory containing the frame differences to be treated by this procedure (defaults to "%(default)s")')
    parser.add_argument(
        'outputdir', metavar='DIR', type=str, default=OUTPUTDIR, nargs='?',
        help='Base output directory for every file created by this procedure (defaults to "%(default)s")')
    # NOTE: the original had stray trailing commas after the next two calls,
    # creating throw-away 1-tuples; they are removed here (no behavior change).
    parser.add_argument(
        '-n', '--window-size', dest="window_size", default=20, type=int,
        help="determines the window size to be used when clustering frame-difference observations (defaults to %(default)s)")
    parser.add_argument(
        '-o', '--overlap', dest="overlap", default=0, type=int,
        help="determines the window overlapping; this number has to be between 0 (no overlapping) and 'window-size'-1 (defaults to %(default)s)")

    # The next option just returns the total number of cases we will be running
    # It can be used to set jman --array option. To avoid user confusion, this
    # option is suppressed from the --help menu
    parser.add_argument('--grid-count', dest='grid_count', action='store_true',
        default=False, help=argparse.SUPPRESS)

    # Adds database support using the common infrastructure
    # N.B.: Only databases with 'video' support
    import antispoofing.utils.db
    antispoofing.utils.db.Database.create_parser(parser, 'video')

    args = parser.parse_args()

    # checks window size and overlap
    if args.window_size <= 0:
        parser.error("window-size has to be greater than 0")

    if args.overlap >= args.window_size or args.overlap < 0:
        parser.error("overlap has to be smaller than window-size and greater or equal zero")

    from .. import cluster_5quantities

    # Creates an instance of the database
    db = args.cls(args)

    real, attack = db.get_all_data()
    process = real + attack

    if args.grid_count:
        print(len(process))
        sys.exit(0)

    # if we are on a grid environment, just find what I have to process.
    # (`has_key` was removed in Python 3; `in` works on both)
    if 'SGE_TASK_ID' in os.environ:
        pos = int(os.environ['SGE_TASK_ID']) - 1
        if pos >= len(process):
            raise RuntimeError("Grid request for job %d on a setup with %d jobs" %
                               (pos, len(process)))
        process = [process[pos]]

    sys.stdout.write('Processing %d file(s)\n' % len(process))
    sys.stdout.flush()

    for counter, obj in enumerate(process, 1):

        sys.stdout.write(
            "Processing file %s [%d/%d] " %
            (obj.make_path(args.inputdir, '.hdf5'), counter, len(process)))

        # renamed from `input` to avoid shadowing the builtin
        framediffs = obj.load(args.inputdir, '.hdf5')

        # column 0 = face differences, column 1 = background differences
        d_face = cluster_5quantities(framediffs[:, 0], args.window_size, args.overlap)
        d_bg = cluster_5quantities(framediffs[:, 1], args.window_size, args.overlap)

        sys.stdout.write('Saving results to "%s"...\n' % args.outputdir)
        obj.save(numpy.hstack((d_face, d_bg)), args.outputdir, '.hdf5')
        sys.stdout.flush()

    sys.stdout.write('\n')
    sys.stdout.flush()

    return 0