def __init__(self):
    """Prepare face-detection state: output loggers and the Haar cascade.

    All output files live under FACE_DETECTION_ROOT.
    """
    root = FACE_DETECTION_ROOT
    # Dataset list path is filled in later by callers.
    self.dataSetFile = ''
    # Frontal-face Haar cascade shipped with OpenCV.
    self.cc = cv2.CascadeClassifier(
        os.path.join(root, 'haarcascade_frontalface_alt.xml'))
    # Separate success / error logs, plus the detected-bounding-box list.
    self.successLogger = Logger(os.path.join(root, 'faceDetection.success'))
    self.errorLogger = Logger(os.path.join(root, 'faceDetection.error'))
    self.boundingboxFile = Logger(os.path.join(root, 'boundingbox.list'))
def detectLandmarks(boundingboxList):
    """Detect facial landmarks for every image listed in `boundingboxList`.

    Each line of `boundingboxList` holds an image path followed by four
    integers (the face bounding box).  Detected landmarks are reprojected
    to absolute image coordinates and appended to `landmark.list` under
    FACE_ALIGNMENT_ROOT, one image path per line followed by " x y" pairs.

    :param boundingboxList: path to the bounding-box list file.
    """
    fl = Landmarker()
    logger = Logger(os.path.join(FACE_ALIGNMENT_ROOT, 'landmark.list'))
    # Context manager guarantees the list file is closed even on error
    # (the original opened/closed it manually).
    with open(boundingboxList, 'r') as fid:
        fLines = fid.read().splitlines()
    for line in fLines:
        word = line.split()
        filename = word[0]
        img = cv2.imread(filename)
        if img is None:
            # cv2.imread returns None for missing/unreadable files;
            # skip instead of crashing inside cvtColor below.
            continue
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Shrink the raw box as in the original pipeline:
        # 10% off the left, keep to 90% right, 20% off the top, full bottom.
        bbox = BBox([int(word[1]), int(word[2]),
                     int(word[3]), int(word[4])]).subBBox(0.1, 0.9, 0.2, 1)
        landmark, status = fl.detectLandmark(gray, bbox)
        # NOTE(review): `status` is never checked — presumably it flags a
        # failed detection; confirm whether failures should be skipped.
        # Map landmarks from bbox-relative to absolute image coordinates.
        landmark = bbox.reprojectLandmark(landmark)
        logger.writeMsg("%s" % filename)
        for x, y in landmark:
            logger.writeMsg(" %s %s" % (str(x), str(y)))
        logger.writeMsg('\n')
        # Force the garbage collector to release per-image buffers so peak
        # memory stays bounded on large datasets.
        gc.collect()
"transition": namedtuple('transition', ('state', 'action', 'reward', 'next_state', 'done')) }) print(f"Environment: {params['env_name']}\n" f"Number of actions: {params['n_actions']}") if params["do_intro_env"]: intro_env() env = make_atari(params["env_name"], params["seed"]) agent = Agent(**params) experiment = Experiment() logger = Logger(agent, experiment=experiment, **params) if not params["train_from_scratch"]: chekpoint = logger.load_weights() agent.online_model.load_state_dict( chekpoint["online_model_state_dict"]) agent.hard_update_target_network() params.update({"beta": chekpoint["beta"]}) min_episode = chekpoint["episode"] print("Keep training from previous run.") else: min_episode = 0 print("Train from scratch.") if params["do_train"]: