def cross_validation(data, K, epoch, class_num, batch_size):
    # data: list of per-class lists of image paths.
    # Assumes project-local helpers: tools.slice_train_test, tools.read_image
    # and Network_config.
    category = len(data)
    print(category)
    print("=========================")
    # if shuffle:
    #     for c in range(category):
    #         random.shuffle(data[c])
    for i in range(0, K):
        print("%d fold" % i)
        train_data_path = []
        train_label = []
        test_data_path = []
        test_label = []
        # Split every class into train/test parts for the current fold.
        for c in range(category):
            part_train_data_path, part_test_data_path = tools.slice_train_test(
                data[c], i, K)
            for train_len in range(len(part_train_data_path)):
                train_data_path.append(part_train_data_path[train_len])
                train_label.append(c)
            for test_len in range(len(part_test_data_path)):
                test_data_path.append(part_test_data_path[test_len])
                test_label.append(c)
        print(len(train_data_path), len(train_label))
        print(len(test_data_path), len(test_label))
        # Record the split of this fold for later inspection.
        with open('records.txt', 'a+') as record:
            record.write("%d fold\n" % i)
            record.write(str(train_data_path) + '\n')
            record.write(str(test_data_path) + '\n')
        # Load the images (resized to 224x224) for this fold.
        train_data = []
        test_data = []
        for train_path in train_data_path:
            train_data.append(tools.read_image(train_path, 224, 224, True))
        for test_path in test_data_path:
            test_data.append(tools.read_image(test_path, 224, 224, True))
        # Train and evaluate the network on this fold.
        Network_config(class_num=class_num, epoch=epoch, initial_epoch=0,
                       batch_size=batch_size, train_data=train_data,
                       train_label=train_label, test_data=test_data,
                       test_label=test_label, fold=i)
import pickle

import face_recognition
import piexif

# `tools` (project image helpers) and `encoder` (a face encoder instance) are
# assumed to be provided by the surrounding module.


def get_face(fname):
    # Try to reuse an encoding cached in the image's EXIF ImageDescription tag.
    try:
        encoding = pickle.loads(
            piexif.load(fname)["0th"][piexif.ImageIFD.ImageDescription])
        print('Use cached: ' + fname)
        return encoding['encoding']
    except Exception:
        pass
    try:
        image = tools.read_image(fname, 1000)
    except Exception:
        print(f'image {fname} reading failed')
        return None
    # Detect faces; accept only images that contain exactly one face.
    boxes = face_recognition.face_locations(image, model='cnn')
    if len(boxes) != 1:
        print(f'Image contains {len(boxes)} faces')
        return None
    return encoder.encode(image, boxes)[0][0]
def update(patt, db, num_jitters, encoding_model, max_size, out_size):
    # Re-encode pattern faces using the bounding boxes stored in the DB.
    # Relies on project-local helpers: faceencoder, tools, get_from_db and log.
    encoder = faceencoder.FaceEncoder(encoding_model=encoding_model,
                                      num_jitters=num_jitters,
                                      align=True)
    # TODO: add skip encoding loading or something like it
    files_faces = list(db.get_all()[1])
    encodings, names, filenames = patt.encodings()
    for patt_fname, enc in zip(filenames, encodings):
        fname, box = get_from_db(files_faces, db, patt_fname)
        if fname is None:
            log.warning(f'Not found in db: {patt_fname}')
            continue
        log.debug(f'Found in db file: {fname} {box}')
        try:
            image = tools.read_image(fname, max_size)
        except Exception as ex:
            log.warning(f"Can't read image: {fname}: {ex}")
            continue
        try:
            # Re-encode the face from the stored bounding box.
            new_encodings, landmarks = encoder.encode(image, (box,))
            if not tools.test_landmarks(landmarks[0]):
                log.warning(f'bad face detected in {patt_fname}')
                continue
            enc = {
                'box': box,
                'encoding': new_encodings[0],
                'frame': 0,
                'landmarks': landmarks[0],
            }
            tools.save_face(patt_fname, image, enc, out_size, fname)
            log.info(f'Updated: {patt_fname}')
        except Exception:
            log.exception(f'Failed: {patt_fname}')