def __init__(self, extention, args):
    output_filename = utils.root_path() + _filename_pattern() + extention

    # assemble the 7-Zip command line; the trailing '*' globs the work dir
    self._args = [utils.exe_path('7za'), 'a', output_filename]
    self._args += args
    self._args.append('*')

    self._work_dir = tempfile.mkdtemp(prefix='', dir=utils.temp_path())

    # remove a stale package left over from a previous run, if any
    try:
        os.remove(output_filename)
    except OSError:
        pass
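
# A minimal usage sketch, not part of the original: write_package is a
# hypothetical helper showing how the assembled command might be executed.
# Running 7za from the staging directory lets the trailing '*' pick up
# every file placed there.
import subprocess

def write_package(args, work_dir):
    subprocess.check_call(args, cwd=work_dir)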
def __init__(self, params_path, build_model, load_data=load_data, train=True,
             batch_size=200, learning_rate=1e-2, validate_rate=0.2, epochs=10):
    self.params_path = utils.root_path(params_path)
    self.load_data = load_data
    self.train = train
    self.batch_size = batch_size
    self.validate_rate = validate_rate
    self.epochs = epochs
    self.model = build_model()
    self.compile_model(learning_rate)
    # load trained params if they exist
    self.load_weights()
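
# The constructor ends with self.load_weights(); a plausible implementation
# (an assumption, mirroring the existence check used in
# FaceClassifier.compile_model further down) would be:
def load_weights(self):
    # resume from a previous checkpoint when one exists
    if os.path.exists(self.params_path):
        self.model.load_weights(self.params_path)
    else:
        print('No saved params found; training from scratch.')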
# coding=utf-8
import os

import cv2
import utils
import imutils
import numpy as np
from PIL import Image

from v1.classifier import FaceClassifier

IMAGE_PATH = utils.root_path('doc/images/test2.jpg')
PREDICTION_POSTFIX = '_prediction'
SAVE_PREDICTION = True
PROB_THRESHOLD = 0.5

if __name__ == '__main__':
    classifier = FaceClassifier()
    target_W = utils.IM_WIDTH
    target_H = utils.IM_HEIGHT
    im = Image.open(IMAGE_PATH)
    im = np.array(im)
    faces = utils.detect_faces(im)
    if len(faces):
        xs, ls = [], []
        for (x, y, w, h) in faces:
            face = im[y: y + h, x: x + w, :]
            face = imutils.resize(face, target_W, target_H)
            # keep only crops that resized to the exact model input shape
            if face.shape[0] == target_H and face.shape[1] == target_W:
                xs.append(face)
                ls.append((x, y + 5 if y < 20 else y - 5))
        prediction = classifier.predict(np.array(xs))
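
        # A hypothetical continuation (not in the original): draw each
        # confident prediction back onto the frame at the anchors kept in ls;
        # utils.NAMES mapping class indices to labels is an assumption taken
        # from its use elsewhere in this codebase.
        for (lx, ly), probs in zip(ls, prediction):
            index = int(np.argmax(probs))
            if probs[index] < PROB_THRESHOLD:
                continue  # skip low-confidence faces
            cv2.putText(im, utils.NAMES[index], (lx, ly),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)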
def _configure():
    # a hard-coded seed makes the generated names predictable
    random.seed(31337)

    # Set the current directory to the project's root directory to make the
    # build process portable and self-contained, and to avoid storing anything
    # in the current directory or the user's home/temp directories.
    os.chdir(utils.root_path())

    try:
        os.mkdir(utils.cache_path())
    except OSError:
        pass

    rarfile.UNRAR_TOOL = utils.exe_path('unrar')

    parser = argparse.ArgumentParser()

    # Generic arguments
    parser.add_argument('-v', '--verbosity', type=int,
                        choices=range(0, 4), default=0,
                        help='set output verbosity level')
    parser.add_argument('--disable-optimization',
                        help='disable WAD files optimization',
                        action='store_true')
    parser.add_argument('-c', '--compression', type=str,
                        choices=['none', 'default', 'pk3', '7zpk3', 'pk7'],
                        help='set output file compression')
    parser.add_argument('-p', '--profiling',
                        help='enable Python performance profiling',
                        action='store_true')

    # Operational mode arguments
    parser.add_argument('--zdoomed-souls',
                        help='build ZDoomed Souls instead of '
                             'An Awesome Awesomeness',
                        action='store_true')
    parser.add_argument('--check-repo-update',
                        help='look for new assets in web repository '
                             'instead of building a package',
                        action='store_true')
    parser.add_argument('--clean-asset-cache',
                        help='delete cached assets instead of building a package',
                        action='store_true')
    parser.add_argument('-d', '--dry-run',
                        help='do all build steps but do not write a package',
                        action='store_true')
    parser.add_argument('-s', '--single', type=int, default=0,
                        help='build only one repository asset specified by its index')

    # Patching-related arguments
    parser.add_argument('--allow-set-pitch',
                        help='allow A_SetPitch() calls in DECORATE',
                        action='store_true')
    parser.add_argument('--allow-class-replacement',
                        help='allow class replacement in DECORATE',
                        action='store_true')
    parser.add_argument('--allow-doomednum',
                        help='allow editor number (DoomEdNum) assignment in DECORATE',
                        action='store_true')
    parser.add_argument('--png-sprites',
                        help='convert all sprites to PNG format',
                        action='store_true')
    parser.add_argument('--png-sprites-compression', type=int,
                        choices=range(-1, 10), default=-1,
                        help='set compression level for sprites in PNG format')

    args = parser.parse_args()

    patching.verbosity_level = args.verbosity
    patching.allow_set_pitch = args.allow_set_pitch
    patching.allow_class_replacement = args.allow_class_replacement
    patching.allow_doomednum = args.allow_doomednum
    patching.enable_optimization = not args.disable_optimization
    patching.png_sprites = args.png_sprites
    patching.png_sprites_compression = args.png_sprites_compression

    if args.zdoomed_souls:
        utils.set_mode(utils.MODE_ZDS)

    return args
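
# A hypothetical entry point (not from the original) showing how _configure()
# and the --profiling flag might be wired together; build() is a placeholder
# standing in for the real build routine.
import cProfile

def build(args):
    pass  # placeholder for the actual build steps

def main():
    args = _configure()
    if args.profiling:
        cProfile.runctx('build(args)', globals(), locals())
    else:
        build(args)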
# all modules are in the lib directory
sys.path[0] = os.path.dirname(os.path.abspath(__file__)) + '/../lib'

import doomwad
from case_insensitive import CaseInsensitiveDict
from iwad_lumps import *
from iwad_actors import ACTORS_ALL
from iwad_sndinfo import LOGICAL_SOUNDS_ALL
from iwad_brightmaps import BRIGHTMAPS_ALL
from patching import (actor_stateful_pattern, actor_stateless_pattern,
                      actor_header_regex)
import utils

pk3_filename = utils.root_path() + 'realm667-aaa.pk3'

excluded_lump_names = [
    # lumps
    'ANIMDEFS',
    'DECALDEF',
    'DECORATE',
    'GLDEFS',
    'KEYCONF',
    'LANGUAGE',
    'LOADACS',
    'LOCKDEFS',
    'SNDINFO',
    'TEXTURES',
    # GLDEFS aliases
            train_generator,
            steps_per_epoch=steps_per_epoch,
            callbacks=[ModelCheckpoint(self.weights_path)],
            epochs=self.epoch)
        # self.model.fit(x, y, batch_size=32, epochs=self.epoch)

    def evaluate(self, x, y):
        return self.model.evaluate(x, y, batch_size=32)

    def predict(self, x, standard=True):
        if standard:
            x = x / 255.
        return self.model.predict(x)


PATH_TRAIN = utils.root_path('data/love/roles')
PATH_VAL = utils.root_path('data/love/roles')
# whether to train
TRAIN = True
# whether to validate
VALIDATE = False

if __name__ == '__main__':
    print('Init model.')
    classifier = FaceClassifier(lr=1e-2, epoch=50)
    if TRAIN:
        print('Train model.')
        classifier.train(PATH_TRAIN, classes=utils.NAMES)
    if VALIDATE:
        names, index2name, name2index = utils.parse_name()
class FaceClassifier:
    weights_path = utils.root_path('v1/weights/weights.h5')

    def __init__(self, weight_path=None, lr=1e-2, epoch=30):
        if weight_path is not None:
            self.weights_path = weight_path
        self.epoch = epoch
        self.model = self.build_model()
        self.compile_model(lr)

    def build_model(self):
        model = Sequential()
        model.add(Conv2D(8, (3, 3), activation='relu',
                         input_shape=(utils.IM_HEIGHT, utils.IM_WIDTH, 3)))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Conv2D(16, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(256, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(8, activation='softmax'))
        return model

    def compile_model(self, lr):
        sgd = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(loss='categorical_crossentropy', optimizer=sgd)
        if os.path.exists(self.weights_path):
            self.model.load_weights(self.weights_path)
            print('Loaded weights.h5 successfully.')
        else:
            print('Model params not found.')

    def train(self, train_dir, classes=None, batch_size=32):
        file_num = utils.calculate_file_num(train_dir)
        steps_per_epoch = file_num // batch_size
        print('%d steps per epoch.' % steps_per_epoch)
        train_data_gen = ImageDataGenerator(rescale=1. / 255,
                                            shear_range=0.2,
                                            zoom_range=0.2,
                                            horizontal_flip=True)
        train_generator = train_data_gen.flow_from_directory(
            train_dir,
            classes=classes,
            # flow_from_directory expects target_size as (height, width)
            target_size=(utils.IM_HEIGHT, utils.IM_WIDTH),
            batch_size=batch_size,
            class_mode='categorical')
        utils.ensure_dir(os.path.dirname(self.weights_path))
        self.model.fit_generator(
            train_generator,
            steps_per_epoch=steps_per_epoch,
            callbacks=[ModelCheckpoint(self.weights_path)],
            epochs=self.epoch)
        # self.model.fit(x, y, batch_size=32, epochs=self.epoch)

    def evaluate(self, x, y):
        return self.model.evaluate(x, y, batch_size=32)

    def predict(self, x, standard=True):
        if standard:
            x = x / 255.
        return self.model.predict(x)
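
# A minimal usage sketch (the image path is hypothetical, not from the
# original): classify a single face crop with the trained model.
# predict() rescales pixel values to [0, 1] itself.
import numpy as np
from PIL import Image

classifier = FaceClassifier()
face = Image.open('some_face.jpg').resize((utils.IM_WIDTH, utils.IM_HEIGHT))
probs = classifier.predict(np.array(face, dtype='float32')[np.newaxis, ...])
print('Predicted class index: %d' % np.argmax(probs[0]))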
# coding=utf-8
import cv2
import utils
import imutils
import numpy as np

from v1.classifier import FaceClassifier

# VIDEO_PATH = u'E:/Youku Files/transcode/爱情公寓 第一季 06_超清.mp4'
# SEEK = 2.5985e6
VIDEO_PATH = u'/Users/zijiao/Desktop/love1_3.mp4'
PATH_WRITE = utils.root_path('result.mp4')
SEEK = 2.6005e6
PROB_THRESHOLD = 0.5
CLASSIFY = True
SHOW_VIDEO = True
WRITE_VIDEO = False

if __name__ == '__main__':
    # cap = cv2.VideoCapture(0)
    cap = cv2.VideoCapture(VIDEO_PATH)
    cap.set(0, SEEK)          # property 0 is CAP_PROP_POS_MSEC (seek in ms)
    width = int(cap.get(3))   # property 3 is CAP_PROP_FRAME_WIDTH
    height = int(cap.get(4))  # property 4 is CAP_PROP_FRAME_HEIGHT
    fps = 24  # video frame rate
    # specify the video writer format: I420 for avi, MJPG for mp4
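    # A plausible completion of the comment above (an assumption, not from
    # the original): create the writer that the WRITE_VIDEO flag would use.
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    writer = cv2.VideoWriter(PATH_WRITE, fourcc, fps, (width, height))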
def sign_up(total_count, needed_count):
    # derive a 12-word seed phrase from the bundled English wordlist
    dict_path = os.path.join(utils.root_path(), 'english.txt')
    seed = utils.get_seed(dict_path, 12)
    # split the seed into `total_count` Shamir shares with a recovery
    # threshold of `needed_count`
    keys = shamir.crypto_keys(needed_count, total_count, seed)
    WalletManager.create_wallet(seed.encode())
    return keys
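
# Hypothetical usage (not from the original): a 3-of-5 scheme, where any
# three of the five returned shares should suffice to reconstruct the seed.
shares = sign_up(total_count=5, needed_count=3)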
            continue
        index = np.argmax(prediction[0])
        subdir = utils.NAMES_EN[index]
        save_path = os.path.join(CLASSIFY_PATH, subdir)
        utils.ensure_dir(save_path)
        final_path = os.path.join(save_path, file_name)
        cv2.imwrite(final_path, face)
        # print('File %s saved.' % file_name)
    return im


# input video path
# PATH_VIDEO = u'E:/Youku Files/transcode/爱情公寓 第一季 10_超清.mp4'
PATH_VIDEO = u'/Users/zijiao/Desktop/love1_3.mp4'
# save path in normal (non-classify) mode
PATH_SAVE = utils.root_path('data/love/images')
# save path in classify mode
CLASSIFY_PATH = utils.root_path('data/love/predict')
# seek position (ms)
SEEK = 2.6005e6
# face probability thresholds (0.0 ~ 1.0)
FACE_MIN = 0.0
FACE_MAX = 1.0
# whether to save images
SAVE_IMAGE = True
# whether to classify automatically
CLASSIFY = True
# sample one frame every STRIDES frames
STRIDES = 1
# whether to display the video while processing
SHOW_CONTENT = False
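
# A hypothetical sketch (not from the original) of the capture loop these
# flags configure; process_frame stands in for the face-extraction function
# whose tail is shown above.
cap = cv2.VideoCapture(PATH_VIDEO)
cap.set(cv2.CAP_PROP_POS_MSEC, SEEK)
frame_index = 0
while True:
    ret, frame = cap.read()
    if not ret:
        break
    if frame_index % STRIDES == 0:
        frame = process_frame(frame)
        if SHOW_CONTENT:
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    frame_index += 1
cap.release()
cv2.destroyAllWindows()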
# Standard Library
from sys import stdout
import argparse
import logging
from os.path import abspath, basename, dirname, relpath
from typing import List
from time import strftime

# My Code
from markov_chars import generate as genc
from markov_chunks import generate as genw
from utils import root_path

output_file: str = root_path(
    'generated',
    f"{strftime('%H:%M:%S#%y-%m-%d')}.txt",
)

parser = argparse.ArgumentParser(description='Generate prose.',
                                 prog=basename(dirname(abspath(__file__))))

parser.add_argument('--lookbehind',
                    '-n',
                    metavar='INT',
                    type=int,
                    nargs='?',
                    default=6,
                    choices=tuple(range(1, 20)),
                    required=False,
                    help='max length of lookbehind ngrams')

parser.add_argument('--length',
from PyQt5.QtWidgets import QApplication
import sys
import os

import utils
from app import Application

if __name__ == "__main__":
    icon_path = os.path.join(utils.root_path(), 'icons', 'appicon.png')
    app = QApplication(sys.argv)
    window = Application('DCG', utils.qml_path(), icon_path)
    window.setSource('main.qml')
    window.setup_gui_and_connect_base_signals()
    window.load_state()
    window.show()
    app.exec()
    window.save_state()
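
    # A small variant (an assumption, not part of the original): propagate
    # Qt's exit status to the shell while still persisting state after the
    # event loop ends, replacing the final two lines above.
    exit_code = app.exec()
    window.save_state()
    sys.exit(exit_code)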