def _load_data_set(self):
    clk = Clock()
    print('start loading mosh data.')
    anno_file_path = os.path.join(self.data_folder, 'mosh_annot.h5')
    with h5py.File(anno_file_path, 'r') as fp:  # open read-only
        self.shapes = np.array(fp['shape'])
        self.poses = np.array(fp['pose'])
    print('finished load mosh data, total {} samples'.format(len(self.poses)))
    clk.stop()
def _load_data_set(self):
    clk = Clock()
    self.images = []
    self.kp2ds = []
    self.boxs = []
    self.kp3ds = []
    self.shapes = []
    self.poses = []

    print('start loading hum3.6m data.')
    anno_file_path = os.path.join(self.data_folder, 'annot.h5')
    with h5py.File(anno_file_path, 'r') as fp:  # open read-only
        total_kp2d = np.array(fp['gt2d'])
        total_kp3d = np.array(fp['gt3d'])
        total_shap = np.array(fp['shape'])
        total_pose = np.array(fp['pose'])
        total_image_names = np.array(fp['imagename'])

    assert len(total_kp2d) == len(total_kp3d) and len(total_kp2d) == len(total_image_names) and \
        len(total_kp2d) == len(total_shap) and len(total_kp2d) == len(total_pose)

    l = len(total_kp2d)

    def _collect_valid_pts(pts):
        # keep only key points whose visibility flag is non-zero
        r = []
        for pt in pts:
            if pt[2] != 0:
                r.append(pt)
        return r

    for index in range(l):
        kp2d = total_kp2d[index].reshape((-1, 3))
        if np.sum(kp2d[:, 2]) < self.min_pts_required:
            continue

        lt, rb, v = calc_aabb(_collect_valid_pts(kp2d))
        # np.float was removed from recent NumPy; np.float64 matches the old alias
        self.kp2ds.append(np.array(kp2d.copy(), dtype=np.float64))
        self.boxs.append((lt, rb))
        self.kp3ds.append(total_kp3d[index].copy().reshape(-1, 3))
        self.shapes.append(total_shap[index].copy())
        self.poses.append(total_pose[index].copy())
        self.images.append(os.path.join(self.data_folder, 'image') + total_image_names[index].decode())

    print('finished load hum3.6m data, total {} samples'.format(len(self.kp3ds)))
    clk.stop()
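# The loader above expects an annot.h5 with the keys 'gt2d', 'gt3d', 'shape', 'pose'
# and 'imagename'. The script below is a hypothetical smoke-test fixture only: the
# array shapes (14 joints, 10 shape / 72 pose parameters) and the path prefix are
# illustrative placeholders, not the real Human3.6M layout.
import h5py
import numpy as np

with h5py.File('annot.h5', 'w') as fp:
    n = 4                                      # number of dummy samples
    fp['gt2d'] = np.ones((n, 14, 3))           # 2D key points, third column = visibility
    fp['gt3d'] = np.zeros((n, 14, 3))          # 3D key points
    fp['shape'] = np.zeros((n, 10))            # shape parameters (illustrative size)
    fp['pose'] = np.zeros((n, 72))             # pose parameters (illustrative size)
    fp['imagename'] = np.array([b'/img_%06d.jpg' % i for i in range(n)])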
def _load_data_set(self):
    clk = Clock()
    print('loading LSP ext data.')
    self.images = []
    self.kp2ds = []
    self.boxs = []

    anno_file_path = os.path.join(self.data_folder, 'joints.mat')
    anno = scio.loadmat(anno_file_path)
    kp2d = anno['joints'].transpose(2, 0, 1)  # N x k x 3
    image_folder = os.path.join(self.data_folder, 'images')
    images = sorted(glob.glob(image_folder + '/im*.jpg'))
    for idx in range(len(images)):
        self._handle_image(images[idx], kp2d[idx])

    print('finished load LSP ext data.')
    clk.stop()
def _load_data_set(self):
    self.images = []
    self.kp2ds = []
    self.boxs = []

    clk = Clock()
    print('start loading coco 2017 dataset.')
    # anno_file_path = os.path.join(self.data_folder, 'annotations', 'person_keypoints_train2017.json')
    anno_file_path = os.path.join(self.data_folder, 'annotations', 'person_keypoints_val2017.json')
    with open(anno_file_path, 'r') as reader:
        anno = json.load(reader)

    def _hash_image_id_(image_id_to_info, coco_images_info):
        # index every COCO image record by its numeric id so the per-person
        # annotations can be attached to it below
        for image_info in coco_images_info:
            image_id = image_info['id']
            image_name = image_info['file_name']
            _anno = {}
            # _anno['image_path'] = os.path.join(self.data_folder, 'images', 'train-valid2017', image_name)
            # _anno['image_path'] = os.path.join(self.data_folder, 'images', 'train2017', image_name)
            _anno['image_path'] = os.path.join(self.data_folder, 'images', 'val2017', image_name)
            _anno['kps'] = []
            _anno['box'] = []
            assert image_id not in image_id_to_info
            image_id_to_info[image_id] = _anno

    images = anno['images']
    image_id_to_info = {}
    _hash_image_id_(image_id_to_info, images)

    annos = anno['annotations']
    for anno_info in annos:
        self._handle_anno_info(anno_info, image_id_to_info)

    for k, v in image_id_to_info.items():
        self._handle_image_info_(v)

    print('finished load coco 2017 dataset, total {} samples.'.format(len(self.images)))
    clk.stop()
def _load_data_set(self):
    clk = Clock()
    print('loading LSP data.')
    self.images = []
    self.kp2ds = []
    self.boxs = []

    anno_file_path = os.path.join(self.data_folder, 'joints.mat')
    anno = scio.loadmat(anno_file_path)
    kp2d = anno['joints'].transpose(2, 1, 0)  # N x k x 3
    # the third column of the original LSP annotation flags occluded joints;
    # flip it so it means "visible", matching the other loaders
    visible = np.logical_not(kp2d[:, :, 2])
    kp2d[:, :, 2] = visible.astype(kp2d.dtype)

    image_folder = os.path.join(self.data_folder, 'images')
    images = sorted(glob.glob(image_folder + '/im*.jpg'))
    for idx in range(len(images)):
        self._handle_image(images[idx], kp2d[idx])

    print('finished load LSP data.')
    clk.stop()
def _load_data_set(self):
    clk = Clock()
    self.images = []
    self.kp2ds = []
    self.boxs = []

    print('start loading AI CH keypoint data.')
    anno_file_path = os.path.join(self.data_folder, 'keypoint_train_annotations_20170902.json')
    with open(anno_file_path, 'r') as reader:
        anno = json.load(reader)
    for record in anno:
        image_name = record['image_id'] + self.img_ext
        image_path = os.path.join(self.data_folder, 'keypoint_train_images_20170902', image_name)
        kp_set = record['keypoint_annotations']
        box_set = record['human_annotations']
        self._handle_image(image_path, kp_set, box_set)

    print('finished load Ai CH keypoint data, total {} samples'.format(len(self)))
    clk.stop()
class Telegram:
    def __init__(self, config):
        self.clock = Clock()
        self.config = config
        self.data_centers = {}
        self.ready = False
        self.nearest_dc = None  # TODO: remove
        self.pts = 1  # self.config.get('pts', 0)  # persistent time stamp
        self.qts = 0  # self.config.get('qts', 0)
        self.date = int(mktime(localtime()))  # self.config.get('data', int(mktime(localtime())))
        try:
            with open(self.config["public_key"], 'rb') as f:
                self.public_key = f.read()
        except:
            logging.exception('Server public key is missing!')
            return
        dc_id = list(self.config.setdefault('data_centers', {1: {'host': '149.154.175.10', 'port': 443}}).keys())[0]
        self.getDataCenter(dc_id)  # connect

    def Run(self):
        while self.Step():
            pass

    def Step(self):
        try:
            self.clock.Process()
            data_centers, _, _ = select(self.data_centers.values(), (), (), self.clock.GetTimeout())
            for data_center in data_centers:
                data_center.process()
            return True
        except ConnectionError:
            # TODO: reconnect
            return False
        except:
            logging.exception("The game is up!")
            return False  # TODO: maybe do something smarter here?

    def Call(self, request):
        self.nearest_dc.Call(request, self.rpc_callback)

    def dc_ready(self, dc):
        if not self.ready:
            dc.Call(help_getConfig.Create(), self.rpc_callback)

    def getDataCenter(self, dc_id):
        if dc_id not in self.data_centers:
            if dc_id not in self.config['data_centers']:
                logging.error('Data center #{} is not found'.format(dc_id))
                return
            data_center = DataCenter(dc_id, self.config['data_centers'][dc_id], self.config['api_id'],
                                     self.config.get('lang_code', 'en'), self.public_key, self.clock)
            for method in ('process_updatesTooLong', 'process_updateShortMessage', 'process_updateShortChatMessage',
                           'process_updateShort', 'process_updatesCombined', 'process_updates'):
                setattr(data_center, method, getattr(self, method))
            self.data_centers[dc_id] = data_center
            data_center.Connect(self.dc_ready, self.config.get('test', False))
        return self.data_centers[dc_id]

    def rpc_callback(self, dc, request, result):
        # dispatch to the most specific handler; fall back to the generic ones
        if result.Name() == 'rpc_error':
            handler = getattr(self, 'rpc_' + request.Name() + '_error_' + str(result.error_code),
                              getattr(self, 'rpc_error_' + str(result.error_code), self.rpc_unknown_error))
            return handler(dc, request, result)
        handler = getattr(self, 'rpc_' + request.Name() + '_result_' + result.Name(), self.rpc_unknown)
        return handler(dc, request, result)

    def rpc_error_303(self, dc, request, result):
        match = re.match(r'(\w+?(\d+))(?:: (.+))?', result.error_message)
        if not match:
            logging.error('Unable to parse 303 error: {}'.format(result.error_message))
            return
        dc_id = int(match.group(2))
        return self.getDataCenter(dc_id).Call(request, self.rpc_callback)

    def rpc_error_401(self, dc, request, result):  # UNAUTHORIZED
        # TODO: check that authorization has not already been started
        dc.authorised = False
        dc.Call(request, self.rpc_callback)  # TODO: re-send the request
        if self.nearest_dc is not None and dc.id != self.nearest_dc.id:
            return self.nearest_dc.Call(auth_exportAuthorization.Create(dc.id), self.rpc_callback)
        if 'api_id' not in self.config or 'api_hash' not in self.config:
            logging.error("Get api_id and api_hash from https://my.telegram.org/apps")
            return
        if 'profile' not in self.config or 'phone_number' not in self.config['profile']:
            logging.error('The config does not contain phone number')
            return
        return dc.Call(auth_sendCode.Create(self.config['profile']['phone_number'], 0, int(self.config['api_id']),
                                            self.config['api_hash'], self.config.get('lang_code', 'en')),
                       self.rpc_callback)

    def rpc_unknown_error(self, dc, request, result):
        logging.error('Unhandled rpc error for "{}": {} {}'.format(request.Name(),
                                                                   result.error_code, result.error_message))
        return

    def rpc_unknown(self, dc, request, result):
        logging.error("There is no handler for request \"{}\" with result \"{}\"".format(request.Name(), result.Name()))

    def rpc_help_getConfig_result_config(self, dc, request, result):
        for dc_option in result.dc_options:
            if dc_option.id not in self.config['data_centers']:
                host = dc_option.hostname if dc_option.hostname else dc_option.ip_address
                self.config['data_centers'][dc_option.id] = {'host': host, 'port': dc_option.port}
        dc.Call(help_getNearestDc.Create(), self.rpc_callback)

    def rpc_help_getNearestDc_result_nearestDc(self, dc, request, result):
        self.nearest_dc = self.getDataCenter(result.nearest_dc)
        self.ready = True
        self.Call(account_updateStatus.Create(False))  # check whether we are authorized

    def rpc_auth_sendCode_result_auth_sentCode(self, dc, request, result):
        if result.phone_registered:
            Input('Enter confirmation code: ', self.clock,
                  lambda phone_code: dc.Call(auth_signIn.Create(self.config['profile']['phone_number'],
                                                                result.phone_code_hash, phone_code),
                                             self.rpc_callback))
        else:
            Input('Enter confirmation code: ', self.clock,
                  lambda phone_code: dc.Call(auth_signUp.Create(self.config['profile']['phone_number'],
                                                                result.phone_code_hash, phone_code,
                                                                self.config['profile'].get('first_name', 'John'),
                                                                self.config['profile'].get('last_name', 'Doe')),
                                             self.rpc_callback))

    def rpc_auth_exportAuthorization_result_auth_exportedAuthorization(self, dc, request, result):
        self.data_centers[request.dc_id].Call(auth_importAuthorization.Create(result.id, result.bytes), self.rpc_callback)

    def rpc_auth_signIn_result_auth_authorization(self, dc, request, result):
        dc.authorised = True

    def rpc_auth_signUp_result_auth_authorization(self, dc, request, result):
        dc.authorised = True

    def rpc_auth_importAuthorization_result_auth_authorization(self, dc, request, result):
        dc.authorised = True

    def rpc_account_updateStatus_result_boolFalse(self, dc, request, result):
        self.Call(updates_getDifference.Create(self.pts, self.date, self.qts))

    def rpc_updates_getDifference_result_updates_differenceEmpty(self, dc, request, result):
        pass

    def rpc_updates_getDifference_result_updates_difference(self, dc, request, result):
        pass

    def rpc_updates_getDifference_result_updates_differenceSlice(self, dc, request, result):
        pass

    def process_updatesTooLong(self, dc, request, result):
        self.Call(updates_getDifference.Create(self.pts, self.date, self.qts))

    def process_updateShortMessage(self, dc, request, result):
        pass

    def process_updateShortChatMessage(self, dc, request, result):
        pass

    def process_updateShort(self, dc, request, result):
        pass

    def process_updatesCombined(self, dc, request, result):
        pass

    def process_updates(self, dc, request, result):
        pass
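# A minimal, hypothetical driver for the Telegram class above. It assumes a JSON
# config that provides the keys the class reads ("public_key", "api_id", "api_hash",
# "data_centers", and "profile" with "phone_number"); the file name is illustrative
# and this is not part of the original client.
if __name__ == '__main__':
    import json
    logging.basicConfig(level=logging.INFO)
    with open('config.json') as f:   # hypothetical config file
        cfg = json.load(f)
    Telegram(cfg).Run()              # blocks in the select()-based event loop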
class Engine:
    def __init__(self):
        self.running = True
        self.paused = False
        self.is_typing = False
        self.display = Display(32, 32)
        self.palette = Palette()
        self.clock = Clock()
        self.clock.set_fps(10)
        self.sprites = []
        self.action_limit = 10
        self.action_queue = []
        self.debug_text = ""
        self.title = "Press 'Esc' to quit."

    def perform_action(self):
        K = Keys()
        # Add Keypress to Action Queue
        self.event()
        actn = self.action_queue[0]

        ### Add Key Events Here ###
        if actn == "i":
            self.display.get_console_info()
        if actn == "r":
            self.clock = Clock()
        if actn == "x":
            self.paused = not self.paused
        if actn == K.ESC:
            self.running = False

        # Clears Action Queue
        if len(self.action_queue) > self.action_limit:
            self.action_queue.pop(0)

    def event(self):
        # Get Key Press
        if not self.is_typing:
            sel = Event.keypress()
        else:
            self.is_typing = False
            sel = input(": ")
        self.action_queue.append(sel)

    def draw_sprites(self):
        for s in self.sprites:
            self.display.draw_sprite(s)

    def start(self):
        self.run()

    def update(self):
        self.debug_text = 'FPS({:.1f}) Elapsed({:.10f}) Actions({})'.format(
            # 'Title({}) Display({}, {}) FPS({:.1f}) Elapsed({:.1f}) Actions({})'.format(
            # self.title,
            # self.display.width,
            # self.display.height,
            self.clock.fps,
            self.clock.elapsed,
            self.action_queue)
        self.clock.tick()
        # self.display.refresh()
        # size = self.display.get_console_size()
        # self.display.set_console_size(size.x, size.y)
        # Draw Here
        # self.draw_sprites()
        # Game Frame
        # self.display.render()

    def run(self):
        while self.running:
            if not self.paused:
                self.update()
                print(self.debug_text)
            self.perform_action()
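# Minimal way to launch the engine above, assuming Display, Palette, Clock, Keys and
# Event are importable from the surrounding project (not part of the original file):
if __name__ == '__main__':
    Engine().start()   # runs the update/input loop until Esc is pressed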
from multiprocessing import Process, Array
from time import sleep

from timer import Clock

if __name__ == '__main__':
    timer = Clock()
    time = Array("i", 2)  # two shared integers updated by the worker process
    clocker = Process(target=timer.increment, args=(time,))
    clocker.start()
    sleep(4)
    print(str(time[0]).zfill(2) + ":" + str(time[1]).zfill(2))
    clocker.join()
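# The `timer` module used above is not shown. This is a hypothetical sketch of a
# Clock.increment compatible with Process(target=timer.increment, args=(time,)),
# assuming the shared Array holds [minutes, seconds]; the real implementation may differ.
#
# timer.py (sketch)
from time import sleep


class Clock:
    def increment(self, shared):
        # tick once per second, rolling seconds over into minutes
        while True:
            sleep(1)
            shared[1] += 1
            if shared[1] >= 60:
                shared[1] = 0
                shared[0] += 1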
# Training-script fragment: `config`, `Net`, `src_dataloader` and `tgt_dataloader`
# are assumed to be defined earlier in the script, and `cycle` is itertools.cycle.
tgt_dis_labels = torch.ones(config.batch_size, dtype=torch.long)

print('Initializing network ...')
if config.is_cuda:
    net = Net(config).cuda()
else:
    net = Net(config)

criterion_label = torch.nn.CrossEntropyLoss()
criterion_domain = torch.nn.CrossEntropyLoss()
# optimizer_F = torch.optim.SGD(net.feature_extractor.parameters(), lr=0.001, momentum=0.9)
# optimizer_D = torch.optim.SGD(net.adversarial_classifier.parameters(), lr=0.001, momentum=0.9)
optimizer_F = torch.optim.Adam(net.feature_extractor.parameters())
optimizer_D = torch.optim.Adam(net.adversarial_classifier.parameters())

print('Starting training ...')
clock_epoch = Clock(config.epochs)
for epoch in range(config.epochs):
    print(' Epoch {}/{}'.format(epoch + 1, config.epochs))
    step = 0
    clock_batch = Clock(len(src_dataloader))
    for src_data, tgt_data in zip(src_dataloader, cycle(tgt_dataloader)):
        print(' Batch {}/{}'.format(step + 1, len(src_dataloader)))
        src_inputs, src_labels = src_data
        tgt_inputs, tgt_labels = tgt_data
        # src_inputs = torch.autograd.Variable(src_inputs)
        # tgt_inputs = torch.autograd.Variable(tgt_inputs)
        if config.is_cuda:
            src_inputs, src_labels = src_inputs.cuda(), src_labels.cuda()
            tgt_inputs, tgt_labels = tgt_inputs.cuda(), tgt_labels.cuda()
        else:
            src_inputs, src_labels = src_inputs, src_labels  # no-op: tensors stay on the CPU
import importlib.util  # import the submodule explicitly; `import importlib` alone does not guarantee it is loaded
import re

from control import Control


def main(*args):
    """Launch simulation from provided package name."""
    name = "barabasi"
    animate = False
    for arg in args[1:]:
        if arg == "-a":
            animate = True
        else:
            name = re.sub("[^a-zA-Z_]", "", arg)
    if importlib.util.find_spec(name) is None:
        raise ModuleNotFoundError(f"Package '{name}' wasn't found.")
    package = importlib.import_module(name)
    Control(package.Model, package.View).run(animate)


if __name__ == "__main__":
    import sys

    from timer import Clock

    main(*sys.argv)
    for name, (n, time) in Clock.report().items():
        print(f"{name} (x{n}): {time:.3f} s")
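# The launcher above only requires that the named package expose `Model` and `View`
# attributes for Control to use. A minimal hypothetical package stub is sketched
# below; these are illustrative placeholders, not the real `barabasi` implementation.
#
# barabasi/__init__.py (sketch)
class Model:
    """Placeholder simulation model passed to Control."""


class View:
    """Placeholder view that Control uses to render the model."""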