def __init__(self):
    super().__init__()
    self._stub = self.connect()
    self.network = Network(self)
    self.update_blockheight()
    self.set_info()
    self.public_active_channels = self.get_open_channels(public_only=True, active_only=True)
def init(self):
    self.net = Network('localhost')
    pygame.init()
    self.build_board()
    self.build_main_menu()
    pygame.display.flip()
    self.main_menu()
def test_save_and_load_model(self):
    self.network.define_model()
    first_prediction = self.network.batch_prediction(np.array([[1, 1]]))
    self.network.save_network('saved_model')
    self.second_network = Network(2, 2)
    self.second_network.load_network('saved_model')
    second_prediction = self.second_network.batch_prediction(np.array([[1, 1]]))
    self.assertTrue((first_prediction == second_prediction).all())
def __init__(self, num_layers, heads, head_conv, down_ratio, weight_path):
    self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    self.net = Network(num_layers, heads, head_conv, down_ratio).to(self.device)
    self.net.load_state_dict(torch.load(weight_path))
    self.net.eval()
    self.down_ratio = down_ratio
def __init__(self, frameset_size, num_actions, last_actions_size):
    super().__init__(num_actions)
    self.sample_size = 200
    self.num_epochs = 1
    self.epsilon = 1.0  # exploration rate
    self.epsilon_decay = 0.996
    self.epsilon_min = 0.01
    self.gamma = 0.95  # discount rate
    self.frameset_size = frameset_size
    self.learn_count = 0
    self.last_actions_size = last_actions_size
    self.num_actions = num_actions
    self.policy_network = Network(frameset_size, num_actions, last_actions_size=last_actions_size)
    self.target_network = Network(frameset_size, num_actions, last_actions_size=last_actions_size)
    self.target_network.weights = self.policy_network.weights
    self.target_network_update_rate = 5
    self.replay_memory = ReplayMemory(capacity=50000)
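# Hedged sketch (not part of the original snippet): one way the policy and
# target networks initialised above might be kept in sync. The `learn` method
# name, its gradient step, and the direct `weights` copy are assumptions based
# only on the attributes set in __init__ (learn_count, target_network_update_rate).
def learn(self, batch):
    # ... gradient update of self.policy_network on `batch` would go here ...
    self.learn_count += 1
    # Assumed sync strategy: copy policy weights into the target network
    # every `target_network_update_rate` learning steps.
    if self.learn_count % self.target_network_update_rate == 0:
        self.target_network.weights = self.policy_network.weights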
def __init__(self, GameState):
    # Initialise pygame
    pygame.init()
    pygame.font.init()
    pygame.mixer.init()
    pygame.display.set_caption(self.caption)
    self.screen = pygame.display.set_mode(
        self.dimensions,
        pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE)
    # Delegate
    self.net = Network()
    self.GameState = GameState
    self.state = MenuState(self)
def main():
    logging.basicConfig(format=u'%(levelname)-8s [%(asctime)s] %(message)s', level=logging.DEBUG)
    ########################################################
    n = Network()
    vk = VK(n)
    print('Token is: ' + vk.auth_direct(Config.USER_LOGIN, Config.USER_PASSWORD,
                                        Config.APP_ID, Config.APP_SECRET))
    ########################################################
    logging.info("--- APP END ---")
    return
def __init__(self, networkdb: 'NetworkDb', config: Config):
    self.networkdb: 'NetworkDb' = networkdb
    self.config: Config = config
    self.time: int = 0
    self.network: Network = Network(networkdb)
    self.trains: List[Train] = []
    self.start_minutes = {}
    if not self.config.deduce_schedule:
        self.read_trains()
    else:
        self.deduce_trains()
    self.all_routes = []
    self.passengers = None
    if config.num_passengers_per_route > 0:
        self.create_passengers()
    self.graph = None
    self.positions = None
    self.xlim = None
    self.ylim = None
    self.minute = -1
def __init__(self, window):
    self.window = window
    ## objects ##
    self.netevent = None
    self.apic = Calls()
    self.processor = ProcessEvent()
    self.network = Network(self.window)
    self.login = LoginDialog(self)
    self.friend = FriendDialog(self.apic, self.login)
    self.chat = Chat(self.window, self.network, self.apic, loop, self.login)
    self.classroom_buttons = {}
    self.previous = None
    self.commands = {
        'button_login': self.login.show,
        'button_new': self.friend.show,
        'emoji_dialog': self.chat.dialog_emoji.start,
        'chat_send': self.chat.addMsgSelf,
        'emojize': self.chat.emojize,
    }
def run(args):
    x_dim = [76, 91, 75]
    y_dim = 10
    splits = [0.8, 0.1, 0.1]
    X_data = []
    Y_data = []
    num_examples = 10
    for i in range(1, num_examples + 1):
        f_x = os.path.join(args.dataset, f'X_Img_Values{i}.npy')
        f_y = os.path.join(args.dataset, f'YValues{i}.npy')
        X_data.append(torch.tensor(np.load(f_x)))
        Y_data.append(torch.tensor(np.load(f_y)))
    # (batch, channel, width, height)
    X_data = torch.cat(X_data, dim=0).view([-1, x_dim[2], *x_dim[:2]])
    Y_data = torch.cat(Y_data, dim=0).view([-1, y_dim])
    assert X_data.shape[0] == Y_data.shape[0]
    total_size = X_data.shape[0]
    X_train = X_data[:int(total_size * splits[0]), ...]
    Y_train = Y_data[:int(total_size * splits[0]), ...]
    X_dev = X_data[int(total_size * splits[0]):int(total_size * (splits[0] + splits[1])), ...]
    Y_dev = Y_data[int(total_size * splits[0]):int(total_size * (splits[0] + splits[1])), ...]
    X_test = X_data[int(total_size * (splits[0] + splits[1])):, ...]
    Y_test = Y_data[int(total_size * (splits[0] + splits[1])):, ...]
    train_dataset = Data.TensorDataset(X_train, Y_train)
    dev_dataset = Data.TensorDataset(X_dev, Y_dev)
    test_dataset = Data.TensorDataset(X_test, Y_test)
    train_loader = Data.DataLoader(dataset=train_dataset, batch_size=16, shuffle=True)
    dev_loader = Data.DataLoader(dataset=dev_dataset, batch_size=16, shuffle=False)
    test_loader = Data.DataLoader(dataset=test_dataset, batch_size=16, shuffle=False)
    net = Network(in_channels=x_dim[2], out_size=y_dim)
    if torch.cuda.is_available():
        net = nn.DataParallel(net)
        net.cuda()
    opt = torch.optim.Adam(net.parameters(), lr=0.001)
    loss_func = nn.MSELoss(reduction='mean')
    for epoch_index in range(20):
        st = time.time()
        torch.set_grad_enabled(True)
        net.train()
        for train_batch_index, (img_batch, label_batch) in enumerate(train_loader):
            if torch.cuda.is_available():
                img_batch = img_batch.cuda()
                label_batch = label_batch.cuda()
            predict = net(img_batch)
            loss = loss_func(predict.float(), label_batch.float())
            net.zero_grad()
            loss.backward()
            opt.step()
        print('(LR:%f) Time of an epoch: %.4fs' % (opt.param_groups[0]['lr'], time.time() - st))
        # evaluation
        torch.set_grad_enabled(False)
        net.eval()
        total_loss = []
        total_sample = 0
        for dev_batch_index, (img_batch, label_batch) in enumerate(dev_loader):
            if torch.cuda.is_available():
                img_batch = img_batch.cuda()
                label_batch = label_batch.cuda()
            predict = net(img_batch)
            loss = loss_func(predict.float(), label_batch.float())
            total_loss.append(loss)
            total_sample += img_batch.size(0)
        mean_loss = sum(total_loss) / len(total_loss)
        print('[Test] epoch[%d/%d] loss:%.4f\n' % (epoch_index, 20, mean_loss.item()))
    return param_group['lr']


logging.basicConfig(level=logging.INFO,
                    format='(%(asctime)s %(levelname)s) %(message)s',
                    datefmt='%d %b %H:%M:%S',
                    filename='logs/region_layer.log',
                    filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('(%(levelname)s) %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)

# network parameters
net = Network(cfg.class_number)
# print(net)
if torch.cuda.is_available():
    net.cuda(cfg.cuda_num)

dataset = DataSet(cfg)
# train samples
train_sample_nb = len(dataset.train_dataset)
# batch numbers
train_batch_nb = len(dataset.train_loader)
test_sample_nb = len(dataset.test_dataset)
test_batch_nb = len(dataset.test_loader)
logging.info('Train batch[%d] sample[%d]' % (train_batch_nb, train_sample_nb))
    train=True,
    transform=torchvision.transforms.ToTensor(),
    download=True)
test_data = torchvision.datasets.MNIST(
    root='./mnist/',
    transform=torchvision.transforms.ToTensor(),
    train=False)
train_loader = Data.DataLoader(dataset=train_data, batch_size=128, shuffle=True)
test_loader = Data.DataLoader(dataset=test_data, batch_size=128, shuffle=False)
train_batch_num = len(train_loader)
test_batch_num = len(test_loader)
net = Network(adjustable_mode=True, adj_kernel_size=4, adj_stride=4, adj_avgpool=True)
if torch.cuda.is_available():
    net = nn.DataParallel(net)
    net.cuda()
opt = torch.optim.Adam(net.parameters(), lr=0.001)
loss_func = nn.CrossEntropyLoss()
for epoch_index in range(10):
    st = time.time()
    torch.set_grad_enabled(True)
    net.train()
    for train_batch_index, (img_batch, label_batch) in enumerate(train_loader):
        if torch.cuda.is_available():
""" # number of training samples num_train_samples = 10000 # number of test samples num_test_samples = 100 # inlet flow velocity u0 = 1 # density rho = 1 # viscosity nu = 0.01 # build a core network model network = Network().build() network.summary() # build a PINN model pinn = PINN(network, rho=rho, nu=nu).build() # create training input xy_eqn = np.random.rand(num_train_samples, 2) xy_ub = np.random.rand(num_train_samples//2, 2) # top-bottom boundaries xy_ub[..., 1] = np.round(xy_ub[..., 1]) # y-position is 0 or 1 xy_lr = np.random.rand(num_train_samples//2, 2) # left-right boundaries xy_lr[..., 0] = np.round(xy_lr[..., 0]) # x-position is 0 or 1 xy_bnd = np.random.permutation(np.concatenate([xy_ub, xy_lr])) x_train = [xy_eqn, xy_bnd] # create training output zeros = np.zeros((num_train_samples, 2))
def main():
    memory_bank = Memory(MEMORY_SIZE)
    pong_game = Game(GAME_LENGTH, GAME_STEP_TIME)
    champion = Network(3, 7, hidden_layer_size=HIDDEN_LAYER_SIZE,
                       no_hidden_layers=NO_HIDDEN_LAYERS, learning_rate=LEARNING_RATE)
    competitor = Network(3, 7, hidden_layer_size=HIDDEN_LAYER_SIZE,
                         no_hidden_layers=NO_HIDDEN_LAYERS)
    trainer = Trainer(pong_game, memory_bank, champion, competitor,
                      MAX_EPSILON, MIN_EPSILON, EPSILON_DECAY, GAMMA,
                      RETURNS_DECAY, WINNERS_GROWTH, batch_size=BATCH_SIZE)
    champion.save_network(DIRECTORY + '/version_' + str(STARTING_VERSION))
    for version in range(STARTING_VERSION, STARTING_VERSION + NUMBER_OF_TRAINING_SESSIONS):
        start_time = time.time()
        for _ in range(GAMES_PER_TRAINING_SESSION):
            print('New game')
            trainer.run_game()
            trainer.game = Game(GAME_LENGTH, GAME_STEP_TIME)
        print("Time taken for training session: ", time.time() - start_time)
        champion.save_network(DIRECTORY + '/version_' + str(version + 1))
        current_epsilon = trainer.epsilon
        current_returns_parameter = trainer.returns_parameter
        current_winners_parameter = trainer.winners_parameter
        trainer = Trainer(Game(GAME_LENGTH, GAME_STEP_TIME), memory_bank, champion, competitor,
                          current_epsilon, MIN_EPSILON, EPSILON_DECAY, GAMMA,
                          RETURNS_DECAY, WINNERS_GROWTH,
                          returns_parameter=current_returns_parameter,
                          winners_parameter=current_winners_parameter,
                          batch_size=BATCH_SIZE)
        test_score = trainer.test_game()
        if test_score < 0:
            print('Competitor wins, score was ' + str(test_score))
            competitor.save_network(DIRECTORY + '/competitor_save')
            champion.load_network(DIRECTORY + '/competitor_save')
        else:
            print('Champion continues, score was ' + str(test_score))
        new_competitor_version = random.randint(max(0, version - 5), version)
        print('New competitor version: ' + str(new_competitor_version))
        competitor.load_network(DIRECTORY + '/version_' + str(new_competitor_version))
        current_epsilon = trainer.epsilon
        print('epsilon is ' + str(current_epsilon))
        current_returns_parameter = trainer.returns_parameter
        current_winners_parameter = trainer.winners_parameter
        trainer = Trainer(Game(GAME_LENGTH, GAME_STEP_TIME), memory_bank, champion, competitor,
                          current_epsilon, MIN_EPSILON, EPSILON_DECAY, GAMMA,
                          RETURNS_DECAY, WINNERS_GROWTH,
                          returns_parameter=current_returns_parameter,
                          winners_parameter=current_winners_parameter,
                          batch_size=BATCH_SIZE)
train_data = torchvision.datasets.MNIST(root='./mnist', train=True,
                                        transform=torchvision.transforms.ToTensor(),
                                        download=True)
test_data = torchvision.datasets.MNIST(root='./mnist/',
                                       transform=torchvision.transforms.ToTensor(),
                                       train=False)
train_loader = Data.DataLoader(dataset=train_data, batch_size=128, shuffle=True)
test_loader = Data.DataLoader(dataset=test_data, batch_size=128, shuffle=False)
train_batch_num = len(train_loader)
test_batch_num = len(test_loader)
net = Network()
if torch.cuda.is_available():
    net = nn.DataParallel(net)
    net.cuda()
opt = torch.optim.Adam(net.parameters(), lr=0.001)
loss_func = nn.CrossEntropyLoss()
for epoch_index in range(20):
    st = time.time()
    for train_batch_index, (img_batch, label_batch) in enumerate(train_loader):
        img_batch = Variable(img_batch)
        label_batch = Variable(label_batch)
        if torch.cuda.is_available():
def setUp(self):
    self.network = Network(2, 2)
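# Hedged sketch (not in the original tests): an additional test this setUp
# fixture could support, reusing the define_model/batch_prediction calls shown
# in the save/load test above. The expected output width of 2 is an assumption
# about Network(2, 2), not something stated in the original.
def test_prediction_shape(self):
    self.network.define_model()
    prediction = self.network.batch_prediction(np.array([[1, 1]]))
    self.assertEqual(prediction.shape[-1], 2)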
from flask import Flask

from lib.chain import Chain
from lib.network import Network
from lib.qbc_utils import QbcUtils
from modules.transactions.controllers import transactions_blueprint
from modules.mining.controllers import mining_blueprint
from modules.network.controllers import network_blueprint
from modules.chain.controllers import chain_blueprint

node = Flask(__name__)
QBC = Chain()
QBCN = Network()
QBCU = QbcUtils()
port = QBCU.get_port()

# Registering all the modules
node.register_blueprint(transactions_blueprint)
node.register_blueprint(mining_blueprint)
node.register_blueprint(network_blueprint)
node.register_blueprint(chain_blueprint)

# Discover full network and register on each of the nodes
network = QBCN.discover_network()
live_nodes = network["registered_nodes"]
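# Hedged sketch (not in the original snippet): a plausible entry point that
# starts the Flask node on the port read from QbcUtils above; the host value
# is an assumption.
if __name__ == '__main__':
    node.run(host='0.0.0.0', port=port)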
from lib.data import loaddata
from lib.simple import SimpleTrain, SimpleShow

inputs = [InpNeu(), InpNeu()]
layer = [Neu('tanh'), Neu('logistic'), Neu('logistic')]
outputs = [OutNeu('logistic')]

xor_net = Network(
    inputs=inputs,
    outputs=outputs,
    links=[
        Link(inputs[0], layer[0]),  # default link is: out = 1*inp + 0
        Link(inputs[0], layer[1]),
        Link(inputs[0], layer[2]),
        #
        Link(inputs[1], layer[0]),
        Link(inputs[1], layer[1]),
        Link(inputs[1], layer[2]),
        #
        Link(layer[0], outputs[0]),
        Link(layer[1], outputs[0]),
        Link(layer[2], outputs[0]),
    ]
)

xor_data = loaddata('path/to/csv-file')
SimpleTrain(xor_net, xor_data, time='20s', target_error=1e4)
SimpleShow(xor_net, xor_data)

############### Train Btc ###############
def main():
    logging.basicConfig(format=u'%(levelname)-8s [%(asctime)s] %(message)s', level=logging.INFO)
    ########################################################
    FM.do_directory(Config.OUTPUT_DIRECTORY)
    n = Network()
    vk = VK(n, Config.USER_TOKEN)
    current = 0
    total = 1
    while current < total:
        data = vk.api_get_user_wall_raw(Config.USER_LINK, current, VK.WALL_MAX_COUNT, Config.USER_HASH)
        total = vk.json_wall_get_total(data)
        for record in data[VK.RESPONSE]['items']:
            logging.info('Progress: ' + str(current) + ' / ' + str(total))
            current += 1
            date = datetime.fromtimestamp(record['date']).strftime('%Y_%m_%d')
            FM.do_directory(Config.OUTPUT_DIRECTORY + date)
            if VK.ATTACHMENTS not in record:
                logging.info('Skip date: ' + date + ' because the post has no audio')
                continue
            for attach in record['attachments']:
                if attach['type'] == 'audio':
                    file_name = FM.safe_file_name(attach['audio']['artist'] + ' - ' +
                                                  attach['audio']['title'] + '.mp3')
                    file_path = Config.OUTPUT_DIRECTORY + date + '/' + file_name
                    file_size = FM.file_size(file_path)
                    if Config.SKIP_EXISTS and file_size > 0:
                        logging.info('Skip file [' + file_name + '] because it exists (size: ' + str(file_size) + ')')
                        continue
                    logging.info('Getting file [' + file_name + ']...')
                    url = attach['audio']['url']
                    if url == '':
                        if file_size == -1:
                            logging.info('DMCA file. Create empty and skip...')
                            open(file_path, 'w').close()
                        else:
                            logging.info('DMCA file. Skip...')
                        continue
                    time.sleep(1)
                    n.do_get(url, True)
                    if n.last_answer.status_code == 200:
                        with open(file_path, 'wb') as f:
                            for chunk in n.last_answer:
                                f.write(chunk)
                    else:
                        logging.warning('Wrong HTTP answer...')
                        continue
                    logging.info('Downloaded ok...')
    ########################################################
    logging.info("--- APP END ---")
    return
    for hop, decoy_cands in target.decoy_cands.items():
        stats[hop] = Counter()
        for attacker in attackers:
            cands = [cand for cand in decoy_cands if cand != attacker]
            for decoy in cands:
                path = network.paths[attacker][decoy]
                stats[hop]['passed'] += is_sublist(target.link, path)
                stats[hop]['total'] += 1
            stats[hop]['skipped'] += len(cands) == 0
    return stats


if __name__ == '__main__':
    H = range(1, 7)
    network = Network('./data/dataset_1000a.txt')
    isolated_nodes = [n for n, d in network.graph.degree() if d == 1]
    edges = [edge for edge in network.edges
             if edge[0] not in isolated_nodes and edge[1] not in isolated_nodes]
    num_targets = math.ceil(len(edges) / 2)
    num_attackers = math.ceil(len(network.nodes) / 2)
    targets = [Target(edge, network.graph, H) for edge in tqdm(random.sample(edges, num_targets))]
    attackers = random.sample(network.nodes, num_attackers)
    with Pool() as p:
        stats = p.map(attack, [(target, attackers) for target in targets])
    stats_h = {h: reduce(lambda x, y: Counter(x, **y), [stat[h] for stat in stats])
    type=int,
    default=0,
    help="sets the reward for negative change in health. Default: 0",
)
args = parser.parse_args()

device = torch.device("cuda" if args.gpu else "cpu")

# Initialise the game instance
game = ExtendedGame(args.game_config_file_path)
game.set_window_visible(True)
game.init()

# Setup and load the network
net_shape = (args.num_frames_stacked, args.width, args.height)
net = Network(net_shape, game.n_actions).to(device)
net.load_state_dict(torch.load(args.model))
net.eval()

# Initialise the agent
agent = Agent(
    game,
    net,
    None,
    args.width,
    args.height,
    args.num_frames_stacked,
    args.repeat_for_frames,
    args.health_reward,
)
logging.getLogger().setLevel(logging.INFO)
logging.info('Mode: %s' % cfg.mode)

if not os.path.exists(cfg.model_dir):
    os.mkdir(cfg.model_dir)
if not os.path.exists(cfg.transform_img_dir):
    os.mkdir(cfg.transform_img_dir)

train_loader, test_loader = get_data_loader()
train_batch_nb = len(train_loader)
test_batch_nb = len(test_loader)
print('Train batch_nb:%d' % train_batch_nb)
print('Test batch_nb:%d' % test_batch_nb)

net = Network(mode=cfg.mode)
if torch.cuda.is_available():
    net.cuda(cfg.cuda_num)
opt = torch.optim.Adam(net.parameters(), lr=cfg.LR)
loss_func = nn.CrossEntropyLoss()

for epoch_idx in range(cfg.epoch):
    # ========================== Training Model =============================
    net.train()
    for batch_idx, (train_img, train_target) in enumerate(train_loader):
        train_img = Variable(train_img)
        train_target = Variable(train_target)
        if torch.cuda.is_available():
            train_img = train_img.cuda(cfg.cuda_num)
parser.add_argument('mode', type=str, help='',
                    choices=['train', 'convert', 'summary', 'predict'])
args = parser.parse_args()

BATCH_SIZE = 2048
HIDDEN_SIZE = 512
FEATURE_SIZE = 840
LABEL_SIZE = 137

if args.mode == 'convert':
    convert()
elif args.mode == 'summary':
    network = Network(BATCH_SIZE, HIDDEN_SIZE, FEATURE_SIZE, LABEL_SIZE)
    network.model.summary()
elif args.mode == 'train':
    network = Network(BATCH_SIZE, HIDDEN_SIZE, FEATURE_SIZE, LABEL_SIZE)
    network.model.compile(
        optimizer='Adam',
        loss='categorical_crossentropy',
        metrics=['categorical_accuracy', 'top_k_categorical_accuracy'])
    generator = Generator(LABEL_SIZE, './data/train_data.npz')
    val = Generator(LABEL_SIZE, './data/val_data.npz')
    network.model.fit_generator(generator=generator.generate(BATCH_SIZE),
                                validation_data=val.generate(BATCH_SIZE),
                                validation_steps=1,
                                steps_per_epoch=128,
                                epochs=1024,
                                callbacks=[
for i in range(12):
    reward += game.make_action_int(4, 4)
print("reward {}".format(reward))
input("Press any key to continue")

capacity = 10
nsteps = 2
gamma = 0.99
experience_buffer = ExperienceBuffer(capacity, nsteps, gamma)

num_frames_stacked = 4
width = 54
height = 54
repeat_for_frames = 4
net_shape = (num_frames_stacked, width, height)
train_net = Network(net_shape, game.n_actions).to(device)
target_net = Network(net_shape, game.n_actions).to(device)

agent = Agent(
    game,
    train_net,
    experience_buffer,
    width,
    height,
    num_frames_stacked,
    repeat_for_frames,
)
print(
    "Test Agent: state should be shaped num frames {} width {} height {}".format(
        num_frames_stacked, width, height))
assert agent.state.shape == (