def main() -> None:
    usage = '''
Usage (from `PyBoardTypeshedGenerator`'s directory) one of:
  1. `python3 main.py <destination directory>`.
  2. `./main.py <destination directory>` (if `main.py` is executable).
'''
    assert len(argv) > 1, 'No destination directory given.' + usage
    assert len(argv) == 2, 'Extra argument(s) given.' + usage
    shed = RST2PyI(output_dir=argv[1])
    uasyncio(shed)
    math(shed)
    gc(shed)
    cmath(shed)
    uctypes(shed)
    ucryptolib(shed)
    ubluetooth(shed)
    network(shed)
    micropython(shed)
    framebuf(shed)
    btree(shed)
    machine(shed)
    lcd160cr(shed)
    uarray(shed)
    pyb(shed)
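# Assumed entry point, matching the usage text in main()'s docstring (not part
# of the original excerpt; relies on `from sys import argv` at module level):
if __name__ == '__main__':
    main()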
def main():
    # Load data files
    nRows_iris = 150
    nColumns_iris = 5
    num_epoch_iris = 1
    learning_rate_iris = .5
    nRows_diabetes = 768
    nColumns_diabetes = 9
    num_epoch_diabetes = 1
    learning_rate_diabetes = .5
    iris = lf.load_file("iris.csv", nRows_iris, nColumns_iris)
    diabetes = lf.load_file("diabetes.data", nRows_diabetes, nColumns_diabetes)

    # Collect target data before it is normalized
    iris_targets = []
    diabetes_targets = []
    for row in range(nRows_iris):
        iris_targets.append(iris[row][nColumns_iris - 1])
    for row in range(nRows_diabetes):
        diabetes_targets.append(diabetes[row][nColumns_diabetes - 1])

    # Normalize data files
    iris = preprocessing.normalize(iris)
    diabetes = preprocessing.normalize(diabetes)

    # Run Iris
    iris_num_layers_array = [1, 3]  # Length is num_layers, each element is num_nodes
    for i in range(num_epoch_iris):
        np.random.shuffle(iris)
        iris_net = net.network(iris_num_layers_array, nRows_iris, nColumns_iris,
                               iris, "Iris", learning_rate_iris)
        iris_net.run_network()
        iris_net.generate_guesses()
        iris_net.update_weights()
        iris_net.print_accuracy(iris_targets)
        if learning_rate_iris > .1:
            learning_rate_iris -= .001

    # Run Diabetes
    diabetes_num_layers_array = [1, 2]
    for i in range(num_epoch_diabetes):
        np.random.shuffle(diabetes)
        diabetes_net = net.network(diabetes_num_layers_array, nRows_diabetes, nColumns_diabetes,
                                   diabetes, "Diabetes", learning_rate_diabetes)
        diabetes_net.run_network()
        diabetes_net.generate_guesses()
        diabetes_net.update_weights()
        diabetes_net.print_accuracy(diabetes_targets)
        if learning_rate_diabetes > .1:
            learning_rate_diabetes -= .001
def main():
    graph = tf.Graph()
    with graph.as_default():
        #in_image = tf.compat.v1.placeholder(tf.float32, [None, TEST_CROP_FRAME, None, None, 4], name='input')
        #gt_image = tf.compat.v1.placeholder(tf.float32, [None, TEST_CROP_FRAME, None, None, 3], name='gt')
        in_image = tf.compat.v1.placeholder(
            tf.float32, [None, TEST_CROP_FRAME, 512, 512, 4], name='input')
        gt_image = tf.compat.v1.placeholder(
            tf.float32, [None, TEST_CROP_FRAME, 512, 512, 3], name='gt')
        out_image = network(in_image)

        saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())
        sess = tf.compat.v1.Session()
        sess.run(tf.compat.v1.global_variables_initializer())
        sess.run(tf.compat.v1.local_variables_initializer())
        saver.restore(sess, './1_checkpoint/16_bit_HE_to_HE_gt/model.ckpt')
        saver.save(sess, './1_checkpoint/16_bit_HE_to_HE_gt/modelfilnal.ckpt')
        graphdef = tf.compat.v1.graph_util.convert_variables_to_constants(
            sess, graph.as_graph_def(), ['output'])
        tf.io.write_graph(graphdef, './1_checkpoint/16_bit_HE_to_HE_gt',
                          'lsmod.pb', as_text=False)
def main():
    #data_input = get_pix_img_in_list("test.png")
    #data_input = get_pix_img_in_list("a.bmp")
    #data_input = get_pix_img_in_list("a.png")
    # test and
    data_input = [[0, 0], [0, 1], [1, 0], [1, 1]]
    data_output = [0, 0, 0, 1]
    net = network.network(2, 0.1)  # 0,001 > x > 0,01
    i = 0
    while i < 2800:
        #net.train(data_input, ord(data_output))
        net.train(data_input[0], data_output[0])
        net.train(data_input[1], data_output[1])
        net.train(data_input[2], data_output[2])
        net.train(data_input[3], data_output[3])
        i = i + 1
    print("##### test ##### ")
    net.test(data_input[0])
    net.test(data_input[1])
    net.test(data_input[2])
    net.test(data_input[3])
def _parallel_net_measures(self, idx, Group, Subject, FreqBand):
    """Computes Graph measures for one subject over all FrequencyBands"""
    print(f'Processing {Subject}, {FreqBand} Band')
    # Init Result Dict
    ResultDict = {}
    ResultDict['Subject'] = Subject
    ResultDict['Group'] = Group
    ResultDict['Frequency'] = FreqBand

    # Network Version
    version = self.net_version

    # Load FC matrix
    Data = np.load(self.find(suffix=version, filetype='.npy', Sub=Subject, Freq=FreqBand))

    # Remove negative edges from Network and set Diagonal to 0
    Data[Data < 0] = 0
    np.fill_diagonal(Data, 0)
    network = net.network(Data, np.arange(Data.shape[-1]))

    # Calls network methods, appends result to Dict
    for Measure, FuncName in self.GraphMeasures.items():
        ResultDict[Measure] = getattr(network, FuncName)()
    df = pd.DataFrame(ResultDict, index=[idx])
    return df
def infer(args):
    batch_size = args.batch_size
    items_num = reader.read_config(args.config_path)
    test_data = reader.Data(args.test_path, False)

    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    loss, acc, py_reader, feed_datas, logits = network.network(
        items_num, args.hidden_size, args.step, args.batch_size)
    exe.run(fluid.default_startup_program())

    [infer_program, feeded_var_names, target_var] = fluid.io.load_inference_model(
        dirname=args.model_path, executor=exe)
    feed_list = [e.name for e in feed_datas]
    print(feed_list, type(target_var[0]), type(logits))

    infer_reader = test_data.reader(batch_size, batch_size * 20, False)
    feeder = fluid.DataFeeder(place=place, feed_list=feed_list)

    for iter, data in enumerate(infer_reader()):
        res = exe.run(infer_program, feed=feeder.feed(data), fetch_list=[logits])
        #logits = res
        #print('session:', data, 'label:', np.argmax(logits))
        print("@@@, ", res)
        print("!!!,", logits)
        if iter == 0:
            break
def __init__(self, ip, port, name):
    self.ip = ip
    self.port = port
    self.network = network()
    self.game = game()
    self.player = player()  # instantiated, like network() and game() above
    self.player.name = name
def train():
    mnist = MNIST()
    X_train, X_label = mnist.train_set()
    Y_test, Y_label = mnist.test_set()
    train_network = network()
    train_network.fc_network(X_train, X_label, Y_test, Y_label)
def main():
    sess = tf.Session()
    in_image = tf.placeholder(tf.float32, [None, TEST_CROP_FRAME, None, None, 4])
    gt_image = tf.placeholder(tf.float32, [None, TEST_CROP_FRAME, None, None, 3])
    out_image = network(in_image)

    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
    if ckpt:
        print('loaded ' + ckpt.model_checkpoint_path)
        saver.restore(sess, ckpt.model_checkpoint_path)

    if not os.path.isdir(TEST_RESULT_DIR):
        os.makedirs(TEST_RESULT_DIR)

    for i, file0 in enumerate(in_paths):
        t0 = time.time()
        # raw = vread(file0)
        raw = np.load(file0)
        if raw.shape[0] > MAX_FRAME:
            print('Video with shape', raw.shape, 'is too large. Split.')
            count = 0
            begin_frame = 0
            while begin_frame < raw.shape[0]:
                t1 = time.time()
                print('processing segment %d ...' % (count + 1), end='')
                new_filename = '.'.join(file0.split('.')[:-1] + [str(count)] + file0.split('.')[-1::])
                process_video(sess, in_image, out_image, new_filename,
                              raw[begin_frame: begin_frame + MAX_FRAME, :, :, :])
                count += 1
                begin_frame += MAX_FRAME
                print('\t{}s'.format(time.time() - t1))
        else:
            process_video(sess, in_image, out_image, file0, raw, out_file=train_ids[i] + '.mp4')
        print(train_ids[i], '\t{}s'.format(time.time() - t0))
def main():
    dirp = "../MNIST_handwritten_digits/data/"
    train_img = dirp + "train_images"
    train_lab = dirp + "train_labels"
    x, y = loadlocal_mnist(images_path=train_img, labels_path=train_lab)
    y = convert_labels(y)

    arch = [x.shape[1], 300, 10]
    net = network.network(x, y, arch, normalize='yes')
    net.optimizer = net.AdamOptimizer
    net.lossFunction = network.CrossEntropyLoss
    net.learning_rate = 0.01

    test_img = dirp + "test_images"
    test_lab = dirp + "test_labels"
    x_t, y_t = loadlocal_mnist(images_path=test_img, labels_path=test_lab)
    y_t = convert_labels(y_t)
    net.x_te = x_t
    net.y_te = y_t

    a = time()
    net.train(100)
    print(time() - a)
    #net.exportWeights("weights.txt")
    net.test(x_t, y_t)
def main():
    # restore model parameters and move to GPU
    model = network()

    # Cal num of paras
    # default: calculate the FLOPS of the latest output. See network.py order
    #input = torch.randn(1, 1, height_test, width_test)
    #flops, params = profile(model, inputs=(input, ))
    #flops, params = clever_format([flops, params], "%.3f")
    #print(flops)
    #print(params)

    # record time
    time_start = time.time()

    # restore model
    model_path = os.path.join(dir_model, "model_" + args.type_test + ".pt")
    model.load_state_dict(torch.load(model_path, map_location=dev))
    model.to(dev)
    model.eval()
    print("\n=== Successfully restored model! ===")
    fp_ave.write("\n=== Successfully restored model! ===\n")

    # test
    test(model)
    time_all = (time.time() - time_start) / 3600
    print("=== Time consuming: %.1f h ===" % time_all)
    fp_ave.write("=== Time consuming: %.1f h ===\n" % time_all)
    fp_ave.flush()
    fp_ave.close()
def main():
    n = network()
    p = n.get_p()
    run = True
    clock = pygame.time.Clock()
    ithread = threading.Thread(target=to_receive_data, args=(n, p, p2))
    ithread.daemon = True
    ithread.start()
    counter = 0
    while run:
        try:
            n.send(p)
        except:
            pass
        clock.tick(25)
        if p.life_status == "alive":
            p.keymove()
            p.move()
            p.snakecollision(p2)
            if (p.check_collision() == 1):
                p.gameover()
        if p.life_status == "dead" and counter == 0 and anotherc == 0:
            keepthread = False
            print("passing for the first time")
            counter += 1
        elif counter == 1:
            print("breaking")
            message_display("you crashed")
            run = False
        redrawWindow(win, p, p2)
    pygame.quit()
    sys.exit()
def main():
    if (len(sys.argv) == 1):
        raise NameError('[ERROR] No dataset key')
    elif (sys.argv[1] == 'lfw'):
        FLAGS.updates_per_epoch = 380
        FLAGS.log_interval = 120
        FLAGS.out_dir = '/home/mchong6/data/output_lfw/'
        FLAGS.pc_dir = 'data/pcomp/lfw/'
        FLAGS.in_dir = '/home/mchong6/data/lfw_deepfunneled/'
        FLAGS.sub_dir = True
        FLAGS.ext = 'jpg'
        FLAGS.imglist_dir = '/home/mchong6/data/output_lfw/'
        FLAGS.countbins_fn = None
        FLAGS.log_dir = '/home/mchong6/data/output_lfw/logs/'

    data_loader = imglab_loader(FLAGS.in_dir,
                                os.path.join(FLAGS.out_dir, 'images_vae'),
                                shape=(FLAGS.col_img_height, FLAGS.col_img_width),
                                outshape=(FLAGS.grey_img_height, FLAGS.grey_img_width),
                                subdir=FLAGS.sub_dir,
                                countbins_fn=FLAGS.countbins_fn,
                                ext=FLAGS.ext,
                                listdir=FLAGS.imglist_dir)

    # Train colorfield VAE
    graph_cvae = tf.Graph()
    with graph_cvae.as_default():
        model_cvae = cvae(FLAGS, nch=2)
        network_cvae = network(model_cvae, data_loader, 2, FLAGS)
        network_cvae.train_vae(os.path.join(FLAGS.out_dir, 'model_cvae'),
                               FLAGS.is_train)
def build_model(inputs, mask, units, depth, n_labels, feat_dim, init_lr,
                direction, dropout, init_filters, optim, lstm=False, vgg=False):
    filters = init_filters
    outputs = Masking(mask_value=0.0)(inputs)
    if vgg is False:
        outputs = vgg2l.VGG2L(inputs, init_filters, feat_dim)
    else:
        outputs = vgg1l.VGG(inputs, init_filters, feat_dim)
    outputs = network.network(outputs, units, depth, n_labels, direction, dropout, lstm)
    outputs = TimeDistributed(Dense(n_labels + 1))(outputs)
    outputs = Activation('softmax')(outputs)
    model = Model([inputs, mask], outputs)

    # we can get accuracy from data along with batch/temporal axes.
    if optim == 'adam':
        model.compile(keras.optimizers.Adam(lr=init_lr, clipnorm=50.),
                      loss=['categorical_crossentropy'],
                      metrics=['categorical_accuracy'])
    else:
        model.compile(keras.optimizers.Adadelta(),
                      loss=['categorical_crossentropy'],
                      metrics=['categorical_accuracy'])
    return model
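# A minimal sketch of calling build_model above, assuming 40-dimensional input
# features; every hyperparameter value here is illustrative, not taken from the
# original code.
from keras.layers import Input

feat_dim = 40
inputs = Input(shape=(None, feat_dim))  # (time, features), variable length
mask = Input(shape=(None, 1))           # per-frame mask input
model = build_model(inputs, mask, units=320, depth=4, n_labels=49,
                    feat_dim=feat_dim, init_lr=1e-3, direction='bi',
                    dropout=0.2, init_filters=64, optim='adam',
                    lstm=True, vgg=False)
model.summary()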
def __init__(self):
    super(MainWindow, self).__init__()
    self.setMinimumSize(int(QApplication.primaryScreen().size().width() * 0.1),
                        int(QApplication.primaryScreen().size().height() * 0.2))
    self.resize(int(QApplication.primaryScreen().size().width() * 0.3),
                int(QApplication.primaryScreen().size().height() * 0.5))
    window_width = int(QApplication.primaryScreen().size().width() * 0.3)

    id = QFontDatabase.addApplicationFont(variables.FONT_FILE)
    family = QFontDatabase.applicationFontFamilies(id)[0]
    variables.font = QFont(family, variables.FONT_SIZE)
    variables.font_small = QFont(family, variables.FONT_SIZE_SMALL)

    variables.nw = network.network()
    variables.signals = variables.DialogSignals()
    variables.signals.create_dialog.connect(self.createDialog)
    variables.signals.open_dialog.connect(self.openDialog)
    variables.signals.close_dialog.connect(self.closeDialog)
    variables.signals.message_sent.connect(self.messageSent)
    variables.nw.received.connect(self.receiveMessage)
    variables.nw.undelivered.connect(self.undelivered)
    variables.nw.delivered.connect(self.delivered)
    variables.nw.read.connect(self.read)
    variables.nw.reconnect.connect(self.clientReconnected)

    self.dialogs = []
    self.dialog_menu = DialogList()
    self.main_widget = QStackedWidget()
    self.main_widget.addWidget(self.dialog_menu)
    self.setCentralWidget(self.main_widget)
    self.setWindowTitle(variables.APP_NAME)
    self.setWindowIcon(QIcon(variables.LOGO_IMG))
    self.dialog_menu.ip_input.setFocus()
def __init__(self, ip, mac):
    self.myip = ip
    self.mymac = mac
    self.tools = tools()
    self.mem = memory()
    self.network = network()
    self.replyTimeout = 20
def __init__(self, url, priority, description, httptimeout=2, pingtimeout=1000,
             sleep=10, httpExceptionSleep=2, ping=True, exitonping=True):
    self.url = url
    self.description = description
    self.priority = priority
    netobj = network(self.url, httpTimeout=httptimeout, pingTimeout=pingtimeout,
                     sleepTimer=sleep, httpExceptionTimer=httpExceptionSleep)
    if ping:
        if not netobj.ping():
            self.ping = False
            if exitonping:
                return
        else:
            self.ping = True
def main():
    run = True
    n = network()
    clientNo = n.connect()
    b = Board(4)
    p = player(clientNo, 244, 'blah', b)
    while run:
        time.sleep(2)
def train(session):
    network = network_mod.network(config.IMAGE_DIMS)
    session.run(tf.global_variables_initializer())
    fw = tf.summary.FileWriter("logs/", graph=session.graph)
    get_batches = dataset.get_get_batches()
    for epoch_idx in range(1, config.training.NUM_EPOCHS + 1):
        train_epoch(session, network, epoch_idx, get_batches)
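# Assumed driver for train() above (not in the original excerpt); train() only
# needs an active tf.Session:
if __name__ == "__main__":
    with tf.Session() as session:
        train(session)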
def setup(self):
    self.url = 'ws://attend.ddns.net:989/node'
    # self.url = 'ws://192.168.0.50:989/node'
    self.ip_addr = None
    self.conn_port = 989
    self.network = network.network(host=self.url, port=self.conn_port)
    self.stay_alive_count = 0
def program_start():
    """MAIN"""
    os.system('clear')
    print("""
 ____________________________________________________________________________
|                                                                            |
|                        BYPASS's PASSWORD MANAGER                           |
|____________________________________________________________________________|

Made compatible with Python3.x by @upn or bypass#0666
""")
    choice = ""
    if choice != '4':
        os.system('exit')
    choice = input("""
(1) Manually input an account
(2) Search for an existing account
(3) Delete an account
(4) Clear your Screen
(5) Go to Password Generator
(6) Go to Pinger
(7) Go to Self Network Analysis
(8) Exit

[MANAGE] $ """)
    if choice == "1":
        manual()
    elif choice == "2":
        find_account()
    elif choice == "3":
        delete_account()
    elif choice == "4":
        ClearScreen()
    elif choice == "5":
        os.system('python3 main.py')
    elif choice == "all accounts":
        all_accounts()
    elif choice == "7":
        import network
        network.network()
    elif choice == "6":
        pinger()
    elif choice == "8":
        ExitTerm()
    else:
        print(" ")
def create_population(p_init, var_names):
    population = []
    for _ in range(p_init):
        matrix = create_random_matrix(var_names)
        population.append(network(matrix, var_names))
    return population
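# Illustrative usage (assumed, not from the original source): build an initial
# population of 50 candidate networks over three hypothetical variable names.
# create_random_matrix and network are the module-level helpers used above.
population = create_population(50, ["A", "B", "C"])
print(len(population))  # -> 50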
def create_network(self, address, netmask, name_suffix=""):
    print "Creating network ", address, ":", netmask
    net = network(address, netmask, name_suffix, self)
    if net.key() not in self.networks:
        net.start()
        net.root = True
        self.networks[net.key()] = net
        msg = message.NetworkQueryMessage(address, netmask)
        self.topology.broadcast(msg)
def network_query(self, query_msg, source):
    with self.lock:
        net = network(query_msg.address, query_msg.netmask)
        if net.key() in self.networks:
            net = self.networks[net.key()]
            if net.root:
                switch = net.switch_name()
                msg = message.NetworkReplyMessage(query_msg.address, query_msg.netmask, switch)
                self.topology.send_message(msg, source)
def __init__(self, ip, port):
    self.network = network()
    self.game = game()
    self.ip = ip
    self.port = port
    self.lastid = -1
    self.players = {}  # playerid: [playerobj, clientobj]
    self.p1 = False
    self.p2 = False
def __init__(self, classifier):
    # we load the heaviest objects at initialization
    model = network()
    model.load(classifier)
    self.model = model
    imglist = allFiles()
    self.dic_class = create_class(imglist)
def __init__(self, url):
    self.connection = docker.Client(base_url=url)
    if not os.path.isdir("/etc/config"):
        utils.execute("mkdir /etc/config")
    self.path = "/etc/config/"
    self.netpath = "/etc/network/"
    self.net = network.network()
def main():
    mnist_path = 'MNIST'
    cifar10_path = 'CIFAR10'
    model_path = 'model'
    mnist = None
    use_cifar10 = True
    X_test = None
    y_test = None

    if use_cifar10:
        cifar10 = get_CIFAR10_data(cifar10_path)
        X_test = cifar10['X_test']
        y_test = np.eye(10)[cifar10['y_test']]
    else:
        mnist = input_data.read_data_sets(mnist_path, one_hot=True)

    if use_cifar10:
        inputs = tf.placeholder(tf.float32, (None, None, None, 3), name='inputs')
        labels = tf.placeholder(tf.float32, (None, 10), name='labels')
    else:
        inputs = tf.placeholder(tf.float32, (None, 784), name='inputs')
        labels = tf.placeholder(tf.float32, (None, 10), name='labels')
        # reshape flat MNIST vectors to NHWC; only valid in this branch
        inputs = tf.reshape(inputs, shape=(-1, 28, 28, 1))

    logits = network(inputs, use_bn=True, use_cbn=True, is_training=False)
    outputs = tf.nn.softmax(logits)
    accuracy_condition = tf.equal(tf.argmax(outputs, axis=-1), tf.argmax(labels, axis=-1))
    accuracy_op = tf.reduce_mean(tf.cast(accuracy_condition, tf.float32))

    for var in tf.global_variables():
        print('=> variable ' + var.op.name)

    print('start network evaluation...')
    loader = tf.train.Saver()
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        try:
            print('=> restoring weights from: %s ...' % model_path)
            ckpt = tf.train.latest_checkpoint(model_path)
            loader.restore(sess, ckpt)
        except:
            print('=> restoring weights from: %s failed.' % model_path)

        if use_cifar10:
            batch_inputs = X_test
            batch_labels = y_test
        else:
            batch_inputs = mnist.test.images
            batch_labels = mnist.test.labels

        accuracy = sess.run(accuracy_op, feed_dict={
            inputs: batch_inputs,
            labels: batch_labels
        })
        print('=> accuracy: %.5f' % accuracy)
def __init__(self, ip, mac):
    self.myip = ip
    self.mymac = mac
    self.tools = tools()
    self.mem = memory()
    self.ruleconstructor = ruleconstructor()
    self.recv_target = None
    self.sent_target = None
    self.network = network()
def main():
    this_stations = parser.xml_parsing()
    # creation of network with parameters of stations
    this_network = network(stations=this_stations)
    # after implementing all parameters let's start simulation
    simulation(this_network)
def __init__(self, xy):
    self.parts = []
    self.backup = []
    self.sensors = []
    self.colliding = []
    self.network = network()
    parts = self.parts
    backup = self.backup
    self.locked = [0, 0, 0, 0, 0, 0, 0, 0, 0]

    # add the head and body parts
    parts.append(Part(0, xy[0], xy[1], True))
    parts.append(Part(0, 0, 0, False))
    parts.append(Part(0, 0, 0, False))

    # Load the image files
    parts[0].loadImage("image_resources/body.png")
    parts[1].loadImage("image_resources/body.png")
    parts[2].loadImage("image_resources/head.png")

    # Set the constraints of the body parts so that the agents cannot fold in on themselves
    parts[0].setConstraint(((parts[1].getRotation() - 90) % 360, (parts[1].getRotation() + 90) % 360))
    parts[1].setConstraint(((parts[0].getRotation() - 90) % 360, (parts[0].getRotation() + 90) % 360))
    parts[2].setConstraint(((parts[1].getRotation() - 90) % 360, (parts[1].getRotation() + 90) % 360))

    # Body parts and heads weight
    parts[0].setWeight(23.44)
    parts[1].setWeight(11.72)
    parts[2].setWeight(8.24)

    # Add the leg parts (class name unified to Part; the original mixed `part` and `Part`)
    for i in range(0, 2):
        parts.append(Part(310, 0, 0, False))
        parts.append(Part(50, 0, 0, False))
        parts.append(Part(50, 0, 0, False))
        parts.append(Part(0, 0, 0, False))
        parts.append(Part(0, 0, 0, False))
        parts.append(Part(0, 0, 0, False))

    for l in range(3, 15):
        # Load the image files and set the constraints
        parts[l].loadImage("image_resources/leg.png")
        parts[l].setWeight(0.66)
        if l % 6 < 3:
            parts[l].setConstraint(((parts[l].getRotation() - 90) % 360, (parts[l].getRotation() + 90) % 360))
        else:
            parts[l].setConstraint((0, 360))

    # Initialise the backup storage for the parts information with 0 values
    for k in range(0, 15):
        backup.append(Part(0, 0, 0, False))
        self.colliding.append(False)

    # Backup objects that can be used to revert object overlaps and handle collisions
    self.objects = []
    self.setSensors()
def __init__(self, config):
    self.total_steps = 0

    self.env = gym.make(config.get('game'))
    self.env = MaxAndSkipEnv(self.env)
    self.env = FireResetEnv(self.env)

    self.input_shape = config.get('input_shape')
    self.state_steps = config.get('state_steps')
    self.batch_size = config.get('batch_size')
    self.train_interval = config.get('train_interval')
    self.start_train_after_steps = config.get('start_train_after_steps')
    self.update_follower_steps = config.get('update_follower_steps')

    self.current_state = state.state(self.input_shape, self.state_steps)

    self.actions = self.env.action_space.n
    config.put('actions', self.actions)  # used in network to create N outputs, one per action

    self.epsilon_start = config.get('epsilon_start')
    self.epsilon_end = config.get('epsilon_end')
    self.initial_explore_steps = config.get('initial_explore_steps')
    self.total_explore_steps = config.get('total_explore_steps')
    self.epsilon = self.epsilon_start

    self.alpha = config.get('q_alpha')
    self.discount_gamma = config.get('discount_gamma')

    self.history = history.history(config.get('history_size'))

    output_path = config.get('output_path')
    output_path += '/run.%d' % (time.time())
    self.summary_writer = tf.summary.FileWriter(output_path)
    config.put('summary_writer', self.summary_writer)  # used in network

    with tf.variable_scope('main') as vscope:
        self.main = network.network('main', config)
    with tf.variable_scope('follower') as vscope:
        self.follower = network.network('follower', config)
        self.follower.import_params(self.main.export_params(), 0)
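# The epsilon fields above suggest a linear anneal from epsilon_start to
# epsilon_end over total_explore_steps. A sketch of that schedule (assumed,
# not from the original source; ignores initial_explore_steps):
def annealed_epsilon(step, start, end, total_steps):
    """Linearly anneal exploration rate from start to end over total_steps."""
    frac = min(1.0, float(step) / total_steps)
    return start + frac * (end - start)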
def __init__(self, isLoad, eps, annealRate, isTest):
    self.gamma = 0.90
    self.eps = eps
    self.annealRate = annealRate
    self.maxBombs = 1
    self.net = network(self.gamma)
    self.isTest = isTest
    self.nonRedundantActions = None

    # Start tf session
    self.config = config.Config()
    self.net._startSess(isLoad)
def network_reply(self, reply_msg, source):
    addr = self.topology.address_of_id(source)
    if addr is not None:
        with self.lock:
            net = network(reply_msg.address, reply_msg.netmask)
            if net.key() in self.networks:
                net = self.networks[net.key()]
                net.root = False
                net.root_id = source
                net.reattach_containers()
                net.wire(addr, reply_msg.switch)
def __init__(self):
    self.lib_version = '1.0.0'
    self.api_key = None
    self.api_private = None
    self.base_url = 'https://rest.quiubas.com'
    self.version = '1.0'

    self.network = network(self)
    self.balance = balance(self)
    self.callback = callback(self)
    self.keywords = keywords(self)
    self.sms = sms(self)
def main():
    #data_input = get_pix_img_in_list("test.png")
    data_input = get_pix_img_in_list("a.bmp")
    #data_input = get_pix_img_in_list("a.png")
    data_output = 'a'
    net = network.network(20*20, 0.01)  # 0,001 > x > 0,01
    i = 0
    while i < 2:
        net.train(data_input, ord(data_output))
        i = i + 1
    net.test(data_input)
def main():
    '''
    Main method: Here I read the input, and for
    '''
    net = network()
    n = int(input())
    inputX = []
    inputY = []
    outputs = []
    totalError = 0
    for i in range(n):
        inp = [int(x) for x in raw_input().split(' ')]
        inputX.append(inp[0])
        inputY.append(inp[1])
        outputs.append(inp[2])

    cont = 0
    # Each loop is an epoch.
    # While there isn't a set of weights on the neural network with error less than 0.4 the loop doesn't break.
    for currentOrder in permutations(range(n)):
        outs = []
        for i in currentOrder:
            outs.append(net.getOutput(inputX[i], inputY[i]))  # Calculating the output
            net.updateWeights(inputX[i], inputY[i], outputs[i])  # Updating the weights of the Neural network
        totalError = 0
        goingToBreak = True
        for i in currentOrder:
            totalError += (outputs[i] - outs[i]) ** 2
            if abs(outputs[i] - outs[i]) > 0.4:
                goingToBreak = False
        if goingToBreak:
            break
        if cont % 10 == 0:
            print('Epoch ' + str(cont))
            print('Squared Error: ' + str(totalError) + '\n')
        cont += 1

    delta = 0
    for i in range(n):
        o = net.getOutput(inputX[i], inputY[i])
        delta += abs(outputs[i] - o)
        print('Exemplar: ' + str(inputX[i]) + ' ' + str(inputY[i]) + ' ' + str(outputs[i]) +
              ' Neural Network Output: ' + str(o))
    print('\ndelta: ' + str(delta/8))
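# Example stdin for main() above (format inferred from the reading loop: a
# count n, then n lines of "x y expected_output" -- here an OR gate, chosen
# purely as an illustration):
#
#   4
#   0 0 0
#   0 1 1
#   1 0 1
#   1 1 1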
def move_container_to_network(self, container, address, netmask, network_name_suffix=''):
    print 'Moving container ', container, ' to network: ', address, ':', netmask
    if container not in self.containers:
        raise InvalidOperation("Don't have container " + container + ".")
    with self.lock:
        net = network(address, netmask)
        if net.key() not in self.networks:
            self.create_network(address, netmask, network_name_suffix)
        net = self.networks[net.key()]
        old_key = self.containers[container].network_key
        if old_key in self.networks:
            self.networks[old_key].detach_container(container)
            if not self.networks[old_key].used:
                self.networks[old_key].stop()
                del self.networks[old_key]
        net.attach_container(self.containers[container])
def __init__(self, parent):
    self.parent = parent
    self.device = parent.device
    self.data_layer = parent.data_layer
    self.apps = parent.apps
    self.marionette = parent.marionette
    self.actions = Actions(self.marionette)

    # Globals used for reporting ...
    self.errNum = 0
    self.start_time = time.time()

    # Get run details from the OS.
    self.general = general(self)
    self.test_num = parent.__module__[5:]
    self.app = app(self)
    self.date_and_time = date_and_time(self)
    self.debug = debug(self)
    self.element = element(self)
    self.home = home(self)
    self.iframe = iframe(self)
    self.messages = Messages(self)
    self.network = network(self)
    self.reporting = reporting(self)
    self.statusbar = statusbar(self)
    self.test = test(self)
    self.visual_tests = visualtests(self)

    self.marionette.set_search_timeout(10000)
    self.marionette.set_script_timeout(10000)

    elapsed = time.time() - self.start_time
    elapsed = round(elapsed, 0)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    self.reporting.debug("Initializing 'UTILS' took {} seconds.".format(elapsed))

    current_lang = parent.data_layer.get_setting("language.current").split('-')[0]
    self.reporting.info("Current Toolkit language: [{}]".format(current_lang))

    try:
        btn = self.marionette.find_element('id', 'charge-warning-ok')
        btn.tap()
    except:
        pass

    parent.data_layer.set_setting('screen.automatic-brightness', True)
def test_tfnet(task, projdir, modelname, sessionname, dataset, taskargs,
               patchflag=False, patchsize=100):
    # FOLDER VARIABLES
    sessiondir = projdir + "nets/" + modelname + "_" + sessionname + "/"
    resultsdir = projdir + "testresults/" + modelname + "_" + sessionname + "/"
    datadir = projdir + "data/" + dataset
    test_dir = datadir + "/testing"
    if not os.path.exists(resultsdir):
        os.mkdir(resultsdir)

    # NETWORK INIT
    x = tf.placeholder("float", shape=[None, None, None, 3])
    # xsize = tf.placeholder(tf.int32, shape=[2])
    y_conv = network(x, modelname, taskargs)
    taskobj = get_task(task)

    sess = tf.Session()
    saver = tf.train.Saver()
    if os.path.isfile(sessiondir + "checkpoint"):
        saver.restore(sess, tf.train.latest_checkpoint(sessiondir))
    else:
        print "Model not pretrained"

    # DATA INIT
    det_data, filenames = taskobj.read_testing_sets(test_dir)
    testdata = preprocess(det_data.testdata)

    # TESTING
    outs = []
    for j in range(testdata.shape[0]):
        print j
        res = sess.run([y_conv], feed_dict={x: testdata[j : j + 1]})
        outs.append(res[0])
    taskobj.validate(outs, det_data.testdata, None, resultsdir, taskargs)
def train_tfnet(task, projdir, modelname, sessionname, dataset, pretrained,
                taskargs, maxouter=1000, maxinner=15, batchsize=50, step=2e-4):
    # FOLDER VARIABLES
    sessiondir = projdir + "nets/" + modelname + "_" + sessionname + "/"
    resultsdir = projdir + "validationresults/" + modelname + "_" + sessionname + "/"
    datadir = projdir + "data/" + dataset
    train_dir = datadir + "/training"
    label_dir = datadir + "/labels"
    if not os.path.exists(sessiondir):
        os.mkdir(sessiondir)
    if not os.path.exists(resultsdir):
        os.mkdir(resultsdir)

    sess = tf.Session()

    # NETWORK INIT
    x = tf.placeholder("float", shape=[None, None, None, 3])
    # xsize = tf.placeholder(tf.int32, shape=[2])
    y_conv = network(x, modelname, taskargs)
    taskobj = get_task(task)
    y_, loss = taskobj.loss(y_conv)
    train_step = tf.train.AdamOptimizer(step).minimize(loss)
    sess.run(tf.initialize_all_variables())

    # SAVER
    saver = tf.train.Saver()
    if os.path.isfile(sessiondir + "checkpoint") and pretrained:
        saver.restore(sess, tf.train.latest_checkpoint(sessiondir))

    # READ DATA
    det_data = taskobj.read_training_sets(train_dir, label_dir, taskargs)
    # print np.mean(det_data.traindata[:,:,:,1])
    # print np.std(det_data.traindata[:,:,:,1])
    traindata = preprocess(det_data.traindata)
    valdata = preprocess(det_data.valdata)
    print np.mean(traindata[:, :, :, 2])
    print np.std(traindata[:, :, :, 2])
    # 1/0
    numtrain = traindata.shape[0]
    numval = valdata.shape[0]
    # valdata = det_data.valdata
    trainlabels = det_data.trainlabels
    vallabels = det_data.vallabels

    # TRAINING
    for outer in range(maxouter):
        # print 'outer', outer

        # VISUALISATION AND VALIDATION
        if outer % 1 == 0 and outer > -1:
            print "Validating..."
            results = []
            outs = []
            for j in range(numval):
                # image = mnist.test.images[i]
                res = sess.run([loss, y_conv],
                               feed_dict={x: valdata[j : j + 1], y_: vallabels[j : j + 1]})
                results.append(res[0])
                outs.append(res[1])
            taskobj.validate(outs, det_data.valdata, det_data.vallabels, resultsdir, taskargs)
            print outer, np.mean(results)

        # AUGMENT IMAGES
        print "Augmenting..."
        augtrain, auglabel = taskobj.augment_images(traindata, trainlabels)
        # augtrain, auglabel = (traindata, trainlabels)

        print "Training..."
        for inner in range(maxinner):
            # print 'inner', inner

            # SHUFFLE HERE
            shuffle = np.arange(numtrain)
            np.random.shuffle(shuffle)
            train = augtrain[shuffle]
            labels = auglabel[shuffle]

            # TRAINING
            for batchindex in range(numtrain / batchsize):
                trainbatch, labelsbatch = get_batch(train, labels, batchindex, batchsize)
                sess.run(train_step, feed_dict={x: trainbatch, y_: labelsbatch})

        saver.save(sess, sessiondir + "model.ckpt", global_step=outer)
    sess.close()
from bottle import route, run, request
import images
import container
import network

ContainerApi = container.dockerapi(url="tcp://0.0.0.0:2375")
NetworkApi = network.network()
ImageApi = images.image(url="tcp://0.0.0.0:2375")

# create container
@route("/create", method="POST")
def create():
    body = request.json
    image = body.get("image")
    hostname = body.get("hostname")
    name = body.get("name")
    bridge = body.get("bridge")
    netname = body.get("netname")
    gateway = body.get("gateway")
    ContainerApi.create(image, hostname, name, bridge, netname, gateway)
    return {"name": name,
            "hostname": hostname,
            "image": image,
            "netname": netname,
            "bridge": bridge,
            "gateway": gateway}

# delete container
@route("/delete/<ContainName>", method="DELETE")
import math
import network

def neg_logistic(c, x):
    return c - 1/(1 + math.e**(-x))

net = network.network(input=[1, 3, 5], output=[1, 1, 1], hidden_count=2)
it = net.training_iterator(training_goal=0.03, history_size=10)
it.send(None)

# each row in training set consists of input list, output list, learning rate
training_data = [([1, 0.25, -0.5], [1, -1, 0], neg_logistic(2, n)) for n in range(100)]

try:
    for row in training_data:
        print it.send(row)
except StopIteration:
    print "training goal reached"
numEpisodes = 100000
batch_size = 64

# if load parameter is passed load a network from a file
if args.load:
    print "loading model..."
    f = file(args.load, 'rb')
    network = cPickle.load(f)
    if network.batch_size:
        batch_size = network.batch_size
    f.close()
else:
    print "building model..."
    # use batchsize none now so that we can easily use same network for picking single moves and evaluating batches
    network = network(batch_size=None)

print "network size: " + str(network.mem_size.eval())

evaluate_model_single = theano.function(
    [input_state],
    network.output[0],
    givens={
        network.input: input_state.dimshuffle('x', 0, 1, 2),
    }
)

evaluate_model_batch = theano.function(
    [state_batch],
    network.output,
    givens={
        network.input: state_batch,
import network
import copy

neunet = network.network(size=20, depth=2, mutaterate=0.2, numin=2, numout=2)
neunet.arrangeNodes()
neunet.checkNodes()

inputlist = [1, 2]
expectedoutputlist = [3, 6]
neunet.setInputs(inputlist)
result = neunet.calcOutputs()
topscoreTable = neunet.scoreNetwork(expectedoutputlist)
print(topscoreTable)

loops = 0
while max(topscoreTable) > 0.0001:
    mutant = copy.deepcopy(neunet)
    mutant.mutateNodes()
    mutant.setInputs(inputlist)
    result = mutant.calcOutputs()
    scoreTable = mutant.scoreNetwork(expectedoutputlist)
    passtest = True
    i = 0
    for field in scoreTable:
from network import network
from windows import windows

'''
n1=network(9)
n1.addlink(4,5)
n1.setpoints(800,600)
'''

n1 = network(3)
n1.addlink(1, 2)
n1.addlink(2, 3)
w1 = windows(800, 600, n1)
def detect_class(projdir, detmodel, classmodel, detsession, clssession, dataset,
                 detargs, classargs):
    ##### Only testing needed

    # FOLDER VARIABLES
    detsessiondir = projdir + 'nets/' + detmodel + '_' + detsession + '/'
    clssessiondir = projdir + 'nets/' + classmodel + '_' + clssession + '/'
    resultsdir = projdir + 'testresults/' + detmodel + '_' + classmodel + '_' + clssession + '/'
    datadir = projdir + 'data/' + dataset
    test_dir = datadir + '/testing'
    if not os.path.exists(resultsdir):
        os.mkdir(resultsdir)

    # Detmodel init
    detgraph = tf.Graph()
    with detgraph.as_default():
        detsess = tf.Session()
        detx = tf.placeholder("float", shape=[None, None, None, 3])
        #xsize = tf.placeholder(tf.int32, shape=[2])
        dety = network(detx, detmodel, detargs)
        detsaver = tf.train.Saver()
        if os.path.isfile(detsessiondir + 'checkpoint'):
            detsaver.restore(detsess, tf.train.latest_checkpoint(detsessiondir))
        else:
            print 'Detection model not pretrained'

    # Classmodel init
    clsgraph = tf.Graph()
    with clsgraph.as_default():
        clssess = tf.Session()
        clsx = tf.placeholder("float", shape=[None, None, None, 3])
        #xsize = tf.placeholder(tf.int32, shape=[2])
        clsy = network(clsx, classmodel, classargs)
        clssaver = tf.train.Saver()
        if os.path.isfile(clssessiondir + 'checkpoint'):
            clssaver.restore(clssess, tf.train.latest_checkpoint(clssessiondir))
        else:
            print 'Classification model not pretrained'

    # Restore models with saver
    detobj = Detection()
    #clsobj = Classification()

    # Read test data
    det_data, filenames = detobj.read_testing_sets(test_dir)
    testdata = preprocess(det_data.testdata)

    # init classifications structure
    print 'GO'
    # for each test image:
    for j in range(testdata.shape[0]):
        print j
        classifications = {}
        for i in range(classargs['nouts']):
            classifications[i] = ([], [])
        #imagename = str(j) + '.jpg'

        # run detection net on test image
        with detgraph.as_default():
            detres = detsess.run([dety], feed_dict={detx: testdata[j:j+1]})
        image = detres[0][0, :, :, 0]
        image = scipy.ndimage.gaussian_filter(image, sigma=(1, 1), order=0)
        #fig = plt.figure(frameon=False)
        #ax = fig.add_subplot(111)
        #ax.imshow(image, aspect='normal')
        #fig.savefig(resultsdir+str(j)+'_detout.jpg')
        plt.imsave(resultsdir + filenames[j] + '_detout.jpg', image)
        #plt.clf()

        # put detections on image
        nmy, nmx = detection.nonmaxsuppresion(image)
        #print detargs['patchsize']
        patches = detection.getpatches(nmy, nmx, testdata[j], detargs['patchsize'])
        #patches = detection.getpatches(nmy, nmx, det_data.testdata[j], detargs['patchsize'])

        # for each detection in image:
        for i in range(len(nmy)):
            # extract patch around detection
            patch = patches[(nmy[i], nmx[i])]
            #patch = preprocess(np.expand_dims(patch,0))
            patch = np.expand_dims(patch, 0)

            # run classification net on patch
            with clsgraph.as_default():
                clssaver.restore(clssess, tf.train.latest_checkpoint(clssessiondir))
                clsres = clssess.run([clsy], feed_dict={clsx: patch})
            classification = np.argmax(clsres[0], 1)[0]
            #print clsres[0]
            ylist, xlist = classifications[classification]
            ylist.append(nmy[i])
            xlist.append(nmx[i])
            classifications[classification] = (ylist, xlist)
            # save coordinates, class and test image entry in file
            #string = imagename + '\t' + str(nmy[i]) + '\t' + str(nmx[i]) + '\t' + str(classification)

        # save image with detections overlaid in different colors
        #plt.imshow(original)
        colors = ['b', 'g', 'r']
        #ax = plt.Axes(fig, [0., 0., 1., 1.])
        #ax.set_axis_off()
        #fig.add_axes(ax)
        fig = plt.figure(frameon=False)
        ax = fig.add_subplot(111)
        ax.imshow(det_data.testdata[j], aspect='normal')
        for i in range(classargs['nouts']):
            cly, clx = classifications[i]
            ax.scatter(clx, cly, c=colors[i], s=20, marker='+')
            print len(cly)
        fig.savefig(resultsdir + filenames[j] + '_detclassifications.jpg')
        plt.clf()
        #ax.clear()
        plt.close('all')
import system as sys  # needed for sys.system() below; missing from the original import list
import network as netw
import traces as tra
import info as nf
import torus as tor
import pylab as pl

#root = Tkinter.Tk()
#screen_width = root.winfo_screenwidth()
#screen_height = root.winfo_screenheight()

pos_info = '+0+600'
pos_tra = '+300+600'
pos_net = '+300+0'
pos_sys = '+0+0'
pos_torus = '+800+0'

i = nf.info(position=pos_info)
n = netw.network(info=i, position=pos_net)
s = sys.system(info=i, position=pos_sys, network=n)
n.system = s
t = tra.traces(s, n, info=i, position=pos_tra)
tor = tor.torus(s, n, t, info=i, position=pos_torus)

if pl.get_backend() == 'TkAgg':
    s.fig.tight_layout()
    #n.fig.tight_layout()
    t.fig.tight_layout()
    tor.fig.tight_layout()

pl.show()
costs = []
values = []

numEpisodes = 100000
batch_size = 64

# if load parameter is passed load a network from a file
if args.load:
    print "loading model..."
    f = file(args.load, 'rb')
    network = cPickle.load(f)
    batch_size = network.batch_size
    f.close()
else:
    print "building model..."
    network = network(batch_size=batch_size)

print "network size: " + str(network.mem_size.eval())

# zeros used for running network on a single state without modifying batch size
input_padding = theano.shared(np.zeros(np.concatenate(([network.batch_size], input_shape))).astype(theano.config.floatX))

evaluate_model_single = theano.function(
    [input_state],
    network.output[0],
    givens={
        network.input: T.set_subtensor(input_padding[0, :, :, :], input_state),
    }
)

evaluate_model_batch = theano.function(
    [state_batch],
    network.output,
def __init__(self):
    self.network = network()
    self.mem = memory()
def __init__(self, system, network, traces, info=None, position=None):
    torus_2D.torus_2D.__init__(self, system, network, traces, info, position)


if __name__ == "__main__":
    import system as sys
    import network as netw
    import traces as tra
    import info as nf
    import pylab as pl

    i = nf.info()
    s = sys.system(info=i)
    n = netw.network(info=i)
    t = tra.traces(s, n, info=i)
    tor = torus(s, n, t, info=i)
    pl.show()
parser = argparse.ArgumentParser()
parser.add_argument("source", type=str, help="Pickled network to steal params from.")
parser.add_argument("dest", type=str, help="File to place new network in.")
parser.add_argument("--cpu", "-c", dest="cpu", action="store_const", const=True,
                    default=False, help="Convert network to run on a CPU.")
args = parser.parse_args()

print "loading model..."
f = file(args.source, "rb")
old_network = cPickle.load(f)
f.close()

params = old_network.params
if args.cpu:
    print "converting gpu parameters..."
    new_params = []
    for param in params:
        param = T._shared(param.get_value())
        new_params.append(param)
    params = new_params

new_network = network(batch_size=None, params=params)

print "saving model..."
f = file(args.dest, "wb")
cPickle.dump(new_network, f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
#!/usr/bin/python2
from network import network
from random import randint
import pprint
import img_orient as i_o
import glob
import os

# classification definitions
directories = ['rock', 'paper', 'scissors', 'green']

# initialize the network
net = network(18, 1, 11, 4)  # inputs, hidden_layers, hidden_neurons, outputs

# returns either 'rock', 'paper' or 'scissors' as strings
def rps_classify():
    truth_in = {}
    truth_out = {}
    net.loadWeights("rps_weigths.txt")
    truth_in[0] = i_o.get_converted_img('nao', 0)
    net.calcOuts(truth_in[0])
    print net.outs[2]
    for i in range(len(net.outs[2])):
        if net.outs[2][i] > 0.5:
            idx = net.outs[2].index(max(net.outs[2]))
from consts import ZMQ_SERVER_NETWORK, ZMQ_PUBSUB_KV17
from network import network
from helpers import serialize
import zmq
import sys

# Initialize the cached network
sys.stderr.write('Caching networkgraph...')
net = network()
sys.stderr.write('Done!\n')

# Initialize a zeromq context
context = zmq.Context()

# Set up a channel to receive network requests
sys.stderr.write('Setting up a ZeroMQ REP: %s\n' % (ZMQ_SERVER_NETWORK))
client = context.socket(zmq.REP)
client.bind(ZMQ_SERVER_NETWORK)

# Set up a channel to receive KV17 requests
sys.stderr.write('Setting up a ZeroMQ SUB: %s\n' % (ZMQ_PUBSUB_KV17))
subscribe_kv17 = context.socket(zmq.SUB)
subscribe_kv17.connect(ZMQ_PUBSUB_KV17)
subscribe_kv17.setsockopt(zmq.SUBSCRIBE, '')

# Set up a poller
poller = zmq.Poller()
poller.register(client, zmq.POLLIN)
poller.register(subscribe_kv17, zmq.POLLIN)

sys.stderr.write('Ready.\n')
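# A sketch of the event loop that would typically follow this setup (assumed,
# not the original continuation; the reply payload via serialize() is a guess):
#
#   while True:
#       socks = dict(poller.poll())
#       if client in socks:
#           request = client.recv()
#           client.send(serialize(net))     # hypothetical REP reply
#       if subscribe_kv17 in socks:
#           update = subscribe_kv17.recv()  # apply KV17 update to net here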
        self.ax.set_xlim(t[0], t[-1])
        self.fig.canvas.draw()
        return t, V_i


if __name__ == "__main__":
    import system as sys
    import network as netw

    s = sys.system()
    n = netw.network()
    tra = traces(s, n)
    pl.show()
#!/usr/bin/python2
from network import network
import pprint

truth_in = [[0, 0], [0, 1], [1, 0], [1, 1]]  # Input set
truth_out = [[0], [1], [1], [0]]             # Desired output set

net = network(2, 1, 3, 1)  # inputs, hidden_layers, hidden_neurons, outputs
net.initWeights()
#net.loadWeights("real_dice.txt")  # Load the weight values from a text file
net.debug = True             # Print debug info in the terminal
net.alpha = 1                # Learning rate
net.adaptive_alpha = True    # Adjust learning rate to increase if learning stagnates
net.alpha_roof = 2           # Learning rate max value
net.useGraph()               # Use the pygame visualisation
net.graph_freq = 1           # Display the network after every epoch
#net.graph_image_seq = True

#net.train(truth_in, truth_out, 0, 2000)  # input_set, output_set, mode, epochs
cnt = net.train(truth_in, truth_out, 1, 0.01)  # input_set, output_set, mode, target_sse
#net.saveWeights("real_dice.txt")  # Save the weight values to a text file