def export_fmu(model_path, file_name):
    '''Parse signal exchange blocks and export boptest fmu and kpi json.

    Parameters
    ----------
    model_path : str
        Path to original modelica model
    file_name : list
        Path(s) to modelica file and required libraries not on MODELICAPATH.
        Passed to file_name parameter of pymodelica.compile_fmu() in JModelica.

    Returns
    -------
    fmu_path : str
        Path to the wrapped modelica model fmu
    kpi_path : str
        Path to kpi json

    '''

    # Get signal exchange instances and kpi signals
    instances, signals = parse_instances(model_path, file_name)
    # Write wrapper and export as fmu
    fmu_path, _ = write_wrapper(model_path, file_name, instances)
    # Write kpi json
    kpi_path = os.path.join(os.getcwd(), 'kpis.json')
    with open(kpi_path, 'w') as f:
        json.dump(signals, f)
    # Open up the FMU and save the kpis json
    man = Data_Manager()
    man.save_data_and_kpisjson(fmu_path=fmu_path)

    return fmu_path, kpi_path
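# Hedged usage sketch for export_fmu(): the model name and extra library below are
# hypothetical placeholders, not files known to ship with this project.
if __name__ == '__main__':
    example_model = 'SimpleRC.Case'            # assumed path/name of a Modelica model
    example_files = ['SimpleRC.mo', 'IBPSA']   # assumed .mo file plus an extra library
    fmu, kpis = export_fmu(example_model, example_files)
    print('Exported FMU: {0}, KPI json: {1}'.format(fmu, kpis))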
def prim(paths):
    data = Data_Manager()
    conn = defaultdict(list)
    start = data.get_start()  # don't change (12, 16, 17, 21) starting cities

    for x in start:
        visited = []
        path = []
        visited.append(x)  # append start city key
        connections = data.get_conn(x)  # (weight, type, connect)
        heapq.heapify(connections)
        while connections:
            # (weight, type, Connection)
            cost, c1, c2 = heapq.heappop(connections)
            if c2 not in visited:
                visited.append(c2)
                path.append((c1, c2, cost))
                for x in data.get_conn(c2):
                    heapq.heappush(connections, x)
        paths.append(path)

    # for path in paths:
    #     for x in path:
    #         print(str.format("({0:3}, {1:3}, {2:5.4}), ", x[0], x[1], x[2]), end="")
    #     print()

    allPaths = []
    for x in paths:
        allPaths.append(_draw_paths(x))
    return allPaths
def algo(pocetak, kraj):
    ad = Data_Manager()
    z = ad.dajMiGradove()
    D = {}
    P = {}
    lock = {}
    open = {}
    for gradovi in z:
        D[gradovi] = float('inf')
        P[gradovi] = ""
    D[pocetak] = 0

    r = z
    while len(r) > 0:
        Q = None
        GRAD = ''
        for SVEU in r:
            if Q == None:
                l = D[SVEU]
                Q = l
                GRAD = SVEU
            elif D[SVEU] < Q:
                p = D[SVEU]
                Q = p
                GRAD = SVEU
        r.remove(GRAD)
        removITALL = GRAD
        W = ad.dajMiSpoj(removITALL)
        for allIN in W:
            if allIN[1] == GRAD:
                intthFirstSpot = allIN[2]
                inttheLastSpot = allIN[0]
                xy = GRAD
                all = D[xy] + inttheLastSpot
                if D[intthFirstSpot] == float('inf'):
                    D[intthFirstSpot] = all
                    P[intthFirstSpot] = xy
                elif D[intthFirstSpot] > all:
                    D[intthFirstSpot] = all
                    P[intthFirstSpot] = xy

    to = []
    m = kraj
    while m != pocetak:
        if to.count(m) == 0:
            veliko = m
            to.insert(0, veliko)
            node = P[veliko]
            m = node  # step back to the predecessor so the walk can terminate
        else:
            break
    veratiTOALL = pocetak
    to.insert(0, pocetak)
    return (D[kraj], to)
def alt_stats(path, numOfDays):
    xtraExpense = 0
    weeks = math.ceil(float(numOfDays / 7))
    totalDist = 0
    totalPetCost = 600 * weeks
    totalDiesCost = 700 * weeks
    totalTrainCost = 2415 * weeks
    totalTrainTime = 0
    totalCarTime = 0
    i = 0
    data = Data_Manager()

    for conn in path:
        start = conn[0]
        end = conn[1]
        connect = data.find_connect(start, end)
        if connect == None:
            dist = conn[2]
            dies = diesel(dist)
            pet = petrol(dist)
            time = get_time(dist)
            connect = Connection(conn[0], conn[1], 0, dies, pet, 0, time, dist)
        totalDist += connect.distance
        totalPetCost += connect.costByPetrol
        totalDiesCost += connect.costByDiesel
        if connect.costByTrain == 0:
            totalTrainCost += connect.costByPetrol + 115
            totalTrainTime += connect.timeByCar
        totalTrainTime += connect.timeByTrain
        totalCarTime += connect.timeByCar

    if (totalPetCost + totalCarTime < totalTrainCost + totalCarTime):
        if (totalPetCost < totalDiesCost):
            return totalPetCost, totalCarTime, totalDist
        else:
            return totalDiesCost, totalCarTime, totalDist
    elif (totalDiesCost + totalCarTime < totalTrainCost + totalCarTime):
        return totalDiesCost, totalCarTime, totalDist
    else:
        return totalTrainCost, totalTrainTime, totalDist

    # Note: every branch above returns, so the summary below is unreachable as written.
    print("---Alternate statistics comparing different travel methods---")
    print(" *Costs are based on weekly rates using only car or train,")
    print(" *When calculating Train, uses price for petrol when train is not available")
    print()
    print(" By Train: ----")
    print(str.format(" Cost: ${0:.6}, Time: {1:.4} hours, Distance: {2:.6} km ({3:.6} miles)",
                     totalTrainCost, totalTrainTime / 60, totalDist, totalDist * .621371))
    print()
    print(" By Petrol Car: ----")
    print(str.format(" Cost: ${0:.6}, Time: {1:.4} hours, Distance: {2:.6} km ({3:.6} miles)",
                     totalPetCost, totalCarTime / 60, totalDist, totalDist * .621371))
    print()
    print(" By Diesel Car: ----")
    print(str.format(" Cost: ${0:.6}, Time: {1:.4} hours, Distance: {2:.6} km ({3:.6} miles)",
                     totalDiesCost, totalCarTime / 60, totalDist, totalDist * .621371))
def _end_path(city):
    # get list of end cities
    short = (float('inf'), 0)
    data = Data_Manager()
    end = data.get_start()
    for c in end:
        path = dijkstra(city, c)
        if path[0] < short[0]:
            short = path
    dist = short[0]
    path = short[1]
    return (path[0], path[len(path) - 1], dist)
def get_data_manager(self, root_dir=None, data_name='syn#1', seed_num=1):
    '''
    This function aims to construct a data manager instance to manage the data info
    for the subsequent processing.
    '''
    data_info = data_name.split('#')
    data_kind, data_num = data_info[0], int(data_info[1])
    is_real = False if data_kind.startswith('syn') or data_kind.startswith('2d') else True
    has_outliers = True if data_kind.endswith('otlr') else False

    if data_kind == '2d':
        self.cls_num, num_samples, num_features, dim_reduced = 3, 1000, 2, False
    elif data_kind == 'tcga':
        self.cls_num, num_samples, num_features, dim_reduced = 33, 11135, 5000, False
    else:
        self.cls_num, num_samples, num_features, dim_reduced = 10, 1000, 2000, False

    data_manager = Data_Manager(root_dir=root_dir, is_real=is_real, data_kind=data_kind,
                                data_num=data_num, has_outliers=has_outliers, dim_reduced=dim_reduced,
                                num_of_features=num_features, num_of_samples=num_samples,
                                num_of_cls=self.cls_num, seed=seed_num)
    return data_manager
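# Hedged note on the data_name convention parsed above: '<kind>#<index>', e.g. 'syn#1'
# (synthetic), '2d#3', 'tcga#1', or a kind ending in 'otlr' to flag outliers. A minimal
# sketch, assuming this method lives on some experiment object `exp` (hypothetical name):
#
#     dm = exp.get_data_manager(root_dir='./datasets', data_name='tcga#1', seed_num=7)
#     # -> 33 classes, 11135 samples, 5000 features, per the 'tcga' branch above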
def krajSviPuteva(x):
    a = (float('inf'), 0)
    b = Data_Manager()
    end = b.pocetakSav()
    for allOfIt in end:
        putvi = x
        putvi_2 = allOfIt
        STAZA = algo(putvi, putvi_2)
        if STAZA[0] < a[0]:
            a = STAZA
    mesto = a[0]
    STAZA = a[1]
    prosto_A = STAZA[0]
    prosto_B = len(STAZA) - 1
    allToged = mesto
    return (prosto_A, STAZA[prosto_B], allToged)
def dijkstra(start, end):
    data = Data_Manager()
    cities = data.get_cities()
    D = {}
    P = {}
    for city in cities:
        D[city] = float('inf')
        P[city] = ""
    D[start] = 0

    remain_cities = cities
    while len(remain_cities) > 0:
        short = None
        city = ''
        for x in remain_cities:
            if short == None:
                short = D[x]
                city = x
            elif D[x] < short:
                short = D[x]
                city = x
        remain_cities.remove(city)
        connects = data.get_conn(city)
        for x in connects:  # (weight, type, cost, time)
            if x[1] == city:
                if D[x[2]] == float('inf'):
                    D[x[2]] = D[city] + x[0]
                    P[x[2]] = city
                elif D[x[2]] > D[city] + x[0]:
                    D[x[2]] = D[city] + x[0]
                    P[x[2]] = city

    path = []
    node = end
    while node != start:
        if path.count(node) == 0:
            path.insert(0, node)
            node = P[node]
        else:
            break
    path.insert(0, start)
    return (D[end], path)
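# Hedged usage sketch for dijkstra(): the city keys 12 and 21 are only illustrative (they
# appear in the start-city comment in prim() above), and get_cities()/get_conn() are assumed
# to be pre-populated by Data_Manager, with connection tuples read as (weight, from-city,
# to-city, ...) as in the relaxation step above.
#
#     cost, route = dijkstra(12, 21)
#     print('cheapest weight: {0}, via {1}'.format(cost, route))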
def pocniOdArray(puteljak):
    infromacija = Data_Manager()
    papir = list
    a = defaultdict(papir)
    value = 0
    abc = infromacija.pocetakSav()

    for point in abc:
        myArray = 0
        presliSve = []
        putic = []
        thisPint = point
        presliSve.append(thisPint)
        alpha = point
        spoj = infromacija.dajMiSpoj(point)
        heapq.heapify(spoj)
        while spoj:
            alphaAll = 0
            getInfo = spoj
            kolikoJeSkupo, PartOne, PartTwo = heapq.heappop(getInfo)
            if PartTwo not in presliSve:
                getAllofInfo = PartTwo
                presliSve.append(getAllofInfo)
                all_Together = (PartOne, PartTwo, kolikoJeSkupo)
                putic.append(all_Together)
                putItBackIn = PartTwo
                for all in infromacija.dajMiSpoj(putItBackIn):
                    abc = spoj
                    heapq.heappush(abc, all)
        alphaTo = putic
        puteljak.append(alphaTo)

    put_1 = []
    put_2 = []
    put_3 = []
    for all in puteljak:
        put_1.append(nacrtajSvePuteve(all))
    return put_1
def print_results(a):
    totalDist = 0
    totalCost = 0
    totalTime = 0
    i = 0
    data = Data_Manager()

    print("---------Trip Through Germany - Shortest Path---------")
    print()
    print("---------Tracing path based on either Petrol, Diesel or Train per city on the path-------")
    print("Starting City:", data.city_by_key(a[0][0]))

    for conn in a:
        i += 1
        start = conn[0]
        end = conn[1]
        city = data.city_by_key(start)
        connect = data.find_connect(start, end)
        if connect == None:
            dist = conn[2]
            dies = diesel(dist)
            pet = petrol(dist)
            time = get_time(dist)
            connect = Connection(conn[0], conn[1], 0, dies, pet, 0, time, dist)
        dist, type, cost, time = connect.opt_weight()
        totalDist += dist
        totalCost += cost
        totalTime += time
        print("Stop:", i, "--", city, conn[1])
        print(str.format("Distance Traveled: {0:.4} km, by {1}, Travel Cost: ${2:.5}, Time Traveled: {3:.4} minutes",
                         float(dist), type, float(cost), float(time)))
        print("-----------------------------------------------------------")

    print()
    print(" Result Totals for Alternating Methods: ----")
    print(str.format(" Cost: ${0:.6}, Time: {1:.4} hours, Distance: {2:.6} km ({3:.6} miles)",
                     totalCost, totalTime / 60, totalDist, totalDist * .621371))
    print()
from agent import Agent

visualizer.init_visualizer()

WINDOW_SIZE = 60
BATCH_SIZE = 30
EPISODE = 8
LEARNING_RATE = 0.001
VALIDATION = 0  # use this fraction of the train data as validation data
ENSEMBLE_NUM = 16
USE_TOP_N_AGENT = ENSEMBLE_NUM // 3
ROLLING_TRAIN_TEST = False

# Set up the train / test data
dm = Data_Manager('./gaps.db', 20151113, 20180615, split_ratio=(0.6, 0.2, 0.2))
df = dm.load_db()
train_df, val_df, test_df = dm.generate_feature_df(df, WINDOW_SIZE)
print('train: {} ~ {}'.format(train_df.iloc[0].name, train_df.iloc[-1].name))
print('val : {} ~ {}'.format(val_df.iloc[WINDOW_SIZE].name, val_df.iloc[-1].name))
print('test: {} ~ {}'.format(test_df.iloc[WINDOW_SIZE].name, test_df.iloc[-1].name))
print("Number of rows - train: {}, val: {}, test: {}".format(len(train_df), len(val_df), len(test_df)))
visualizer.plot_dfs(
    [train_df, val_df.iloc[WINDOW_SIZE:], test_df.iloc[WINDOW_SIZE:]],
    ['train', 'val', 'test'])
print("Number of assets in the training data: ", len(train_df.columns.levels[0]))
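# Hedged reading of the split above: split_ratio=(0.6, 0.2, 0.2) divides the 20151113-20180615
# range into train/val/test, and val_df/test_df appear to carry WINDOW_SIZE leading rows of
# history for the feature windows, which is why reporting and plotting start at .iloc[WINDOW_SIZE].
# The check below only restates what the prints above already rely on.
assert len(val_df) > WINDOW_SIZE and len(test_df) > WINDOW_SIZE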
def main():
    global args
    args = parser.parse_args()

    # Default settings
    default_model = 'resnet18'
    default_bs = 16
    sz = 224

    # Take actions based upon initial arguments
    if args.gpu:
        # Check for GPU and CUDA libraries
        HAS_CUDA = torch.cuda.is_available()
        if not HAS_CUDA:
            sys.exit('No Cuda capable GPU detected')
    else:
        HAS_CUDA = False

    checkpoint_dir = args.save_dir

    # Define hyper-parameters
    # Note - allow dropout to be changed when resuming model
    tmp = args.dropout
    tmp = re.sub(r"[\[\]]", "", tmp)
    drops = [float(item) for item in tmp.split(',')]
    lr = args.learning_rate
    epochs = args.epochs

    # All arguments imported; set up the model depending upon whether we are
    # restarting from a checkpoint or starting from scratch
    if args.resume:
        if os.path.isdir(args.resume):
            print('Loading checkpoint...')
            sol_mgr, pt_model = utility.load_checkpoint(args.resume, lr, HAS_CUDA)
    else:
        # Define hidden layer details (note - if resuming, will continue with values used earlier)
        tmp = args.hidden_units
        tmp = re.sub(r"[\[\]]", "", tmp)
        n_hid = [int(item) for item in tmp.split(',')]

        # Check the data directory exists and assign it
        data_dir = args.data_directory
        if not os.path.exists(data_dir):
            sys.exit('Data directory does not exist')

        # Create model, datasets etc. from scratch
        # Create datasets and dataloaders
        phrases = ['train', 'valid', 'test']

        # Define data transforms
        data_transforms = {
            'train': transforms.Compose([
                transforms.RandomResizedCrop(sz),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]),
            'valid': transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(sz),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]),
            'test': transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(sz),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]),
        }
        bs = args.batch_size
        data = Data_Manager(data_dir, phrases, data_transforms, bs)

        # Load cat_to_name
        cat_to_name = utility.load_classes('cat_to_name.json')
        num_cat = len(cat_to_name)

        # Load pre-trained model
        if args.arch is not None:
            pt_model = args.arch
        else:
            pt_model = default_model
        model_pt = models.__dict__[pt_model](pretrained=True)
        num_ftrs = model_pt.fc.in_features

        # Create classifier model
        img_cl = Composite_Classifier(model_pt, n_hid, drops, num_cat)

        # Move to CUDA if available
        if HAS_CUDA:
            img_cl.cuda()

        # Define losses and hyper-parameters
        criterion = nn.CrossEntropyLoss()

        # Optimise just the parameters of the classifier layers
        optimizer_ft = optim.SGD(img_cl.cf_layers.parameters(), lr=lr, momentum=0.9)
        exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

        # Freeze the pre-trained model layers
        for param in img_cl.model_pt.parameters():
            param.requires_grad = False

        # Create model manager to control training, validation, test and predict with the model and data
        sol_mgr = Solution_Manager(img_cl, criterion, optimizer_ft, exp_lr_scheduler,
                                   data, phrases, HAS_CUDA=HAS_CUDA)
        sol_mgr.model.class_to_idx = data.image_datasets['train'].class_to_idx

    # Train model
    sol_mgr.train(epochs=epochs)

    # Evaluate model against test set
    sol_mgr.test_with_dl()

    # Save Checkpoint
    utility.save_checkpoint(args.save_dir, sol_mgr, pt_model, HAS_CUDA)
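# The module-level argparse parser is not shown in this snippet; a minimal sketch consistent
# with the attributes main() reads (argument names, types and defaults here are assumptions):
#
#     parser = argparse.ArgumentParser(description='Train an image classifier')
#     parser.add_argument('data_directory', type=str)
#     parser.add_argument('--save_dir', default='checkpoints')
#     parser.add_argument('--arch', default=None)
#     parser.add_argument('--hidden_units', default='[512]')
#     parser.add_argument('--dropout', default='[0.2]')
#     parser.add_argument('--learning_rate', type=float, default=0.01)
#     parser.add_argument('--epochs', type=int, default=5)
#     parser.add_argument('--batch_size', type=int, default=16)
#     parser.add_argument('--gpu', action='store_true')
#     parser.add_argument('--resume', default='')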
def load_checkpoint(dir_name, lr, HAS_CUDA):
    # Load model parameters
    if HAS_CUDA:
        device = torch.cuda.current_device()
        print(f'Cuda device: {device}')
        checkpoint = torch.load(
            dir_name + '/' + 'checkpoint.pth.tar',
            map_location=lambda storage, loc: storage.cuda(device))
        print('Loaded CUDA version')
    else:
        checkpoint = torch.load(dir_name + '/' + 'checkpoint.pth.tar',
                                map_location='cpu')

    # Load pretrained model
    pt_model = checkpoint['pt_model']
    model_pt = models.__dict__[pt_model](pretrained=True)

    # Recreate model
    img_cl = Composite_Classifier(model_pt, checkpoint['n_hid'],
                                  checkpoint['drops'], checkpoint['num_cat'])

    # Load model state dictionary
    img_cl.load_state_dict(checkpoint['model'])

    # Recreate optimiser
    optimizer_ft = optim.SGD(img_cl.cf_layers.parameters(), lr=0.001, momentum=0.9)
    optimizer_ft.load_state_dict(checkpoint['optimizer'])
    old_lr = optimizer_ft.param_groups[0]['lr']
    last_epoch_trained_upon = checkpoint['epochs']
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

    # Now move the optimiser to the GPU if necessary
    if HAS_CUDA:
        for state in optimizer_ft.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.to(device)

    if old_lr != lr:
        optimizer_ft.param_groups[0]['lr'] = lr
    else:
        # If lr has not been updated, put the scheduler back to where it was
        exp_lr_scheduler.last_epoch = last_epoch_trained_upon

    # Recreate data object
    data = Data_Manager(checkpoint['data_dir'], checkpoint['phases'],
                        checkpoint['data_tfms'], checkpoint['bs'])

    # Recreate model manager class instance
    phases = checkpoint['phases']
    model_mgr = Solution_Manager(img_cl, checkpoint['loss_function'], optimizer_ft,
                                 exp_lr_scheduler, data, phases, HAS_CUDA)

    # Restore model manager state variables
    model_mgr.epochs = checkpoint['epochs']
    model_mgr.loss_function = checkpoint['loss_function']
    model_mgr.best_accuracy = checkpoint['best_accuracy']
    model_mgr.best_corrects = checkpoint['best_corrects']
    model_mgr.best_loss = checkpoint['best_loss']
    model_mgr.model.class_to_idx = checkpoint['class_to_idx']

    if HAS_CUDA:
        model_mgr.model.cuda()

    # Freeze the pre-trained model layers
    for param in img_cl.model_pt.parameters():
        param.requires_grad = False

    print('Checkpoint loaded')
    return model_mgr, pt_model
def sveIspisi(a):
    finalSTep_1 = 0
    finalSTep_2 = 0
    finalSTep_3 = 0
    dobro = 0
    xr = Data_Manager()

    print("Welcome to Trip to Germany!")
    print("Trip consists of these cities all together")
    print("Rostock ")
    print("Lubeck (home of the best marzipan) ")
    print("Hamburg (Oma/Opa want to drive under the river - a taxi can do this as well)")
    print("Bremen ")
    print("Hannover (Consumer Electronics haven - purchase each a new iPad at 180 Euros each) ")
    print("Kassel ")
    print("Dusseldorf ")
    print("Koln (taxi will be needed to visit the castle 10km away from the hauptbahnhof) ")
    print("St. Augustine ")
    print("Bonn ")
    print("Wiesbaden ")
    print("** Frankfurt ")
    print("Mannheim ")
    print("Karlsruhe ")
    print("Baden Baden (Oma wants to visit a Spa here, therefore, you will need to spend the day) ")
    print("** Stuttgart ")
    print("** Munchen (Munich)")
    print("Nurnberg")
    print("Dresden ")
    print("Leipzig ")
    print("** Berlin")
    print("Basel, Switzerland (Opa and Dad want to purchase a nice watch and this is the best place for such a purchase - you will be spending $6k/watch) ")
    print()
    print("Here is the way using Petrol, Diesel or Train for every city")
    print("Cities of this trip:", xr.dajmigradpoGrad(a[0][0]))

    for sastavi in a:
        dobro = dobro + 1
        b = sastavi[0]
        f = sastavi[1]
        city = xr.dajmigradpoGrad(b)
        ovajSPoj = xr.nadjiSpojeve(b, f)
        if ovajSPoj == None:
            index = 0
            a = sastavi[2]
            b = sveGorivo(a)
            c = sveUP(a)
            d = sveVrijeme(a)
            xrx = sastavi[0]
            xrd = sastavi[1]
            ovajSPoj = Connection(xrx, xrd, 0, b, c, 0, d, a)
        putic, vrsta, cena, vrim = ovajSPoj.sveTezine()
        way_1 = finalSTep_1 + putic
        finalSTep_1 = way_1
        way_2 = finalSTep_2 + cena
        finalSTep_2 = way_2
        way_3 = finalSTep_3 + vrim
        finalSTep_3 = way_3
        putit = sastavi[1]
        print("Distance Number:", dobro, "", city, putit)
        atm = float(putic)
        tip = vrsta
        fl = float(cena)
        vm = float(vrim)
        print(str.format("So now we know that traveling distance is: {0:.4} km, by {1}, traveling cost is: ${2:.5}, and traveling time is: {3:.4} minutes",
                         atm, tip, fl, vm))
        print("")

    print()
    print("Here are the alternating methods:")
    tour = finalSTep_2
    tourtwo = finalSTep_3 / 60
    tourthree = finalSTep_1
    tourfour = finalSTep_1 * .621371
    print(str.format(" and all together the Cost is : ${0:.6}, Time is: {1:.4} hours, Distance is: {2:.6} km ({3:.6} miles)",
                     tour, tourtwo, tourthree, tourfour))
    print()
class Othello_Network():
    def __init__(self, board_dim=8, time_steps=1, n_filters=256, conv_size=3, n_res=40, c=.1):
        """
        :param board_dim: dimension of game board
        :param time_steps: number of time steps kept in state history
        :param n_filters: number of convolutional filters per conv layer
        :param conv_size: size of convolutions
        :param n_res: number of residual layers
        :param c: regularization scale constant
        """
        self.board_dim = board_dim
        self.time_steps = time_steps
        self.losses = None
        self.n_conv_filters = n_filters
        self.conv_size = conv_size
        self.n_res_layers = n_res
        self.regularizer = tf.contrib.layers.l2_regularizer(scale=c)
        self.dm = Data_Manager(max_size=(board_dim**2 - 4) * 500)  # moves per game TIMES num games to save

        # --------------
        # Make Network
        # --------------
        with tf.Graph().as_default() as net1_graph:
            self.input_layer = tf.placeholder(
                shape=[None, self.board_dim, self.board_dim, (self.time_steps * 2 + 1)],
                dtype=tf.float32,
                name='input')

            self.net = self._add_conv_layer(self.input_layer, name='conv1')
            for i in range(self.n_res_layers):
                self.net = self._add_res_layer(self.net, name='res{}'.format(i + 1))

            self.policy_logits = self._policy_head(self.net)
            self.value_estimate = self._value_head(self.net)

            self.mcts_pi = tf.placeholder(shape=[None, (self.board_dim**2 + 1)],
                                          dtype=tf.float32, name='pi')
            self.winner_z = tf.placeholder(shape=[None, 1], dtype=tf.float32, name='z')

            # Loss, composed of cross entropy, mse, and regularization
            xent = tf.nn.softmax_cross_entropy_with_logits(labels=self.mcts_pi,
                                                           logits=self.policy_logits)
            mse = tf.losses.mean_squared_error(self.winner_z, self.value_estimate)
            reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            # Note: an AlphaZero-style objective normally adds the cross entropy
            # (mse + xent + regularization); the minus sign here looks suspect.
            self.loss = tf.reduce_mean(mse - xent + sum(reg_losses))
            self.optimizer = tf.train.AdamOptimizer().minimize(self.loss)  # tune learning rate

            # more ops
            self.init_op = tf.global_variables_initializer()
            self.saver = tf.train.Saver()

        # initialize session
        self.sess = tf.Session(graph=net1_graph)
        self.sess.run(self.init_op)

    def _add_conv_layer(self, input_layer, name=None):
        """
        Create a general convolutional layer.
        :param input_layer: the previous network layer to build on
        :param name: name of this layer
        :return: the output layer
        """
        conv = tf.layers.conv2d(inputs=input_layer,
                                filters=self.n_conv_filters,
                                kernel_size=[self.conv_size, self.conv_size],
                                padding="same",
                                kernel_regularizer=self.regularizer,
                                name=name)
        bn = slim.batch_norm(conv)
        output = tf.nn.relu(bn)
        return output

    def _add_res_layer(self, input_layer, name):
        """
        Create a general residual layer
        :param input_layer: the previous network layer to build on
        :param name: the name of this layer
        :return: the output layer
        """
        conv1 = tf.layers.conv2d(inputs=input_layer,
                                 filters=self.n_conv_filters,
                                 kernel_size=[self.conv_size, self.conv_size],
                                 padding="same",
                                 kernel_regularizer=self.regularizer,
                                 name='{}_c1'.format(name))
        bn1 = slim.batch_norm(conv1)
        relu1 = tf.nn.relu(bn1)
        conv2 = tf.layers.conv2d(inputs=relu1,
                                 filters=self.n_conv_filters,
                                 kernel_size=[self.conv_size, self.conv_size],
                                 padding="same",
                                 kernel_regularizer=self.regularizer,
                                 name='{}_c2'.format(name))
        bn2 = slim.batch_norm(conv2)
        skip_connection = input_layer + bn2
        output = tf.nn.relu(skip_connection)
        return output

    def _policy_head(self, input_layer):
        """
        Estimate move probability distribution by applying the policy head network to input_layer.
        :param input_layer: layer to apply policy head to.
        :param softmax: whether or not to softmax the logits into a probability distribution.
        :return: vector of board_dim * board_dim + 1 logits.
        """
        conv = tf.layers.conv2d(inputs=input_layer,
                                filters=2,
                                kernel_size=[1, 1],
                                padding="same",
                                kernel_regularizer=self.regularizer)
        bn = slim.batch_norm(conv)
        relu = tf.nn.relu(bn)
        fc = tf.layers.dense(inputs=tf.contrib.layers.flatten(relu),
                             units=(self.board_dim * self.board_dim + 1),
                             kernel_regularizer=self.regularizer)
        return fc

    def _value_head(self, input_layer):
        """
        Estimate value of board state by applying the value head network to input_layer.
        :param input_layer: the layer to apply value head to
        :return: scalar estimating value of board position (between -1 and 1)
        """
        conv = tf.layers.conv2d(inputs=input_layer,
                                filters=1,
                                kernel_size=[1, 1],
                                padding="same",
                                kernel_regularizer=self.regularizer)
        bn = slim.batch_norm(conv)
        relu1 = tf.nn.relu(bn)
        fc1 = tf.layers.dense(inputs=tf.contrib.layers.flatten(relu1),
                              units=256,
                              kernel_regularizer=self.regularizer)
        relu2 = tf.nn.relu(fc1)
        fc2 = tf.layers.dense(inputs=relu2,
                              units=1,
                              kernel_regularizer=self.regularizer)
        output = tf.nn.tanh(fc2)
        return output

    def add_training_data(self, states, pis, zs):
        """
        Add data to the data manager.
        :param states: N X board_size X board_size X 3 array of states
        :param pis: N X (board_size**2 + 1) array of move distributions
        :param zs: N X 1 array of winners.
        """
        self.dm.add_data(states, pis, zs)

    def estimate_policy(self, state, soft=True):
        """
        Estimate policy distribution for a state.
        :param state: Must be batch_size X board_size X board_size X 3, even if batch size is 1
        :param soft: Whether to softmax the logits before returning or not. Default True.
        :return: estimated policy distribution
        """
        feed_dict = {self.input_layer: state}
        logits = self.sess.run([self.policy_logits], feed_dict=feed_dict)[0]
        if soft:
            policy = softmax(logits)
            return policy
        else:
            return logits

    def estimate_value(self, state):
        """
        Estimate value of a state (probability of current player winning)
        :param state: Must be batch_size X board_size X board_size X 3, even if batch size is 1
        :return: estimated value, between -1 and 1
        """
        feed_dict = {self.input_layer: state}
        return self.sess.run([self.value_estimate], feed_dict=feed_dict)[0]

    def save_weights(self, path="/tmp/model.ckpt"):
        """
        Save the current weights of the network
        :param path: the path to saved files
        """
        self.saver.save(self.sess, path)

    def load_weights(self, path="/tmp/model.ckpt"):
        """
        Load network weights from file.
        :param path: the path to saved files
        """
        self.saver.restore(self.sess, path)

    def train(self, n_iters=1000, batch_size=1024, verbose=True):
        """
        Train the network some amount.
        :param n_iters: How many batches to train on.
        :param batch_size: Size of each batch.
        :return: list of losses
        """
        losses = []
        # sample mini-batch of 2048
        print("Training Network")
        for i in trange(n_iters):
            state_batch, pi_batch, z_batch = self.dm.get_batch(batch_size)
            feed_dict = {
                self.input_layer: state_batch,
                self.mcts_pi: pi_batch,
                self.winner_z: z_batch
            }
            _ = self.sess.run([self.optimizer], feed_dict=feed_dict)
            if i % 2 == 0:
                l = self.sess.run([self.loss], feed_dict=feed_dict)  # probably don't need to run loss every time
                losses.append(l)
                if verbose:
                    if i % 100 == 0:
                        print("{}: loss: {}".format(i, l))

        if self.losses is not None:
            self.losses.extend(losses)
        else:
            self.losses = losses

        # Save losses to pickle file
        cPickle.dump(self.losses, open("loss.cpkl", 'wb'))
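# Hedged smoke-test sketch for Othello_Network: with board_dim=8 and time_steps=1 the input
# placeholder above is N x 8 x 8 x 3 and the policy has 8*8 + 1 = 65 entries. The random
# tensors are purely illustrative, and the small n_res/batch values are just to keep the
# example quick; whether get_batch() accepts such a tiny buffer depends on Data_Manager.
if __name__ == '__main__':
    import numpy as np
    net = Othello_Network(board_dim=8, time_steps=1, n_res=2)
    states = np.random.rand(4, 8, 8, 3).astype(np.float32)
    pis = np.random.rand(4, 65).astype(np.float32)
    zs = np.random.choice([-1.0, 1.0], size=(4, 1)).astype(np.float32)
    net.add_training_data(states, pis, zs)
    net.train(n_iters=2, batch_size=4, verbose=False)
    print(net.estimate_policy(states).shape)  # expected: (4, 65)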
def uSuprotnomStanju_2(into, dane):
    dayandMonth = []
    xtraExpense = 0
    podjeli = dane / 7
    s = math.ceil(float(podjeli))
    all = 0
    podjeli = 600 * s
    sveKuljeno = podjeli
    podjeliOne = 700 * s
    sveodD = podjeliOne
    podj = 2415 * s
    sveodT = podj
    sveodV = 0
    sveodAu = 0
    i = 0
    arrayOne = []
    arrayTwo = []
    informacija = Data_Manager()

    for sopjJE in into:
        sp = sopjJE[0]
        ab = sp
        drp = sopjJE[1]
        db = drp
        brb = informacija.nadjiSpojeve(ab, db)
        if brb == None:
            one = sopjJE[2]
            to = one
            two = sopjJE[2]
            on = two
            three = sveUP(to)
            mr = three
            four = sveVrijeme(to)
            vr = four
            tr = sopjJE[0]
            brg = sopjJE[1]
            brb = Connection(tr, brg, 0, on, mr, 0, vr, to)
        ted = all + brb.range
        all = ted
        tedOne = sveKuljeno + brb.CjenaP
        sveKuljeno = tedOne
        opa = sveodD + brb.CjenaDizela
        sveodD = opa
        if brb.CjenaVoza == 0:
            abc = sveodT + brb.CjenaP + 115
            sveodT = abc
            zed = sveodV + brb.vrijemeAuta
            sveodV = zed
        sveodV = sveodV + brb.VrijemeVoza
        sveodAu = sveodAu + brb.vrijemeAuta

    natri = sveKuljeno + sveodAu
    nacetri = sveodT + sveodAu
    Uno = sveodD + sveodAu
    Due = sveodT + sveodAu
    if (natri < nacetri):
        if (sveKuljeno < sveodD):
            vratinazadOpet = sveKuljeno, sveodAu, all
            return vratinazadOpet
        else:
            vratise = sveodD, sveodAu, all
            return vratise
    elif (Uno < Due):
        allIt = sveodD, sveodAu, all
        return allIt
    else:
        allOtherStuff = sveodT, sveodV, all
        return allOtherStuff
def uSuprotnomStanju(into, dane):
    dayandMonth = []
    xtraExpense = 0
    podjeli = dane / 7
    s = math.ceil(float(podjeli))
    all = 0
    podjeli = 600 * s
    sveKuljeno = podjeli
    podjeliOne = 700 * s
    sveodD = podjeliOne
    podj = 2415 * s
    sveodT = podj
    sveodV = 0
    sveodAu = 0
    i = 0
    arrayOne = []
    arrayTwo = []
    informacija = Data_Manager()

    for sopjJE in into:
        sp = sopjJE[0]
        ab = sp
        drp = sopjJE[1]
        db = drp
        brb = informacija.nadjiSpojeve(ab, db)
        if brb == None:
            one = sopjJE[2]
            to = one
            two = sopjJE[2]
            on = two
            three = sveUP(to)
            mr = three
            four = sveVrijeme(to)
            vr = four
            tr = sopjJE[0]
            brg = sopjJE[1]
            brb = Connection(tr, brg, 0, on, mr, 0, vr, to)
        ted = all + brb.range
        all = ted
        tedOne = sveKuljeno + brb.CjenaP
        sveKuljeno = tedOne
        opa = sveodD + brb.CjenaDizela
        sveodD = opa
        if brb.CjenaVoza == 0:
            abc = sveodT + brb.CjenaP + 115
            sveodT = abc
            zed = sveodV + brb.vrijemeAuta
            sveodV = zed
        sveodV = sveodV + brb.VrijemeVoza
        sveodAu = sveodAu + brb.vrijemeAuta

    natri = sveKuljeno + sveodAu
    nacetri = sveodT + sveodAu
    Uno = sveodD + sveodAu
    Due = sveodT + sveodAu
    if (natri < nacetri):
        if (sveKuljeno < sveodD):
            vratinazadOpet = sveKuljeno, sveodAu, all
            return vratinazadOpet
        else:
            vratise = sveodD, sveodAu, all
            return vratise
    elif (Uno < Due):
        allIt = sveodD, sveodAu, all
        return allIt
    else:
        allOtherStuff = sveodT, sveodV, all
        return allOtherStuff

    # Note: every branch above returns, so the summary below is unreachable as written.
    print("---Alternate statistics comparing different travel methods---")
    print(" *Costs are based on weekly rates using only car or train,")
    print(" *When calculating Train, uses price for petrol when train is not available")
    print()
    print(" By Train: ----\n")
    a = sveodT
    b = sveodV / 60
    d = all * .621371
    print(str.format(" Cost: ${0:.6}, Time: {1:.4} hours, Distance: {2:.6} km ({3:.6} miles)", a, b, all, d))
    print()
    print(" By Petrol Car: ----\n")
    ye = sveKuljeno
    de = sveodAu / 60
    ze = all * .621371
    print(str.format(" Cost: ${0:.6}, Time: {1:.4} hours, Distance: {2:.6} km ({3:.6} miles)", ye, de, all, ze))
    print()
    print(" By Diesel Car: ----\n")
    svejeOnDa = all * .621371
    print(str.format(" Cost: ${0:.6}, Time: {1:.4} hours, Distance: {2:.6} km ({3:.6} miles)",
                     sveodD, sveodAu / 60, all, svejeOnDa))