def __init__(self, base, field, strategy):
    """Build unit from component prototypes."""
    for prototype in (base, field, strategy):
        for name, dfield in prototype.__dataclass_fields__.items():
            value = getattr(prototype, name)
            if not isinstance(value, dfield.type):
                tools(log=f"wrong type: {name}, {dfield.type}, {value}")()
            setattr(self, name, value)

def __call__(self, name, coords, strategy, is_centurion=False):
    try:
        base = self._registery[name]
    except KeyError:
        tools(log=f"Unregistered name: {name}")()
        base = UnitBase(0, 0, 0, 0)
    field = UnitField(self.side, coords, is_centurion)
    strat = Strategy(*strategy)
    self.units.append(Unit(base, field, strat))
    return self.units[-1]

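# Self-contained sketch of the prototype-copy pattern used by the two methods
# above: every dataclass field of each prototype is copied onto the composite
# object. The names Base, Field, and Composite are illustrative only, not the
# source's UnitBase/UnitField/Unit.
from dataclasses import dataclass

@dataclass
class Base:
    hp: int = 10

@dataclass
class Field:
    x: float = 0.0
    y: float = 0.0

class Composite:
    def __init__(self, *prototypes):
        # copy every dataclass field of each prototype onto this object
        for proto in prototypes:
            for name in proto.__dataclass_fields__:
                setattr(self, name, getattr(proto, name))

c = Composite(Base(12), Field(1.0, 2.0))
print(c.hp, c.x, c.y)  # 12 1.0 2.0
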
def __init__(self, ip, mac, mask):
    self.myip = ip
    self.mymac = mac
    self.mymask = mask
    self.tools = tools()
    self.mem = memory()
    self.network_map = network_map(ip, mac)

def simulate(self, max_sampling=200):
    avail = list(self.pool)
    picked = []
    for r in range(self.Round):
        if r in self.order:
            selected = self.R.pick(r, picked, avail, max_sampling)
        else:
            selected = self.D.pick(r, picked, avail, max_sampling)
        if selected not in avail:
            print(selected)
        avail.remove(selected)
        picked.append(selected)
    lineup = self.assign_team(picked)
    # print(lineup)
    lineup = np.array(lineup).reshape(1, 10)
    win_probability = self.predictor.predict(lineup)
    # print(win_probability)
    if self.show_result:
        print()
        print('Radiant: ', lineup[0, :5], ' ', 'Dire: ', lineup[0, 5:])
        if self.show_name:
            t = tools.tools()
            t.show_name_lineup(lineup[0])
        if win_probability > 0.5:
            print('Radiant win')
        else:
            print('Dire win')
    return int(win_probability > 0.5)

def __init__(self, ip, mac, mask):
    self.myip = ip
    self.mymac = mac
    self.mymask = mask
    self.tools = tools()
    self.mem = memory()
    self.network_map = network_map(ip, mac)

def __init__(self, ip, mac):
    self.myip = ip
    self.mymac = mac
    self.tools = tools()
    self.mem = memory()
    self.network = network()
    self.replyTimeout = 20

def __init__(self, ip, mac):
    self.myip = ip
    self.mymac = mac
    self.tools = tools()
    self.mem = memory()
    self.network = network()
    self.replyTimeout = 20

def __init__(self):
    self.tools = tools.tools()
    self.include = []
    self.stdIncludes = []
    self.forwardDeclLHCb = []
    self.forwardDeclGlob = {}
    self.forwardIncl = []
    self.verbatimLHCb = []

def __init__(self, ip, mac):
    self.myip = ip
    self.mymac = mac
    self.tools = tools()
    self.mem = memory()
    self.ruleconstructor = ruleconstructor()
    self.recv_target = None
    self.sent_target = None
    self.network = network()

def __init__(self, godRoot):
    genSrcUtils.genSrcUtils.__init__(self)
    self.godRoot = godRoot
    self.tools = tools.tools()
    self.packInclude = []
    self.packExcludes = []
    self.forwardDeclLHCb = []
    self.forwardDeclGlob = {}
    self.forwardIncl = []

def __init__(self, ip, mac):
    self.myip = ip
    self.mymac = mac
    self.tools = tools()
    self.mem = memory()
    self.ruleconstructor = ruleconstructor()
    self.recv_target = None
    self.sent_target = None
    self.network = network()

def __init__(self, godRoot):
    genSrcUtils.genSrcUtils.__init__(self)
    self.godRoot = godRoot
    self.tools = tools.tools()
    self.packInclude = []
    self.packExcludes = []
    self.forwardDeclLHCb = []
    self.forwardDeclGlob = {}
    self.forwardIncl = []
    self.log = logging.getLogger('GODGenNamespaces')

def __init__(self):
    print('---------------------')
    self.energy_output = energy_adhoc()
    self.tcp_cbr_output = tcp_cbr()
    self.tool = tools()
    self.result = []
    self.result_cols = []
    self.result_DataFrame = DataFrame()
    print('---------------------')
    print('Welcome Pipeline')
    print('---------------------')

def __init__(self, sizeOfMemory):
    # initialize memory
    self.__setMemory(sizeOfMemory)
    # initialize registers
    self.__setRegister()
    # initialize condition codes
    self.__setCC()
    # initialize program counter
    # initialize halt state
    self.haltState = 0
    self.tools = tools.tools()

def main():
    learning_rate = 0.000001
    max_iterations = 1
    linear_regression = mlr()
    regr = linear_model.LinearRegression()
    tools = t.tools()
    tools.loadDataset('bike_sharing.csv')
    trfeatures, trlabels, ttfeatures, ttlabels = tools.splitDataset()
    regr.fit(trfeatures, trlabels)
    linear_regression.fit(trfeatures, trlabels, learning_rate, max_iterations)
    print('sklearn = %f' % regr.score(ttfeatures, ttlabels))
    print('self = %f' % linear_regression.score(ttfeatures, ttlabels))

def main():
    tools = t.tools()
    kn = knn(3)
    knn3 = KNeighborsClassifier(algorithm='brute', n_neighbors=3)
    tools.loadDataset('haberman.data')
    trfeatures, trlabels, ttfeatures, ttlabels = tools.splitDataset()
    kn.fit(trfeatures, trlabels)
    knn3.fit(trfeatures, trlabels)
    selfScore = kn.score(ttfeatures, ttlabels)
    spScore = knn3.score(ttfeatures, ttlabels)
    print("Score: ")
    print("implemented knn =", selfScore)
    print("sklearn knn =", spScore)

def getNeighbors(self, trfeatures, trlabels, current, k):
    distances = []
    neighbors = []
    tools = t.tools()
    # considering that current doesn't have a label anymore
    length = len(current)
    # if one wants to pass current with label:
    # length = len(current) - 1
    for x in range(len(trfeatures)):
        dist = tools.euclidianDistance(current, trfeatures[x], length)
        distances.append((trlabels[x], dist))
    # sort the distance list by distance
    distances.sort(key=operator.itemgetter(1))
    # keep the labels of the k nearest neighbors
    for x in range(k):
        neighbors.append(distances[x][0])
    return neighbors

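# Minimal, self-contained sketch (illustrative, not the project's tools module)
# of a distance helper with the call shape assumed above,
# tools.euclidianDistance(a, b, length): Euclidean distance over the first
# `length` coordinates only.
import math

def euclidian_distance(a, b, length):
    # sum of squared differences over the first `length` features, then the root
    return math.sqrt(sum((a[i] - b[i]) ** 2 for i in range(length)))

print(euclidian_distance([1, 2, 3], [4, 6, 3], 2))  # 5.0
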
def __init__(self, args):
    self.fullCommand = " ".join(args)
    self.tools = tools.tools()
    self.version = "v8r0"
    self.xmlSources = []
    self.xmlDBFile = ""
    self.xmlDBFileExtra = []
    self.linkDefOutput = os.curdir
    self.srcOutput = os.curdir
    self.dictOutput = os.curdir
    self.argv0 = args[0]
    self.godRoot = ""
    self.gClasses = 1
    self.gLinkDef = 0
    self.gClassDicts = 1
    self.gNamespaces = 1
    self.gAssocDicts = 1
    self.allocatorType = "FROMXML"
    self.default_namespace = "JM"
    self.parseArgs(args[1:])

def pick(self, Round, picked, avail=None, Max_sampling=0):
    """
    Prompt the player to pick a hero.
    avail: list of available heroes
    picked: list of picked heroes
    Max_sampling: unused
    """
    if avail is None:
        avail = list(self.pool)
    for hero in picked:
        if hero in avail:
            avail.remove(hero)
    R_team = picked[::2]
    D_team = picked[1::2]
    if self.side == 'Radiant':
        your_team = R_team
        enemy_team = D_team
    else:
        your_team = D_team
        enemy_team = R_team
    print()
    print('Radiant team: ', R_team, 'Dire team: ', D_team)
    if self.show_name:
        t = tools.tools()
        t.show_name_single(R_team, 'Radiant')
        t.show_name_single(D_team, 'Dire')
    print(self.side + "'s turn: ")
    while True:
        try:
            hero = int(input('Pick your hero ID: '))
        except ValueError:
            print("Invalid input")
            continue
        if hero in avail:
            return hero
        print('Unavailable hero, pick again')

def __init__(self):
    print('Using Tcp_Cbr backend.')
    self.tool = tools(False)

for decision in self.root_entity_types:
    file.write('Decision' + str(decision_counter) + '\n' + '\n')
    for grand_child in decision:
        words = grand_child.attrib['href']
        (start, stop) = self.get_words_interval_for_speaker(words)
        speaker = start[start.find('.') + len('.'):start.rfind('.')]
        file.write(speaker + ' : ')
        if stop is not None:
            phrase = self.get_word_interval(start, stop, word_roots[speaker])
        else:
            phrase = self.get_Word(start, word_roots[speaker], speaker)
        if phrase is not None:
            file.write(phrase + '\n')
    file.write('\n')
    decision_counter += 1
file.close()
return


tool = tools.tools('decision/manual')
for file in tool.get_list()[0:1]:
    print(file)
    d = decision(file)
    d.get_decisions()

def __init__(self, args=None):
    """
    Initialize the script.
    Note that for backward compatibility the list of arguments is the full
    content of sys.argv (which is not what optparse expects).
    """
    self._parser = None  # to make pylint happy
    self._setupOptParser()
    # FIXME: backward compatibility, sys.args[0] should not be in args
    if args:
        self._parser.prog = args[0]
        args = args[1:]
    self.opts, self.args = self._parser.parse_args(args)
    if "VERBOSE" in os.environ:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARNING)
    # self._log = logging.getLogger(self._parser.get_prog_name())
    self._log = logging.getLogger('GODII')
    # check options
    if self.opts.dtd == '':
        self._parser.error("invalid value for option -t (--dtd)")
    if self.opts.namespace == '':
        self._parser.error("invalid value for option -n (--namespace)")
    if not self.opts.root:
        self._parser.error('root dir not defined and environment '
                           'variable GAUDIOBJDESCROOT not set')
    # fix options
    if self.opts.classdb is None:
        self.opts.classdb = os.path.join(self.opts.root, 'xml_files',
                                         'GODsClassDB.xml')
    ####
    # Backward compatibility
    ####
    self.fullCommand = ' '.join([self._parser.get_prog_name()] + args)
    self.tools = tools.tools()
    self.version = self._parser.version
    self.xmlSources = self.args
    self.xmlDBFileExtra = self.opts.info_file
    self.argv0 = self._parser.get_prog_name()
    self.godRoot = self.opts.root
    # if self.opts.generate is not set (None), all the options are selected
    self.gClasses = self.opts.generate in ('src', None)
    self.gClassDicts = self.opts.generate in ('dct', None)
    self.gNamespaces = self.opts.generate in ('src', None)
    self.gAssocDicts = self.opts.generate in ('dct', None)
    self.allocatorType = self.opts.allocator
    self.default_namespace = self.opts.namespace
    self.dtdPath = self.opts.dtd
    try:
        self.xmlDBFile = envOption(self.opts.classdb, 'GODXMLDB', '-x')
        self.srcOutput = envOption(self.opts.src_output, 'GODDOTHOUT', '-s')
        self.dictOutput = envOption(self.opts.dict_output, 'GODDICTOUT', '-d')
    except KeyError as x:
        msg = ('Option "{1} env" used without environment variable {0} '
               'declared'.format(*x.args))
        self._parser.error(msg)

    # a root for abstractive resume
    self.root_abstract = tree.getroot()
    return

def get_abstractive_resume(self):
    # this part has several children: abstract, actions, decisions, problems
    self.initializations()
    self.meeting = self.get_meeting_name_byFile(self.file_name)
    self.get_root()
    file = open('./manual_resume_abstractive/' + self.meeting + '.txt', 'w')
    for child in self.root_abstract:
        file.write('\n' + child.tag + ' : \n')
        for sentence in child:
            file.write(sentence.text + '\n')
    file.close()
    return


tool = tools.tools('abstractive')
for file in tool.get_list():
    print(file)
    d = abstractive(file)
    d.get_abstractive_resume()

def __init__(self, ip, mac):
    self.myip = ip
    self.mymac = mac
    self.tools = tools()
    self.mem = memory()
    self.ruleconstructor = ruleconstructor()

def __init__(self, ip, mac):
    self.myip = ip
    self.mymac = mac
    self.tools = tools()
    self.mem = memory()

def __init__(self):
    self.tools = tools.tools()

def train(self):
    with tf.Graph().as_default():
        input_isp = tf.placeholder(dtype=tf.float32,
                                   shape=[self.batch_size, self.crop_size, self.crop_size, 3],
                                   name='input_isp')
        input_img = tf.placeholder(dtype=tf.float32,
                                   shape=[self.batch_size, self.crop_size, self.crop_size, 1],
                                   name='input_img')
        gt_img = tf.placeholder(dtype=tf.float32,
                                shape=[self.batch_size, 2 * self.crop_size, 2 * self.crop_size, 3],
                                name='gt_img')
        learning_rate = tf.placeholder(dtype=tf.float32, name='learning_rate')
        mymodel = model(input_img=input_img, input_isp=input_isp, gt_img=gt_img, reuse=None)
        mytool = tools()
        loss, psnr, ssim, res = mymodel.build_model()
        index_queue = tf.train.range_input_producer(limit=self.training_capacity, shuffle=True)
        queue_op = index_queue.dequeue_many(self.batch_size)
        step = tf.Variable(0, trainable=False)
        training_op = tf.train.AdamOptimizer(learning_rate).minimize(loss=loss, global_step=step)
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options), graph=tf.get_default_graph())
        saver = tf.train.Saver()
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(sess=sess, coord=coord)
        if self.pretrained:
            # sess.run(tf.global_variables_initializer())
            saver.restore(sess=sess, save_path=tf.train.latest_checkpoint(self.log_dir))
            print('Finish loading pretrained model!')
        else:
            sess.run(tf.global_variables_initializer())
        for num_epoch in range(self.epoch_num, self.max_epoch + 1):
            train_loss = []
            train_psnr = []
            train_ssim = []
            for batch_num in range(0, int(self.batch_per_epoch)):
                batch_index = sess.run(queue_op)
                train_data, ground_data, isp_data = mytool.load_train_data(batch_index)
                l_r = max(self.learning_rate * pow(self.decay_coef, num_epoch),
                          self.switch_learning_rate) \
                    if num_epoch < self.switch_epoch else self.switch_learning_rate
                print(l_r)
                feed_dict = {input_img: train_data, input_isp: isp_data,
                             gt_img: ground_data, learning_rate: l_r}
                _, l, p, s, ss, r = sess.run([training_op, loss, psnr, ssim, step, res],
                                             feed_dict=feed_dict)
                print('epoch: %d, batch: %d, l1 loss: %.6f, psnr: %.6f, ssim: %.6f' % (
                    num_epoch, batch_num, l, p, s))
                train_loss.append(l)
                train_psnr.append(p)
                train_ssim.append(s)
            saver.save(sess=sess, save_path=self.log_dir + '/model.ckpt', global_step=step)
            if num_epoch % self.test_ratio == 0 and self.testing and not self.real:
                print('Come to test')
                test_loss = []
                test_psnr = []
                test_ssim = []
                test_input_img = tf.placeholder(dtype=tf.float32,
                                                shape=[1, self.crop_size, self.crop_size, 1],
                                                name='test_input_img')
                test_input_isp = tf.placeholder(dtype=tf.float32,
                                                shape=[1, self.crop_size, self.crop_size, 3],
                                                name='test_input_isp')
                test_model = model(input_img=test_input_img, input_isp=test_input_isp,
                                   gt_img=None, reuse=True)
                t_loss, t_psnr, t_ssim, t_res = test_model.build_model()
                for i in range(self.testing_capacity):
                    print(i)
                    test_train_data, test_isp_data, h, w, test_name, gt_img_, mask = \
                        mytool.load_test_data(i)
                    res_list = []
                    for j in range(test_train_data.shape[0]):
                        feed_dict = {test_input_img: np.expand_dims(test_train_data[j, :, :, :], axis=0),
                                     test_input_isp: np.expand_dims(test_isp_data[j, :, :, :], axis=0)}
                        rt = sess.run([t_res], feed_dict=feed_dict)
                        rt = np.array(rt)
                        h_, w_, c = rt.shape[-3:]
                        rt = rt.reshape([h_, w_, c])
                        res_list.append(rt)
                    res_img = mytool.merge(np.stack(res_list), h, w, mask)
                    gtr_img = gt_img_ * 255.
                    l1, p, s = train.help_eval(self, gtr_img, res_img, sess)
                    test_loss.append(l1)
                    test_psnr.append(p)
                    test_ssim.append(s)
                    if num_epoch % self.save_freq == 0:
                        rt_img = res_img
                        if not os.path.isdir(os.path.join(self.result_path, str(num_epoch))):
                            os.mkdir(os.path.join(self.result_path, str(num_epoch)))
                            os.mkdir(os.path.join(self.result_path, str(num_epoch),
                                                  self.test_image_folder))
                        mytool.imgsave(rt_img, num_epoch, test_name)
                    mytool.write_test(epoch=num_epoch, name=test_name,
                                      testing_sad_loss=l1, testing_psnr_loss=p)
                mytool.write(epoch=num_epoch,
                             training_sad_loss=np.mean(train_loss),
                             training_psnr_loss=np.mean(train_psnr),
                             training_ssim_loss=np.mean(train_ssim),
                             testing_sad_loss=np.mean(test_loss),
                             testing_psnr_loss=np.mean(test_psnr),
                             testing_ssim_loss=np.mean(test_ssim))
            elif not self.real:
                mytool.write(epoch=num_epoch,
                             training_sad_loss=np.mean(train_loss),
                             training_ssim_loss=np.mean(train_ssim),
                             training_psnr_loss=np.mean(train_psnr))
        if self.real:
            test_input_img = tf.placeholder(dtype=tf.float32,
                                            shape=[1, self.crop_size, self.crop_size, 1],
                                            name='test_input_img')
            test_input_isp = tf.placeholder(dtype=tf.float32,
                                            shape=[1, self.crop_size, self.crop_size, 3],
                                            name='test_input_isp')
            test_model = model(input_img=test_input_img, input_isp=test_input_isp,
                               gt_img=None, reuse=True)
            t_loss, t_psnr, t_ssim, t_res = test_model.build_model()
            for i in range(self.testing_capacity):
                test_train_data, test_isp_data, h, w, test_name, mask = \
                    mytool.load_test_real(i)
                res_list = []
                for j in range(test_train_data.shape[0]):
                    feed_dict = {test_input_img: np.expand_dims(test_train_data[j, :, :, :], axis=0),
                                 test_input_isp: np.expand_dims(test_isp_data[j, :, :, :], axis=0)}
                    rt = sess.run([t_res], feed_dict=feed_dict)
                    rt = np.array(rt)
                    h_, w_, c = rt.shape[-3:]
                    rt = rt.reshape([h_, w_, c])
                    res_list.append(rt)
                res_img = mytool.merge(np.stack(res_list), h, w, mask)
                rt_img = res_img
                mytool.imgsave(rt_img, num_epoch, test_name)

# CSS Selectors
username_login_input = "#username"
password_login_input = "#password"
login_button = "#login-submit"
plus_sign_button = "button[class='sc-ghsgMZ jJfqRs']:nth-last-of-type(1)"
summary_input = "#summary"
due_date_input = "#duedate"
assign_to_me_link = "#assign-to-me-trigger"
issue_input = "#issuelinks-issues-textarea"
original_estimate_input = "#timetracking_originalestimate"
sprint_input = "#customfield_10103-field"
create_another_check_box = "#qf-create-another"
create_button = "#create-issue-submit"

# TODO yolo
tool = tools.tools()
tool.goto_page(atlassian_login_url)
time.sleep(3)
tool.clear_and_enter_text(username_login_input, USERNAME)
tool.click_on(login_button)
tool.clear_and_enter_text(password_login_input, PASSWORD)
tool.click_on(login_button)
time.sleep(2)
tool.goto_page(jira_login_url)
time.sleep(3)
# click_on(plus_sign_button)
tool.press_key('c')
tool.clear_and_enter_text(summary_input, "Show off selenium skills")
tool.clear_and_enter_text(due_date_input, "4/Jun/18")
tool.click_on(assign_to_me_link)

def __init__(self, ip, mac):
    self.myip = ip
    self.mymac = mac
    self.tools = tools()
    self.mem = memory()
    self.ruleconstructor = ruleconstructor()

from reconstr_actions import reconstr_actions
from forensic_protocol_prober import forensic_protocol_prober
from forensic_switchport_prober import forensic_switchport_prober
from network_map import network_map
from ruleconstructor import ruleconstructor
from map_net import map_net
from tools import tools
from memory import memory
from network import network
import socket
import fcntl
import struct
import os
import sys

tools = tools()
mem = memory()
network = network()


# get public IP address of host
def get_ip_address(ifname):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    return socket.inet_ntoa(fcntl.ioctl(
        s.fileno(),
        0x8915,  # SIOCGIFADDR
        struct.pack('256s', ifname[:15])
    )[20:24])


# get MAC address of host
def getHwAddr(ifname):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        read_path_routing=read_path_routing,
        save_path=save_path,
        is_initial_data=False,
        verbosity=False,
        is_path=is_path,
        fitter_data=fitter_data,
        routing_data=routing_data,
        packet_id=packet_id,
        node_id=node_id,
        is_iter=is_iter,
        chunk_size=chunk_size,
    )
    return pipline.result, pipline.result_cols


tool = tools()
data = pd.read_csv('.//data//test_3.csv')
data2 = pd.read_csv('.//data//test_routing_packet.csv')
fit_data = tool.split_data_step(data, step=step, by_col=by_col, is_by_col=True)
pack_data = tool.split_data_step(data2, step=step, by_col=by_col, is_by_col=True)
all_result = []
cols_ = 0
count = 0
start_time = time.time()
for fitter_data, routing_data in zip(fit_data, pack_data):

pygame.init()
screen = pygame.display.set_mode((1000, 550))
pygame.display.set_caption('bob')
font = pygame.font.Font('freesansbold.ttf', 40)
font_2 = pygame.font.Font('freesansbold.ttf', 20)
run = True
input_active = True
text = ""
# mixer.music.load('joke.ogg')
parle.repond(tools.tools('presentation'))
aw = ""
while run:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
        elif event.type == pygame.KEYDOWN and input_active:
            if event.key == pygame.K_BACKSPACE:
                text = text[:-1]
            elif event.key == pygame.K_RETURN:
                respons = interperter.main(text)
                if "blague" in text:

import subprocess

from config import config
from tools import tools
from dbr import dbr

config = config()
tools = tools()
dbr = dbr()


def list_installed():
    subprocess.call(config.bash().location_installed_apps_script())


class app_security(object):

    def abnormal_installed(self):
        '''returns programs that are installed on this system, but not in a clean 12.04'''
        file = open("./resources/12.04-clean-installed", "r")
        clean = [line.split("\n")[0] for line in file.readlines()]
        list_installed()
        file = open("./bash/output/list-installed.output", "r")
        installed = [line.split("\t")[0] for line in file.readlines()]
        file.close()
        uhoh = [line for line in installed if line not in clean]
        for application in uhoh:
            dbr.ok("abnormal_installed_apps")
            application_entry = tools.make_organized_dict([[application]], ["apps"])[0]
            dbr.fill("abnormal_installed_apps", application_entry)
        return uhoh


if __name__ == "__main__":

da_types_file = '../../AMI/ami_public_manual/ontologies/da-types.xml'
tree = ET.parse(da_types_file)
root = tree.getroot()
tempo = param.find('#')
da_type = param[tempo + 4:-1]
for d_type in root.iter('da-type'):
    if d_type.attrib['{http://nite.sourceforge.net/}id'] == da_type:
        return d_type.attrib['gloss']
return


tool = tools.tools('extractive')
meeting_list = []
# for file in tool.get_list():
#     print file
#     d = extractive(file)
#     d.get_extractive_resume()
# lists = list(set(meeting_list))
# print lists
file = 'TS3012a.extsumm.xml'
print(file)
d = extractive(file)
d.get_extractive_resume()

import numpy as np
import matplotlib.pyplot as plt
import tools

tls = tools.tools()

# Hubble constant in s^-1 (69 km/s/Mpc, with 1 Mpc = 3.08e19 km)
h0 = 69. / (3.08 * 10**19)
# density parameters: matter, dark energy (lambda), radiation
om_m = 0.308
om_l = 0.692
om_r = 10**-5
# scale factors at radiation-matter and matter-lambda equality
a1 = om_r / om_m
a2 = (om_m / om_l)**(1. / 3)
# times at which those equalities are reached
t1 = a1**2 / (2. * h0 * np.sqrt(om_r))
t2 = 2. / 3 * (a2**(3. / 2) - a1**(3. / 2)) / (h0 * np.sqrt(om_m)) + t1
sec_yr = 365 * 24 * 3600
ax = np.linspace(0, 3 * 10**10 * sec_yr, 100)
# scale factor a(t) in the radiation-, matter-, and lambda-dominated eras
ar = lambda t: np.sqrt(2 * h0 * np.sqrt(om_r) * t)
am = lambda t: (3. / 2 * h0 * np.sqrt(om_m) * (t - t1) + a1**(3. / 2))**(2. / 3)
al = lambda t: a2 * np.exp(h0 * np.sqrt(om_l) * (t - t2))
# age of the universe: lambda-era solution evaluated at a = 1
age = 1 / (np.sqrt(om_l) * h0) * np.log(1 / a2) + t2
print(age)
c = 3 * 10**8
ac = lambda t: c * t / (c * age)
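
# Illustrative consistency check (not from the source): the radiation-, matter-
# and lambda-era solutions above should agree at the era boundaries t1 and t2,
# since t1 and t2 were constructed so that the pieces join there.
print(ar(t1), am(t1))  # both equal a1 at radiation-matter equality
print(am(t2), al(t2))  # both equal a2 at matter-lambda equality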