def set_labels(self, label_names):
    assert hasattr(self, 'id'), "id must be set before fetching data"
    # select ids for each label
    labels = []
    for label_name in label_names:
        label = db.fetch("SELECT id FROM " + db.tbl_label + " WHERE name=%s",
                         [label_name], one=True, as_list=True)
        if label is None:
            label = db.execute(
                "INSERT INTO " + db.tbl_label + "(name) VALUES(%s) RETURNING id",
                [label_name])
            db.commit()
        labels.append(label)
    # get current labels
    current_labels = db.fetch(
        "SELECT label FROM " + db.tbl_image_label + " WHERE image=%s",
        [self.id], as_list=True)
    # update database
    to_be_added = diff(labels, current_labels)
    to_be_deleted = diff(current_labels, labels)
    for label in to_be_added:
        db.execute(
            "INSERT INTO " + db.tbl_image_label + "(image, label) VALUES(%s,%s)",
            [self.id, label])
    for label in to_be_deleted:
        db.execute(
            "DELETE FROM " + db.tbl_image_label + " WHERE image=%s AND label=%s",
            [self.id, label])
        # if the label is not used anymore, delete it permanently
        count = db.fetch(
            "SELECT COUNT(image) FROM " + db.tbl_image_label + " WHERE label=%s",
            [label], one=True, as_list=True)
        if not count:
            db.execute("DELETE FROM " + db.tbl_label + " WHERE id=%s", [label])
    db.commit()
    return labels
def simplediff(exp, res):
    e = sv_flattenify(exp)
    r = sv_flattenify(res)
    d = util.diff(e, r)
    if not d:
        return util.diff(exp, res)
    return d
async def watch(
        self, poll_interval=5
) -> typing.AsyncGenerator[MCServerStatusWatched, None]:
    prev_status = None
    while True:
        cur_status = self.get_status()
        if not prev_status:
            prev_status = cur_status
            continue
        player_diff = util.diff(prev_status.players, cur_status.players)
        events = []
        if player_diff is not None:
            events.append(
                MCServerStatusEvent(event=self.EVENT_PLAYER_ACTIVITY,
                                    info={
                                        'login': player_diff[0],
                                        'logout': player_diff[1]
                                    }))
        yield MCServerStatusWatched(status=cur_status, events=events)
        prev_status = cur_status
        await asyncio.sleep(poll_interval)
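# A minimal consumer sketch for the watch() generator above; `server` is
# assumed to be an instance of the class that defines watch() (the name and
# event fields are taken from the snippet, not verified elsewhere):
import asyncio

async def report_activity(server):
    # Print every player-activity event as it is observed.
    async for watched in server.watch(poll_interval=5):
        for event in watched.events:
            print(event.event, event.info)

# asyncio.run(report_activity(server))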
def mutate(self):
    """
    Performs the mutation. Applies each enabled mutation operator to each
    source file, then stores a diff between the original and mutated file.

    # mutants = # source files x # mutation operators
    """
    count = 0
    start = time()
    for src_file in self.project.source_files():
        original_path = join(self.project.settings["source_path"], src_file)
        mutant_path = join(out_dir, src_file)
        mkdir_p(dirname(mutant_path))
        for (op, invoke) in self.project.settings["mutants"].items():
            if invoke:
                p = Popen(["txl", original_path,
                           join("vendor", "conman", "%s.Txl" % op)],
                          stdout=open(mutant_path, "w"),
                          stderr=open("/dev/null", "w"))
                p.wait()  # let txl finish writing the mutant before diffing
                self.store.put(diff(relpath(original_path),
                                    relpath(mutant_path)),
                               op, src_file)
                count += 1
                if count % 1000 == 0:
                    print("Generated %d mutants. Elapsed time %.02f seconds."
                          % (count, (time() - start)))
    stop = time()
    print("Generated %d mutants in %d seconds." % (count, (stop - start)))
def update_ports(protocol, db):
    name = '%s.%s' % (conf['reactive']['ipset_prefix'], protocol.upper())
    ports = ipset.get_ports(name)
    blacklist = [i['port'] for i in db[protocol][0:conf['reactive']['count']]]
    # Remove old ports
    for p in diff(ports, blacklist):
        print("Removing port %s" % p)
        ipset.delete(name, p)
    # Add new ports
    for p in diff(blacklist, ports):
        print("Adding port %s" % p)
        ipset.add(name, p)
    # Remove whitelisted ports
    for p in conf['reactive'][f'whitelist_{protocol}']:
        print(f'Removing whitelisted port {p}')
        ipset.delete(name, str(p))
def sync_with_k8s(self):
    k8s_nodes_json = self.k8s_client.get_nodes()
    # TODO: change the JSON to a list of node IPs
    diffs = util.diff(self.current_nodes, k8s_nodes_json, cmp)
    if len(diffs['removed']) > 0 or len(diffs['added']) > 0:
        return (True, diffs)
    else:
        return (False, None)
def sync_with_libnetwork(self):
    # TODO: sync with the Libnetwork Prism MongoDB
    nodes_json = self.libprism_client.get_nodes()
    diffs = util.diff(self.current_nodes, nodes_json, cmp)
    if len(diffs['removed']) > 0 or len(diffs['added']) > 0:
        return (True, diffs)
    else:
        return (False, None)
def strat_trend_measure(dataframe, duration=3):
    # moving_fun(dataframe, 'highest', blanking=0, duration=duration, newname='trend_hi', fun=sumoversumabs)
    # moving_fun(dataframe, 'lowest', blanking=0, duration=duration, newname='trend_lo', fun=sumoversumabs)
    # moving_fun(dataframe, 'highest', blanking=0, duration=2*duration, newname='trend_hi*2', fun=sumoversumabs)
    # moving_fun(dataframe, 'lowest', blanking=0, duration=2*duration, newname='trend_lo*2', fun=sumoversumabs)
    # moving_fun(dataframe, 'highest', blanking=0, duration=10*duration, newname='trend_hi*10', fun=sumoversumabs)
    # moving_fun(dataframe, 'lowest', blanking=0, duration=10*duration, newname='trend_lo*10', fun=sumoversumabs)
    #
    # # percent change
    # dataframe['diff-hi'] = 0
    # diffidx = list(dataframe.columns).index('diff-hi')
    # hiidx = list(dataframe.columns).index('highest')
    # idcs_all = list(dataframe.index)
    # dataframe.iloc[idcs_all[1:], diffidx] = ((dataframe.iloc[idcs_all[1:], hiidx].values -
    #                                           dataframe.iloc[idcs_all[:-1], hiidx].values) /
    #                                          dataframe.iloc[idcs_all[1:], hiidx].values)
    #
    # # bandpass filters
    # dataframe['bandpass_hi_3-10'] = butter_bandpass_filter(dataframe['diff-hi'], 1/10, 1/3, 1, 1)
    # dataframe['bandpass_hi_10-30'] = butter_bandpass_filter(dataframe['diff-hi'], 1/30, 1/10, 1, 1)

    dataframe['bp_hi_3-10'] = butter_bandpass_filter(
        dataframe['highest'] - dataframe.loc[0, 'highest'],
        1 / 10, 1 / 3, 1, 1) / dataframe['highest']
    moving_fun(dataframe, 'bp_hi_3-10', 0, 3, 'sma_bp_hi_3-10')
    diff(dataframe, 'sma_bp_hi_3-10', 'dsma_bp_hi_3-10')
    dataframe['bp_hi_3-120'] = butter_bandpass_filter(
        dataframe['highest'] - dataframe.loc[0, 'highest'],
        1 / 120, 1 / 3, 1, 1) / dataframe['highest']
    moving_fun(dataframe, 'bp_hi_3-120', 0, 3, 'sma_bp_hi_3-120')
    diff(dataframe, 'sma_bp_hi_3-120', 'dsma_bp_hi_3-120')
    dataframe['bp_hi_3-2000'] = butter_bandpass_filter(
        dataframe['highest'] - dataframe.loc[0, 'highest'],
        1 / 2000, 1 / 3, 1, 1) / dataframe['highest']
    dataframe['bp_hi_10-30'] = butter_bandpass_filter(
        dataframe['highest'] - dataframe.loc[0, 'highest'],
        1 / 30, 1 / 10, 1, 1) / dataframe['highest']
    dataframe['bp_hi_30-120'] = butter_bandpass_filter(
        dataframe['highest'] - dataframe.loc[0, 'highest'],
        1 / 120, 1 / 30, 1, 1) / dataframe['highest']
    dataframe['bp_hi_500-2000'] = butter_bandpass_filter(
        dataframe['highest'] - dataframe.loc[0, 'highest'],
        1 / 2000, 1 / 500, 1, 1) / dataframe['highest']
    dataframe['bp_hi_1000-5000'] = butter_bandpass_filter(
        dataframe['highest'] - dataframe.loc[0, 'highest'],
        1 / 5000, 1 / 1000, 1, 1) / dataframe['highest']
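# butter_bandpass_filter is not defined in the snippet above. A common
# SciPy-based implementation matching its (data, lowcut, highcut, fs, order)
# call shape would be the following sketch (an assumption, not the project's
# actual code):
from scipy.signal import butter, lfilter

def butter_bandpass_filter(data, lowcut, highcut, fs, order):
    # Design a Butterworth bandpass filter and apply it causally; the cutoff
    # frequencies are normalized by the Nyquist frequency fs / 2.
    nyq = 0.5 * fs
    b, a = butter(order, [lowcut / nyq, highcut / nyq], btype='band')
    return lfilter(b, a, data)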
def iptc_status_update(self):
    current_tbls = self.iptc_ctrl.get_current_iptc()
    diffs = util.diff(self.iptc_ctrl.before_tbls, current_tbls, cmp)
    if len(diffs['removed']) > 0 or len(diffs['added']) > 0:
        self.iptc_ctrl.need_update = True
        return (True, diffs, current_tbls)
    else:
        self.iptc_ctrl.need_update = False
        self.iptc_ctrl.update_tbls = []
        return (False, None, None)
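# Several snippets above (sync_with_k8s, sync_with_libnetwork,
# iptc_status_update) read util.diff(old, new, cmp) as a dict with 'added'
# and 'removed' lists. A hedged sketch consistent with that call shape,
# assuming cmp is a three-way comparator returning 0 on equality (all names
# here are assumptions, not the project's actual helper):
def diff(before, after, cmp):
    # Items present only in `after` are "added"; only in `before`, "removed".
    added = [b for b in after if not any(cmp(a, b) == 0 for a in before)]
    removed = [a for a in before if not any(cmp(a, b) == 0 for b in after)]
    return {'added': added, 'removed': removed}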
def main():
    p = argparse.ArgumentParser(
        description='Given two images and some kernels, report the '
        'difference per kernel.')
    p.add_argument('a', help='input image filename')
    p.add_argument('b', help='expected image filename')
    p.add_argument('kernels', nargs='*', help='kernel directory')
    p.add_argument(
        '-gamma', type=float, default=1.0,
        help='gamma correction to use for images (default: no correction)')
    p.add_argument('-crop_x', type=int, default=0,
                   help='crop X offset in pixels, range is [0..width-1]')
    p.add_argument(
        '-crop_y', type=int, default=0,
        help='crop Y offset in pixels, range is [0..height-1] where 0 is '
        'the TOP')
    p.add_argument('-crop_w', type=int, default=0, help='crop width in pixels')
    p.add_argument('-crop_h', type=int, default=0, help='crop height in pixels')
    args = p.parse_args()

    img1 = util.load_image(args.a, args)
    img2 = util.load_image(args.b, args)
    assert img1.shape == img2.shape, (img1.shape, img2.shape)
    print('# Loaded images. Shape is', img1.shape)
    img_input = tf.constant(img1)
    img_expected = tf.constant(img2)
    sess = util.make_session()
    for kfn in args.kernels:
        step, kernel = util.load_kernel(kfn)
        n = kernel.shape[0]
        border = (n + 1) // 2
        # Convolve and calculate costs.
        img_actual = util.convolve(img_input, kernel)
        dcost = sess.run(
            util.diff_cost(util.diff(img_actual, img_expected, border)))
        rcost = sess.run(util.reg_cost(kernel))
        print(kfn, 'n', n, 'diffcost %.12f' % dcost, 'regcost', rcost,
              'avg-px-err', util.avg_px_err(dcost, args.gamma))
def visual_servo(self):
    if self.target is None:
        self.stop()
        return STOP
    delta = diff((self.center[0], self.center[1] + self.correction),
                 self.target)
    if norm(delta) < TOLERANCE:
        self.stop()
        return STOP
    else:
        current_cmd = 0
        cur_time = time.time()
        timeout_x = 0.0
        timeout_y = 0.0
        cmd_to_add = None
        if delta[0] < -SUBTOLERANCE_X:
            cmd_to_add = LEFT
        elif delta[0] > SUBTOLERANCE_X:
            cmd_to_add = RIGHT
        if cmd_to_add:
            # note: the Y sleep-time cap is also used to clamp the x timeout
            timeout_x = min(abs(delta[0]) * P_MOVE_SLEEP_X,
                            MAX_MOVE_SLEEP_TIME_Y)
            print("move time x: ", abs(delta[0]) * P_MOVE_SLEEP_X)
            current_cmd |= cmd_to_add
        cmd_to_add = None
        if delta[1] < -SUBTOLERANCE_Y:
            cmd_to_add = UP
        elif delta[1] > SUBTOLERANCE_Y:
            cmd_to_add = DOWN
        if cmd_to_add:
            timeout_y = min(abs(delta[1]) * P_MOVE_SLEEP_Y,
                            MAX_MOVE_SLEEP_TIME_Y)
            current_cmd |= cmd_to_add
        self.send_cmd_fn(timeout_x + cur_time, timeout_y + cur_time,
                         current_cmd)
        return current_cmd
def __refresh_devices(self):
    try:
        stored_devices = to_dict(
            self.__local_storage.read(Discovery.__devices_table[0]), "id")
        new_devices, missing_devices, existing_devices = diff(
            self.__device_pool, stored_devices)
        if new_devices:
            for device_id in new_devices:
                self.__handle_new_device(device_id, stored_devices[device_id])
        if missing_devices:
            for device_id in missing_devices:
                self.__handle_missing_device(device_id)
        if existing_devices:
            for device_id in existing_devices:
                self.__handle_existing_device(device_id,
                                              stored_devices[device_id])
    except Exception as ex:
        logger.error("refreshing devices failed - {}".format(ex))
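# __refresh_devices unpacks diff(known, fetched) into (new, missing,
# existing) id collections and then indexes the fetched dict with each id.
# A minimal sketch of a dict-key-based three-way diff consistent with that
# usage (an assumption; the real helper may also compare record contents):
def diff(known, fetched):
    known_ids, fetched_ids = set(known), set(fetched)
    new = fetched_ids - known_ids        # ids only in the fetched snapshot
    missing = known_ids - fetched_ids    # ids that disappeared
    existing = known_ids & fetched_ids   # ids present in both
    return new, missing, existing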
def sv_simplediff(exp, res):
    # Check for simple, common things that might be errors in the test
    # setup before defaulting to the more intensive error checking.
    # The intent here is to simplify the process of determining where
    # the source of the error came from.

    # whitespace mismatch:
    e = exp.strip()
    r = res.strip()
    if e == r:
        return "Trailing whitespace in file"
    el = e.splitlines()
    rl = r.splitlines()
    # an extra line somewhere is probably a whoops;
    # abort on first mismatched line
    if len(el) != len(rl):
        # check for random blank lines
        def cblank(l):
            blanklines = 0
            for s in l:
                if not s.strip():
                    blanklines += 1
            return blanklines

        a = cblank(el)
        b = cblank(rl)
        if a > 2 or b > 2:
            return "Random blank lines in files"
    return util.diff(el, rl)
def test_require(self):
    self.assertNoDiff(
        util.diff(util.run_mock('commands/require_single.sieve'),
                  'commands/require_single.out'))
def test_10_10(self):
    self.assertEqual(diff(10, 10), 0)
def test__5_5(self):
    self.assertEqual(diff(-5, 5), 10)
def main():
    MAIN_T = time.time()
    p = argparse.ArgumentParser(
        description='Given two images, determine the convolution kernel so '
        'that a * k = b')
    p.add_argument('a', help='input image filename')
    p.add_argument('b', help='expected image filename')
    p.add_argument('k', help='kernel directory')
    p.add_argument(
        '-n', type=int, default=5,
        help='kernel size is NxN (default: 5, or automatically set to size '
        'of loaded kernel)')
    p.add_argument(
        '-sym', type=boolchoice, default=True, choices=[True, False],
        help='kernel will be symmetric if set to True (default: True)')
    p.add_argument(
        '-gamma', type=float, default=1.0,
        help='gamma correction to use for images (default: no correction)')
    p.add_argument(
        '-reg_cost', type=float, default=0.,
        help='regularization cost: the sum of weights is multiplied by this '
        'and added to the cost (default: zero: no regularization)')
    p.add_argument(
        '-border', type=int, default=-1,
        help='how many pixels to remove from the border (from every edge of '
        'the image) before calculating the difference (default: auto based '
        'on kernel size)')
    p.add_argument('-learn_rate', type=float, default=2.**-10,
                   help='learning rate for the optimizer')
    p.add_argument('-epsilon', type=float, default=.09,
                   help='epsilon for the optimizer')
    p.add_argument(
        '-max_steps', type=int, default=0,
        help='stop after this many steps (default: zero: never stop)')
    p.add_argument('-log_every', type=int, default=100,
                   help='log stats every N steps (0 to disable)')
    p.add_argument('-save_every', type=int, default=500,
                   help='save kernel and image every N steps (0 to disable)')
    p.add_argument('-crop_x', type=int, default=0,
                   help='crop X offset in pixels, range is [0..width-1]')
    p.add_argument(
        '-crop_y', type=int, default=0,
        help='crop Y offset in pixels, range is [0..height-1] where 0 is '
        'the TOP')
    p.add_argument('-crop_w', type=int, default=0, help='crop width in pixels')
    p.add_argument('-crop_h', type=int, default=0, help='crop height in pixels')
    p.add_argument(
        '-fps', type=float, default=5,
        help='how often to update the viewer, set to zero to disable viewer')
    args = p.parse_args()

    if not os.path.exists(args.k):
        os.mkdir(args.k)
        step = -1
    else:
        step, w1 = util.load_kernel(args.k)
        args.n = w1.shape[0]
    if step >= args.max_steps and args.max_steps != 0:
        print('Current step %d is over max %d. Exiting.' %
              (step, args.max_steps))
        return 0

    log = util.Logger(args.k + '/log.txt')
    log.log('--- Start of run ---')
    log.log('Cmdline:', sys.argv)

    # Load images.
    img1 = util.load_image(args.a, args)
    img2 = util.load_image(args.b, args)
    assert img1.shape == img2.shape, (img1.shape, img2.shape)
    log.log('Loaded images. Shape is', img1.shape, '(NHWC)')
    vimg1 = util.vis_nhwc(img1, doubles=0, gamma=args.gamma)
    vimg2 = util.vis_nhwc(img2, doubles=0, gamma=args.gamma)

    # Load and initialize weights.
    if step >= 0:
        log.log('Loaded weights, shape is', w1.shape, '(HWIO)')
    else:
        assert step == -1, step
        step = 0
        log.log('Starting with random weights.')
        w1 = np.random.normal(size=(args.n, args.n, 1, 1),
                              scale=.2).astype(np.float32)
        m = args.n // 2
        w1[m, m, 0, 0] = 1.  # Bright middle pixel.
    if args.sym:
        w1 = util.make_symmetric(w1)
    else:
        w1 = tf.Variable(w1)
    if args.border == -1:
        args.border = (args.n + 1) // 2
        log.log('Automatically set border to', args.border)
    log.log('Current args:', args.__dict__)
    log.log('Starting at step', step)

    # Convolution.
    input_img = tf.constant(img1)
    expected_img = tf.constant(img2)
    actual_img = util.convolve(input_img, w1)  # <-- THIS IS THE CALCULATION.

    # Cost.
    diff = util.diff(actual_img, expected_img, args.border)
    diffcost = util.diff_cost(diff)  # L2
    cost = diffcost

    # Regularization.
    reg = util.reg_cost(w1)  # L1
    if args.reg_cost != 0:
        cost += reg * args.reg_cost

    # Optimizer.
    global_step = tf.Variable(step, dtype=tf.int32, trainable=False,
                              name='global_step')
    train_step = tf.train.AdamOptimizer(
        args.learn_rate, args.epsilon).minimize(cost, global_step=global_step)

    log.log('Starting TF session.')
    sess = util.make_session(outdir=args.k)

    # Get ready for viewer.
    log_last_step = [step]
    log_last_time = [time.time()]

    def periodic_log():
        """Does a log.log() of stats like step number and current error."""
        now = time.time()
        rstep, rcost, rreg, rdiffcost = sess.run(
            [global_step, cost, reg, diffcost])
        if log_last_step[0] == rstep:
            return  # Dupe call.
        log.log(
            'steps', rstep,
            'total-cost %.9f' % rcost,
            'diffcost %.9f' % rdiffcost,
            'reg %.9f' % rreg,
            'avg-px-err %.6f' % util.avg_px_err(rdiffcost, args.gamma),
            'steps/sec %.2f' % ((rstep - log_last_step[0]) /
                                (now - log_last_time[0])),
        )
        log_last_step[0] = rstep
        log_last_time[0] = now

    render_time = [0.]

    def render():
        """Returns an image showing the current weights and output."""
        # TODO: vertically align labels.
        t0 = time.time()
        rout, rdiff, rw = sess.run([actual_img, diff, w1])
        render_out = util.vstack([
            util.hstack([
                util.vstack([util.cache_label('input:'), vimg1], 5),
                util.vstack([
                    util.cache_label('actual:'),
                    util.vis_nhwc(rout, doubles=0, gamma=args.gamma)
                ], 5),
                util.vstack([util.cache_label('expected:'), vimg2], 5),
            ], 5),
            util.cache_label('difference:'),
            util.vis_nhwc(rdiff, doubles=0),
            util.cache_label('kernel:'),
            util.vis_hwoi(rw, doubles=2),
        ], 5)
        render_out = util.border(render_out, 5)
        t1 = time.time()
        render_time[0] += t1 - t0
        return render_out

    def periodic_save():
        rstep, rdiffcost, rw = sess.run([global_step, diffcost, w1])
        util.save_kernel(args.k, rstep, rw)
        rfn = args.k + '/render-step%08d-diff%.9f.png' % (rstep, rdiffcost)
        util.save_image(rfn, render())

    calc_time = [0.]

    def calc_fn():
        """Run train_step, then do every-N-steps housekeeping."""
        t0 = time.time()
        sess.run(train_step)  # <--- THIS IS WHERE THE MAGIC HAPPENS.
        t1 = time.time()
        calc_time[0] += t1 - t0
        nsteps = sess.run(global_step)
        if args.log_every != 0:
            if nsteps == 1 or nsteps % args.log_every == 0:
                periodic_log()
        if args.save_every != 0:
            if nsteps % args.save_every == 0:
                periodic_save()
        if args.max_steps == 0:
            return True  # Loop forever.
        return nsteps < args.max_steps

    log.log('Start optimizer.')
    START_T = time.time()
    if args.fps == 0:
        while True:
            if not calc_fn():
                break
    else:
        util.viewer(calc_fn, render, fps=args.fps, hang=False)
    STOP_T = time.time()

    # Final log and save.
    log.log('Stop optimizer.')
    log.log('Render time %.3fs (%.02f%% of optimizer)' %
            (render_time[0], 100. * render_time[0] / (STOP_T - START_T)))
    periodic_log()
    periodic_save()
    nsteps = sess.run(global_step) - step
    log.log('Steps this session %d, calc time %.3fs (%.02f%% of optimizer)' %
            (nsteps, calc_time[0], 100. * calc_time[0] / (STOP_T - START_T)))
    log.log('Calc steps/sec %.3f, with overhead steps/sec %.3f' %
            (nsteps / calc_time[0], nsteps / (STOP_T - START_T)))
    END_T = time.time()
    log.log('Total time spent: %.3fs' % (END_T - INIT_T))
    for k, v in [
            ('before main', MAIN_T - INIT_T),
            ('setting up', START_T - MAIN_T),
            ('optimizing', STOP_T - START_T),
            ('finishing up', END_T - STOP_T),
    ]:
        log.log(' - time spent %s: %.3fs (%.02f%% of total)' %
                (k, v, 100. * v / (END_T - INIT_T)))
    log.close()
# count_vectorizer
count_vectorizer = CountVectorizer(stop_words=stop_words)
count_vectorizer.fit_transform(text)
words, word_values = get_top_n_words(n_top_words=15,
                                     count_vectorizer=count_vectorizer,
                                     text_data=text)

lemmatized = lemmatization(text)
clean_text = remove_stopwords(lemmatized)

# remove punctuation from each word
table = str.maketrans('', '', string.punctuation)
punctuation_stripped = [w.translate(table) for w in clean_text]

# difference between clean_text and the punctuation-stripped words
resultant = diff(clean_text, punctuation_stripped)

# remove garbage
final_text = custom_remove_garbage(punctuation_stripped, resultant)

# word tokenization
for line in text:
    words_list = words_list + word_tokenize(line)
# words_list = [word_tokenize(line) for line in text]  # creates list of lists

"""
Create the Dictionary and Corpus needed for Topic Modeling.
The two main inputs to the LDA topic model are the dictionary (id2word)
and the corpus.
"""
# Create Dictionary
id2word = corpora.Dictionary([final_text])
def test_5_2(self):
    self.assertEqual(diff(5, 2), 3)
def __refresh_local_storage(self):
    try:
        logger.info("refreshing local storage ...")
        local_devices = to_dict(
            self.__local_storage.read(Discovery.__devices_table[0]), "id")
        remote_devices = get_cloud_devices(*get_cloud_credentials())
        new_devices, missing_devices, existing_devices = diff(
            local_devices, remote_devices)
        if new_devices:
            for device_id in new_devices:
                logger.info("adding record for '{}' ...".format(device_id))
                try:
                    self.__local_storage.create(
                        Discovery.__devices_table[0],
                        {"id": device_id, **remote_devices[device_id]})
                except Exception as ex:
                    logger.error("adding record for '{}' failed - {}".format(
                        device_id, ex))
        if missing_devices:
            for device_id in missing_devices:
                try:
                    device_data = self.__local_storage.read(
                        Discovery.__devices_table[0], id=device_id)
                    now = time.time()
                    age = now - float(device_data[0]["last_seen"])
                    if age > conf.Discovery.grace_period:
                        logger.info(
                            "removing record for '{}' due to exceeded "
                            "grace period ...".format(device_id))
                        try:
                            self.__local_storage.delete(
                                Discovery.__devices_table[0], id=device_id)
                        except Exception as ex:
                            logger.error(
                                "removing record for '{}' failed - {}".format(
                                    device_id, ex))
                    else:
                        logger.info(
                            "remaining grace period for missing '{}': "
                            "{}s".format(device_id,
                                         conf.Discovery.grace_period - age))
                except Exception as ex:
                    logger.error(
                        "can't calculate grace period for missing "
                        "'{}' - {}".format(device_id, ex))
        if existing_devices:
            for device_id in existing_devices:
                logger.info("updating record for '{}' ...".format(device_id))
                try:
                    self.__local_storage.update(
                        Discovery.__devices_table[0],
                        remote_devices[device_id], id=device_id)
                except Exception as ex:
                    logger.error("updating record for '{}' failed - {}".format(
                        device_id, ex))
    except Exception as ex:
        logger.error("refreshing local storage failed - {}".format(ex))
def test_2_5(self):
    self.assertEqual(diff(2, 5), 3)
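# Taken together, the scalar tests in this collection (diff(10, 10) == 0,
# diff(-5, 5) == 10, diff(5, 2) == 3, diff(2, 5) == 3) are all satisfied by
# a plain absolute difference. A minimal implementation consistent with
# them, assuming the tested diff does nothing more:
def diff(a, b):
    return abs(a - b)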
def test_require_list(self):
    self.assertNoDiff(
        util.diff(util.run_mock('commands/require_list.sieve'),
                  'commands/require_list.out'))
def test_stop(self):
    self.assertNoDiff(
        util.diff(util.run_mock('commands/stop.sieve'), 'commands/stop.out'))
def set_ipset(ipsetname, blacklist):
    ipset = get_ipset(ipsetname)
    for ip in diff(blacklist, ipset):
        add_ip(ip, ipsetname)
    for ip in diff(ipset, blacklist):
        remove_ip(ip, ipsetname)
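# set_ipset (and update_ports earlier) uses diff(xs, ys) as an asymmetric
# difference: entries of xs that are absent from ys. A minimal sketch under
# that assumption (not necessarily the project's actual helper):
def diff(xs, ys):
    ys_set = set(ys)  # O(1) membership tests
    return [x for x in xs if x not in ys_set]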
def test_if_elsif_else(self):
    self.assertNoDiff(
        util.diff(util.run_mock('control/if_3.sieve'), 'control/if_3.out'))
karlness_update = 1
karlness = (KARLNESS_DECAY * karlness +
            (1 - KARLNESS_DECAY) * karlness_update)
rect_color = (0, 255 * (1 - karlness), 255 * karlness)
draw_rects(vis_roi, subtargets, rect_color)
if not contains(next_targets, subtargets[0]):
    fixed_rect = [
        max(0, x1 - BUFFER) + sx1,
        max(0, y1 - BUFFER) + sy1,
        max(0, x1 - BUFFER) + sx2,
        max(0, y1 - BUFFER) + sy2
    ]
    s_last, c_last = size_and_center(fixed_rect)
    next_targets.append(
        (fixed_rect, 0,
         VELOCITY_DECAY * velocity +
         (1 - VELOCITY_DECAY) * np.array(diff(c_last, c)),
         karlness))
else:
    # draw_rects(vis, [rect], (0,0,255))
    if misses < MAX_MISSES:
        next_targets.append((rect, misses + 1, np.zeros(2), 0))
targets = next_targets
if is_auto:
    if firing and not primed:
        launcher.prime()
        primed = True
        locked_counter = 0
    else:  # either not firing, or already primed
        victims = [tgt for tgt in targets
                   if tgt[3] >= KARLNESS_THRESHOLD]
def test_simple_if(self):
    self.assertNoDiff(
        util.diff(util.run_mock('control/if_1.sieve'), 'control/if_1.out'))