def __init__(self):
    super(ImageSearch, self).__init__()
    model_name = config.image_config['modelname']
    detect_name = config.image_config['detectname']
    self.extract_model = util.get_model(model_name).load_model()
    self.detect_model = util.get_model(detect_name).load_model() if detect_name else None

def reset_model(self, dataset, test_id=0):
    self._dataset = dataset
    if dataset == 'msra':
        init_proto_name, init_model_name = util.get_model(dataset, 'baseline', test_id)
        proto_name, model_name = util.get_model(dataset, 'pose_ren', test_id)
    else:
        init_proto_name, init_model_name = util.get_model(dataset, 'baseline')
        proto_name, model_name = util.get_model(dataset, 'pose_ren')
    print(init_proto_name, init_model_name)
    print(proto_name, model_name)
    self._net = caffe.Net(proto_name, caffe.TEST, weights=model_name)
    self._net_init = caffe.Net(init_proto_name, caffe.TEST, weights=init_model_name)

def languages_del(lang_id):
    """Deletes a language

    Params:
        lang_id (int): the language to delete

    Returns:
        a redirect to the language view page
    """
    model = util.get_model()
    lang = model.Language.query.filter_by(id=lang_id).scalar()
    if lang is None:
        error = "Failed to delete language '{}' as it does not exist.".format(lang_id)
        current_app.logger.info(error)
        flash(error, "danger")
        return redirect(url_for("languages.languages_view"))

    try:
        model.db.session.delete(lang)
        model.db.session.commit()
        flash("Deleted language '{}'".format(lang.name), "warning")
    except IntegrityError:
        model.db.session.rollback()
        error = "Failed to delete language '{}' as it's referenced in another DB element".format(lang_id)
        current_app.logger.info(error)
        flash(error, "danger")

    return redirect(url_for("languages.languages_view"))

def display_lang_add_form(lang_id):
    """Displays the language add template

    Params:
        lang_id (int): lang_id

    Returns:
        a rendered language add/edit template
    """
    model = util.get_model()

    if lang_id is None:  # add
        return render_template("language/add_edit.html", action_label="Add")
    else:  # edit
        lang = model.Language.query.filter_by(id=lang_id).scalar()
        if lang is None:
            error = "Failed to edit language '{}' as language doesn't exist.".format(lang_id)
            current_app.logger.info(error)
            flash(error, "danger")
            return redirect(url_for("languages.languages_view"))
        return render_template("language/add_edit.html",
                               action_label="Edit",
                               lang_id=lang_id,
                               name=lang.name,
                               syntax_mode=lang.syntax_mode,
                               is_enabled=lang.is_enabled,
                               run_script=lang.run_script,
                               default_template=lang.default_template)

def login_submit():
    """processes login requests"""
    model = util.get_model()

    if "email" not in request.form:
        flash("Email not found", "danger")
        abort(401)
    if "password" not in request.form:
        flash("Password not found", "danger")
        abort(401)

    email = request.form.get("email")
    password = request.form.get("password")

    try:
        user = model.User.query.filter_by(email=email).one()
    except NoResultFound:
        flash("Invalid username or password", "danger")
        abort(401)

    is_matching = user.verify_password(password)
    if is_matching:
        flash("Login successful", "success")
        login_user(user)
        return redirect("/admin")
    else:
        flash("Invalid username or password", "danger")
        abort(401)

def clarifications_del(clar_id):
    """Deletes a clarification

    Params:
        clar_id (int): the clarification to delete

    Returns:
        a redirect to the clarification view page
    """
    model = util.get_model()
    clar = model.Clarification.query.filter_by(id=clar_id).scalar()
    if clar is None:
        error = "Failed to delete clarification '{}' as it doesn't exist.".format(clar_id)
        current_app.logger.info(error)
        flash(error, "danger")
        return redirect(url_for("clarifications.clarifications_view"))

    try:
        model.db.session.delete(clar)
        model.db.session.commit()
        flash("Deleted clarification '{}'".format(clar_id), "warning")
    except IntegrityError:
        model.db.session.rollback()
        error = "Failed to delete clarification '{}' as it's referenced in another DB element".format(clar_id)
        current_app.logger.info(error)
        flash(error, "danger")

    return redirect(url_for("clarifications.clarifications_view"))

def add_clar():
    """Adds or edits a clarification

    Note:
        must be called from within a request context

    Returns:
        a redirect to the clarification view page
    """
    model = util.get_model()

    subject = request.form.get("subject")
    contents = request.form.get("contents")

    if subject is None:
        error = "Failed to add clarification due to undefined subject."
        current_app.logger.info(error)
        flash(error, "danger")
        return redirect(url_for("clarifications.clarifications_view"))

    if contents is None:
        error = "Failed to add clarification due to undefined contents."
        current_app.logger.info(error)
        flash(error, "danger")
        return redirect(url_for("clarifications.clarifications_view"))

    thread = str(uuid.uuid4())
    is_public = True  # general clarifications are always public

    clar = model.Clarification(current_user, subject, contents, thread, is_public)
    model.db.session.add(clar)
    model.db.session.commit()

    return redirect(url_for("clarifications.clarifications_view"))

def display_user_add_form(user_id):
    """Displays the user add template

    Params:
        user_id (int): user_id

    Returns:
        a rendered user add/edit template
    """
    model = util.get_model()

    if user_id is None:  # add
        return render_template("users/add_edit.html", action_label="Add", user=None)
    else:  # edit
        user = model.User.query.filter_by(id=user_id).scalar()
        if user is None:
            error = "Failed to edit user '{}' as user doesn't exist.".format(user_id)
            current_app.logger.info(error)
            flash(error, "danger")
            return redirect(url_for("users.users_view"))
        return render_template("users/add_edit.html", action_label="Edit", user=user)

def scoreboard():
    model = util.get_model()
    defendants = model.User.query.filter(model.User.user_roles.any(id="defendant")).all()
    problems = model.Problem.query.all()
    contest = model.Contest.query.first()  # TODO: replace with correct contest

    # compute scoreboard
    scores = collections.OrderedDict()
    for user in defendants:
        user_scores = collections.OrderedDict()
        for problem in problems:
            runs = model.Run.query.filter_by(is_submission=True,
                                             user=user,
                                             contest=contest,
                                             problem=problem).all()
            grid = []
            for run in runs:
                if not run.is_judged:
                    val = RunState.judging
                elif run.is_passed:
                    val = RunState.passed
                else:
                    val = RunState.failed
                grid.append(val)
            user_scores[problem.id] = grid
        scores[user.id] = user_scores

    return render_template("defendant/scoreboard.html",
                           users=defendants,
                           problems=problems,
                           scores=scores,
                           RunState=RunState)

def verify_password(email, password):
    """verifies a user's password"""
    model = util.get_model()
    user = model.User.query.filter_by(email=email).scalar()
    if not user or not user.verify_password(password):
        return False
    return True

def pred(indata, out):
    config = util.initialize_from_env()
    model = util.get_model(config)
    with tf.Session() as session:
        model.restore(session)
        with open(out, "w") as output_file:
            tensorized_example = model.tensorize_example(indata, is_training=False)
            feed_dict = {i: t for i, t in zip(model.input_tensors, tensorized_example)}
            _, _, _, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores = session.run(
                model.predictions, feed_dict=feed_dict)
            predicted_antecedents = model.get_predicted_antecedents(
                top_antecedents, top_antecedent_scores)
            indata["predicted_clusters"], _ = model.get_predicted_clusters(
                top_span_starts, top_span_ends, predicted_antecedents)
            indata["top_spans"] = list(
                zip((int(i) for i in top_span_starts), (int(i) for i in top_span_ends)))
            indata["head_scores"] = []
            output_file.write(json.dumps(indata))
            output_file.write("\n")

def submit_clarification():
    """submit a clarification"""
    model = util.get_model()
    current_user_id = get_jwt_identity()
    user = model.User.query.filter_by(id=current_user_id).scalar()

    subject = request.json.get('subject', None)
    contents = request.json.get('contents', None)
    problem_slug = request.json.get('problem_slug', None)
    parent_id = request.json.get('parent_id', None)

    # May be None for a general (non problem-specific) clarification.
    problem = model.Problem.query.filter_by(slug=problem_slug).scalar()

    if parent_id is None:
        thread = str(uuid.uuid4())
    else:
        thread = model.Clarification.query.filter_by(id=parent_id).scalar().thread

    is_public = False  # user-submitted clarifications are never public

    clar = model.Clarification(user, subject, contents, thread, is_public)
    clar.problem = problem
    model.db.session.add(clar)
    model.db.session.commit()

    return "{}"

def display_problem_add_form(problem_id):
    """Displays the problem add template

    Params:
        problem_id (int): problem_id

    Returns:
        a rendered problem add/edit template
    """
    model = util.get_model()
    problemtypes = model.ProblemType.query.all()

    if problem_id is None:  # add
        return render_template("problems/add_edit.html",
                               action_label="Add",
                               problem=None,
                               problemtypes=problemtypes)
    else:  # edit
        problem = model.Problem.query.filter_by(id=problem_id).scalar()
        if problem is None:
            error = "Failed to edit problem '{}' as problem doesn't exist.".format(problem_id)
            current_app.logger.info(error)
            flash(error, "danger")
            return redirect(url_for("problems.problems_view"))
        return render_template("problems/add_edit.html",
                               action_label="Edit",
                               problem=problem,
                               problemtypes=problemtypes)

def display_contest_add_form(contest_id):
    """Displays the contest add template

    Params:
        contest_id (int): contest_id

    Returns:
        a rendered contest add/edit template
    """
    model = util.get_model()

    if contest_id is None:  # add
        return render_template(
            "contests/add_edit.html",
            action_label="Add",
            contest=None,
            user_emails=[user.email for user in model.User.query.all()],
            problem_slugs=[a.slug for a in model.Problem.query.all()])
    else:  # edit
        contest = model.Contest.query.filter_by(id=contest_id).scalar()
        if contest is None:
            error = "Failed to edit contest '{}' as contest doesn't exist.".format(contest_id)
            current_app.logger.info(error)
            flash(error, "danger")
            return redirect(url_for("contests.contests_view"))
        return render_template(
            "contests/add_edit.html",
            action_label="Edit",
            contest=contest,
            user_emails=[user.email for user in model.User.query.all()],
            problem_slugs=[a.slug for a in model.Problem.query.all()])

def display_config_add_form(config_id):
    """Displays the config add template

    Params:
        config_id (int): config_id

    Returns:
        a rendered config add/edit template
    """
    model = util.get_model()

    if config_id is None:  # add
        return render_template("configurations/add_edit.html",
                               action_label="Add",
                               config=None)
    else:  # edit
        config = model.Configuration.query.filter_by(id=config_id).scalar()
        if config is None:
            error = "Failed to edit config '{}' as config doesn't exist.".format(config_id)
            current_app.logger.info(error)
            flash(error, "danger")
            return redirect(url_for("configurations.configurations_view"))
        return render_template("configurations/add_edit.html",
                               action_label="Edit",
                               config=config)

def train():
    train_dir, validation_dir = get_train_val_dirs()
    train_dataset = get_dataset(train_dir)
    val_dataset = get_dataset(validation_dir)

    class_names = np.array(sorted([item.name for item in train_dir.glob('*')
                                   if item.name != "LICENSE.txt"]))
    util.class_names = class_names

    train_dataset = train_dataset.map(process_path, num_parallel_calls=AUTOTUNE)
    val_dataset = val_dataset.map(process_path, num_parallel_calls=AUTOTUNE)
    train_dataset = configure_for_performance(train_dataset)
    val_dataset = configure_for_performance(val_dataset)
    print(train_dataset)

    model = util.get_model()
    base_learning_rate = 0.0001
    model.compile(optimizer=tf.keras.optimizers.Adam(lr=base_learning_rate),
                  loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
                  metrics=['accuracy'])

    initial_epochs = 10
    history = model.fit(train_dataset,
                        epochs=initial_epochs,
                        validation_data=val_dataset)

    model.save('./tlmodel')
    tar = tarfile.open("tlmodel.tar.gz", "w:gz")
    tar.add("./tlmodel", arcname="tlmodel")
    tar.close()
    shutil.move("tlmodel.tar.gz", model_artifacts_path)

def runs_view(page):
    """The runs view page

    Returns:
        a rendered runs view template
    """
    model = util.get_model()

    run_type = request.args.get("run_type")
    run_status = request.args.get("run_status")

    num_pending = model.Run.query.filter_by(finished_execing_time=None).count()

    if run_type == "submissions":
        run_query = model.Run.query.filter_by(is_submission=True)
    elif run_type == "tests":
        run_query = model.Run.query.filter_by(is_submission=False)
    else:
        run_query = model.Run.query

    if run_status == "judged":
        # SQLAlchemy needs `!= None` (not `is not None`) to emit IS NOT NULL
        run_query = run_query.filter(model.Run.finished_execing_time != None)
    elif run_status == "pending":
        run_query = run_query.filter_by(finished_execing_time=None)

    runs = run_query.order_by(model.Run.submit_time.desc()).paginate(page, 30)

    return render_template("runs/view.html",
                           runs=runs,
                           run_type=run_type,
                           run_status=run_status,
                           num_pending=num_pending)

def main(cfg: DictConfig, train_id: str) -> None:
    cwd = Path.cwd()
    myutil.print_config(cfg)

    # Setting seed
    myutil.set_random_seed(0)

    model_file_name = "{}_best.pth".format(cfg.model.name)

    # Checking history directory
    history_dir = cwd / 'history' / train_id
    if not (history_dir / model_file_name).exists():
        return

    # Setting result directory
    # All outputs will be written into (cwd / 'result' / train_id).
    if not (cwd / 'result').exists():
        (cwd / 'result').mkdir(parents=True)
    result_dir = cwd / 'result' / train_id
    if not result_dir.exists():
        result_dir.mkdir(parents=True, exist_ok=True)
    # TODO: remove stale files if result_dir already exists?

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Testing
    testset = get_dataset(cfg)
    net = myutil.get_model(cfg)
    net.module.load_state_dict(torch.load(str(history_dir / model_file_name)))
    predict(result_dir, testset, net, device)

def problems_del(problem_id):
    """Deletes a problem

    Params:
        problem_id (int): the problem to delete

    Returns:
        a redirect to the problem view page
    """
    model = util.get_model()
    problem = model.Problem.query.filter_by(id=problem_id).scalar()
    if problem is None:
        # problem is None here, so format problem_id rather than problem.slug
        error = "Failed to delete problem '{}' as it doesn't exist.".format(problem_id)
        current_app.logger.info(error)
        flash(error, "danger")
        return redirect(url_for("problems.problems_view"))

    try:
        model.db.session.delete(problem)
        model.db.session.commit()
        flash("Deleted problem '{}'".format(problem.slug), "warning")
    except IntegrityError:
        model.db.session.rollback()
        error = "Failed to delete problem '{}' as it's referenced in another DB element".format(problem.slug)
        current_app.logger.info(error)
        flash(error, "danger")

    return redirect(url_for("problems.problems_view"))

def get_problem(slug):
    """get a specified problem"""
    model = util.get_model()
    problem = model.Problem.query.filter_by(slug=slug).scalar()
    return make_response(jsonify(problem.get_output_dict()), 200)

def train(steps=28, epochs=1, unet=0):
    model = get_model(IMAGE_H, IMAGE_W, INPUT_CHANNELS, SAVED_MODEL, unet)

    callbacks = [
        ProgbarLogger(count_mode='steps', stateful_metrics=None),
        ModelCheckpoint('rgb_weights.h5', monitor='val_loss',
                        save_best_only=False, verbose=0),
        util.MemLeakCallback()
    ]

    print("Training network")
    # Do training from generator
    history = model.fit_generator(
        generator=variation_gen(batch_size),
        steps_per_epoch=steps,  # samples obtained via batch_gen or variation_gen (28 for the whole dataset)
        validation_data=batch_generator(batch_size),
        epochs=epochs,  # number of passes over the whole dataset
        validation_steps=1,
        verbose=1,
        shuffle=False,
        callbacks=callbacks)

    model.save("./rgb.h5")
    return

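# Hedged note: Model.fit_generator is deprecated in TF 2.x, where tf.keras.Model.fit accepts
# Python generators directly. A minimal sketch of the same training call under that assumption,
# reusing the variation_gen/batch_generator helpers exactly as train() above does:
def train_tf2(model, steps=28, epochs=1, callbacks=None):
    return model.fit(
        variation_gen(batch_size),              # generator passed straight to fit()
        steps_per_epoch=steps,
        validation_data=batch_generator(batch_size),
        validation_steps=1,
        epochs=epochs,
        verbose=1,
        shuffle=False,
        callbacks=callbacks)
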
def update_user_metadata():
    """update user metadata"""
    model = util.get_model()
    current_user_id = get_jwt_identity()
    current_user = model.User.query.filter_by(id=current_user_id).scalar()

    if ("judge" not in current_user.user_roles and
            "operator" not in current_user.user_roles):
        return make_response(jsonify({'error': 'Unauthorized access'}), 401)

    user_email = request.json.get('user_email')
    user_misc_metadata = request.json.get('misc_metadata')

    if not all([user_email, user_misc_metadata]):
        return make_response(
            jsonify({'error': 'Invalid request, missing field'}), 400)

    if not isinstance(user_misc_metadata, dict):
        return make_response(
            jsonify({'error': 'Invalid request, misc_metadata must be a dict'}), 400)

    matching_user = model.User.query.filter_by(email=user_email).scalar()
    if not matching_user:
        return make_response(
            jsonify({'error': "Invalid request, Couldn't find user"}), 400)

    matching_user.merge_metadata(user_misc_metadata)
    model.db.session.commit()

    return make_response(jsonify({'status': 'Success'}), 200)

def get_scoreboard():
    """get the scoreboard"""
    model = util.get_model()
    current_user_id = get_jwt_identity()
    current_user = model.User.query.filter_by(id=current_user_id).scalar()

    if len(current_user.contests) == 0:
        return make_response(jsonify({'error': 'User has no contests'}), 400)
    contest = current_user.contests[0]

    defendants = model.User.query\
        .filter(model.User.user_roles.any(id="defendant"))\
        .filter(model.User.contests.any(id=contest.id))\
        .all()
    problems = model.Problem.query\
        .filter(model.Problem.contests.any(id=contest.id))\
        .filter(model.Problem.is_enabled)\
        .all()

    user_points = []
    for user in defendants:
        problem_states = {}
        penalty = 0
        for problem in problems:
            is_passed = 0 < len(
                model.Run.query.filter_by(is_submission=True,
                                          is_passed=True,
                                          user=user,
                                          contest=contest,
                                          problem=problem).all())
            problem_states[problem.slug] = is_passed

            failed_subs = model.Run.query.filter_by(is_submission=True,
                                                    is_passed=False,
                                                    user=user,
                                                    contest=contest,
                                                    problem=problem).all()
            for sub in failed_subs:
                # TODO: we may want to use the time submitted instead of 1
                # to match ICPC scoring
                penalty += 1

        user_points.append({
            "user": user.get_output_dict(),
            "num_solved": len([x for x in problem_states.values() if x]),
            "penalty": penalty,
            "problem_states": problem_states,
        })

    user_points.sort(key=lambda x: (x["num_solved"], -x["penalty"]), reverse=True)

    return make_response(jsonify(user_points))

def load_best_model(cnn: bool, in_shape: Tuple[int, ...]) -> tf.keras.Model:
    """Load best BiLSTM or CNN-BiLSTM model."""
    model = get_model(cnn, in_shape, len(ACCENTS))
    model.build(in_shape)
    weight_file = path.join(ARTIFACT_DIR, f'{model.name}_loss.hdf5')
    model.load_weights(weight_file)
    return model

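# Hedged usage sketch for load_best_model() above. The helper name and the feature layout are
# hypothetical; the input shape must match whatever the checkpointed weights were trained on.
def predict_accent_scores(features):
    """Score a batch of feature tensors with the best saved CNN-BiLSTM checkpoint."""
    # features: array shaped (batch, timesteps, n_features) -- illustrative assumption only
    model = load_best_model(cnn=True, in_shape=features.shape)
    return model.predict(features)  # one score per ACCENTS class for each example
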
def get_languages():
    """get all languages"""
    model = util.get_model()
    langs = model.Language.query.all()
    filter_langs = [x.get_output_dict() for x in langs if x.is_enabled]
    return make_response(jsonify(filter_langs), 200)

def submissions():
    model = util.get_model()
    submissions = model.Run.query.filter_by(user=current_user, is_submission=True)\
        .order_by(model.Run.submit_time.desc())\
        .all()
    return render_template("defendant/submissions.html", submissions=submissions)

def __init__(self, dataset, center_loader=None, param=None, use_gpu=False):
    self._dataset = dataset
    self._center_loader = center_loader
    init_proto_name, init_model_name = util.get_model(dataset, 'baseline')
    proto_name, model_name = util.get_model(dataset, 'pose_ren')
    self._fx, self._fy, self._ux, self._uy = util.get_param(dataset) if param is None else param
    self._net = caffe.Net(proto_name, caffe.TEST, weights=model_name)
    self._net_init = caffe.Net(init_proto_name, caffe.TEST, weights=init_model_name)
    self._input_size = self._net.blobs['data'].shape[-1]
    self._cube_size = 150
    if use_gpu:
        caffe.set_mode_gpu()
        caffe.set_device(0)
    else:
        caffe.set_mode_cpu()

def __init__(self, dataset, model, center_loader=None, param=None):
    self._dataset = dataset
    self._center_loader = center_loader
    proto_name, model_name = util.get_model(dataset, model)
    self._fx, self._fy, self._ux, self._uy = util.get_param(dataset) if param is None else param
    self._net = caffe.Net(proto_name, caffe.TEST, weights=model_name)
    self._input_size = self._net.blobs['data'].shape[-1]
    self._cube_size = 150

def getAllCkptMods():
    thresholds = range(200, 2000, 200)
    models = []
    for threshold in thresholds:
        mod = util.get_model(dshape, checkpoint=threshold, name="res")
        models.append(mod)
        # print(mod.symbol.debug_str())
    return models

def main(args):
    assert args.dataset in ['mnist', 'cifar', 'svhn'], \
        "Dataset parameter must be either 'mnist', 'cifar' or 'svhn'"
    assert args.attack in ['fgsm', 'bim-a', 'bim-b', 'jsma', 'cw-l2', 'all', 'cw-lid'], \
        "Attack parameter must be either 'fgsm', 'bim-a', 'bim-b', " \
        "'jsma', 'cw-l2', 'all' or 'cw-lid' for attacking LID detector"
    model_file = os.path.join(PATH_DATA, "model_%s.h5" % args.dataset)
    assert os.path.isfile(model_file), \
        'model file not found... must first train model using train_model.py.'
    if args.dataset == 'svhn' and args.attack == 'cw-l2':
        assert args.batch_size == 16, \
            "svhn has 26032 test images, the batch_size for cw-l2 attack should be 16, " \
            "otherwise, there will be error at the last batch-- needs to be fixed."

    print('Dataset: %s. Attack: %s' % (args.dataset, args.attack))

    # Create TF session, set it as Keras backend
    if args.attack == 'cw-l2' or args.attack == 'cw-lid':
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.4
        sess = tf.Session(config=config)
        K.set_session(sess)
        warnings.warn("Important: remove the softmax layer for cw attacks!")
        # use softmax=False to load without softmax layer
        model = get_model(args.dataset, softmax=False)
        model.compile(loss=cross_entropy, optimizer='adadelta', metrics=['accuracy'])
        model.load_weights(model_file)
    else:
        sess = tf.Session()
        K.set_session(sess)
        model = load_model(model_file)

    _, _, X_test, Y_test = get_data(args.dataset)
    _, acc = model.evaluate(X_test, Y_test, batch_size=args.batch_size, verbose=0)
    print("Accuracy on the test set: %0.2f%%" % (100 * acc))

    if args.attack == 'cw-lid':  # white box attacking LID detector - an example
        X_test = X_test[:1000]
        Y_test = Y_test[:1000]

    if args.attack == 'all':
        # Cycle through all attacks
        for attack in ['fgsm', 'bim-a', 'bim-b', 'jsma', 'cw-l2']:
            craft_one_type(sess, model, X_test, Y_test, args.dataset, attack, args.batch_size)
    else:
        # Craft one specific attack type
        craft_one_type(sess, model, X_test, Y_test, args.dataset, args.attack, args.batch_size)

    print('Adversarial samples crafted and saved to %s ' % PATH_DATA)
    sess.close()

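# Hedged CLI wiring sketch for main(args) above: only the attributes main() actually reads
# (dataset, attack, batch_size) are assumed; the flag names and default batch size are illustrative.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', required=True, help="mnist, cifar or svhn")
    parser.add_argument('--attack', required=True,
                        help="fgsm, bim-a, bim-b, jsma, cw-l2, all or cw-lid")
    parser.add_argument('--batch_size', type=int, default=256)
    main(parser.parse_args())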