Ejemplo n.º 1
0
	def Run(self):
		"""Query the MIT PGP keyserver for the keyword and append the response HTML to self.Results.

		Best-effort source: any network or parsing failure is swallowed so a
		single failing source does not abort the whole harvest.
		"""
		try:
			html = Net.Conn().Httplib(
				"pgp.mit.edu", "GET",
				"/pks/lookup?search=" + str(self.UrlCheck(self.Keyword)) + "&op=index",
				"pgp.mit.edu", "")
			if html:
				self.Results += html
		except Exception:
			# Deliberate best-effort: ignore failures from this source.
			pass
Ejemplo n.º 2
0
 def Run(self):
     """Search Bing for "%40<keyword>" mentions and append the response HTML to self.Results.

     The SRCHHPGUSR cookie requests 50 results and demotes adult content.
     Best-effort source: any failure is swallowed.
     """
     try:
         html = Net.Conn().Httplib(
             "www.bing.com", "GET",
             "/search?q=%40" + str(self.UrlCheck(self.Keyword)),
             "www.bing.com", "SRCHHPGUSR=ADLT=DEMOTE&NRSLT=50")
         if html:
             self.Results += html
     except Exception:
         # Deliberate best-effort: ignore failures from this source.
         pass
Ejemplo n.º 3
0
 def Run(self):
     """Google-search a quoted "%40<keyword>" query and append the response HTML to self.Results.

     Best-effort source: any failure is swallowed.
     """
     try:
         url = ("http://www.google.com/search?num=500&start=50&hl=en&meta=&q=%40\""
                + self.UrlCheck(self.Keyword) + "\"")
         html = Net.Conn().Requests(url)
         if html:
             self.Results += html
     except Exception:
         # Deliberate best-effort: ignore failures from this source.
         pass
Ejemplo n.º 4
0
 def Run(self):
     """Search Yahoo for a quoted "%40<keyword>" query and append the response HTML to self.Results.

     Best-effort source: any failure is swallowed.
     """
     try:
         html = Net.Conn().Httplib(
             "search.yahoo.com", "GET",
             "/search?p=\"%40" + self.UrlCheck(self.Keyword) + "\"&b=500&pz=10",
             "search.yahoo.com", "")
         if html:
             self.Results += html
     except Exception:
         # Deliberate best-effort: ignore failures from this source.
         pass
def worker_func(env_name, worker_id, params_queue, rewards_queue, device,
                noise_std, nhid):
    """Evaluation worker: pull parameters, score mirrored noise pairs, push rewards.

    Runs until a ``None`` sentinel arrives on ``params_queue``.  After each
    parameter update it performs ITERS_PER_UPDATE mirrored-sampling
    evaluations and reports one RewardsItem per pair on ``rewards_queue``.
    """
    environment = gym.make(env_name)
    policy = Net(environment.observation_space.shape[0],
                 environment.action_space.shape[0], nhid).to(device)
    policy.eval()

    while True:
        new_params = params_queue.get()
        if new_params is None:
            # Sentinel from the master process: shut this worker down.
            break
        policy.load_state_dict(new_params)

        for _ in range(ITERS_PER_UPDATE):
            # Seed the global RNG so the master can regenerate identical noise
            # from the seed alone.
            rng_seed = np.random.randint(low=0, high=65535)
            np.random.seed(rng_seed)
            pos_noise, mirrored_noise = sample_noise(policy, device=device)
            reward_plus, steps_plus = eval_with_noise(
                environment, policy, pos_noise, noise_std, device=device)
            reward_minus, steps_minus = eval_with_noise(
                environment, policy, mirrored_noise, noise_std, device=device)
            rewards_queue.put(
                RewardsItem(seed=rng_seed,
                            pos_reward=reward_plus,
                            neg_reward=reward_minus,
                            steps=steps_plus + steps_minus))
Ejemplo n.º 6
0
 def Run(self):
     """Query Netcraft's searchdns for sites related to self.target and print them.

     Extracts ``url=...`` references from the response HTML and prints each
     discovered host. Best-effort source: any failure is swallowed.
     """
     try:
         url = ("http://searchdns.netcraft.com/?restriction=site+contains"
                "&host=%s&lookup=wait..&position=limited" % (self.target))
         html = Net.Conn().Urllib2(url, None, self.headers)
         if html:
             # Raw string so the regex backslash is explicit.
             matches = re.findall(r'url=\S+"', html, re.I)
             print("")
             Printer.MyPrinter().nprint("Searching \"" + (self.target) +
                                        "\" Websites Correlation...")
             if matches:
                 Printer.MyPrinter().nprint("Found %s sites " %
                                            (len(matches)))
                 print("")
                 for match in matches:
                     # Trim at the closing quote, then drop the "url=" prefix.
                     host = match.split('"')[0]
                     print((" - %s" % (host.split("url=")[1])))
                 print("")
             else:
                 Printer.MyPrinter().iprint("Not found sites")
     except Exception:
         # Deliberate best-effort: ignore failures from this source.
         pass
Ejemplo n.º 7
0
def main():
    """Train a regression model on the configured dataset and register it with MLflow.

    Loads the training CSV, preprocesses it, fits the Net regressor, logs the
    final MSE and the model to MLflow, writes the API request schema, and
    makes sure at least one model version is in the "Production" stage.
    """
    df = pd.read_csv(config["train"]["data_path"])

    df = preprocess(df)

    # Extract the target AFTER preprocessing so it stays aligned with the
    # features. (A dead pre-preprocess extraction and unused
    # label_nbr/label_names copied from the classifier variant were removed.)
    y = np.array(df[config["train"]["label_column"]])
    df = df.drop([config["train"]["label_column"]] +
                 config['train']['to_drop'],
                 axis=1)
    X = np.array(df)

    print(X.shape, y.shape)

    # Fall back to CPU when the configured device is invalid or unavailable.
    try:
        device = torch.device(config["train"]["device"])
    except Exception:
        device = torch.device("cpu")

    classifier = Net(input_dim=df.shape[1],
                     hidden_dim=config["train"]["hidden_dim"]).to(device)
    criterion = torch.nn.functional.mse_loss

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=.25,
                                                        random_state=42)

    X_train = torch.tensor(X_train).float()
    y_train = torch.tensor(y_train).float()
    X_test = torch.tensor(X_test).float()
    y_test = torch.tensor(y_test).float()

    # Create dataloaders with the specified batch_size.
    ds_train = torch.utils.data.TensorDataset(X_train, y_train)
    dataloader_train = torch.utils.data.DataLoader(
        ds_train, batch_size=config["train"]["batch_size"], shuffle=True)

    ds_test = torch.utils.data.TensorDataset(X_test, y_test)
    dataloader_test = torch.utils.data.DataLoader(
        ds_test, batch_size=config["train"]["batch_size"], shuffle=True)

    trainer = Trainer(classifier, device=device, criterion=criterion)
    trainer.train(dataloader_train,
                  dataloader_test,
                  config["train"]["epochs"],
                  config["train"]["log_every"],
                  task="regression")

    # Eval step: record the final metric.
    # NOTE(review): these values are metrics, not hyper-parameters — consider
    # mlflow.log_metrics(); kept as log_params to preserve behavior.
    metrics = {"mse": trainer.metric}
    mlflow.log_params(metrics)

    mlflow.pytorch.log_model(
        pytorch_model=classifier,
        artifact_path="model",
        registered_model_name=config["mlflow"]["model_name"])

    # Persist the expected API request schema next to the model.
    api_request_model = get_request_features(df)
    with open("request_model.json", "w") as rmodel:
        json.dump(api_request_model, rmodel, indent=4)

    # Checking if there are any production models,
    # so we can put at least one in production.
    model_name = config['mlflow']['model_name']

    try:
        mlflow.pytorch.load_model(f"models:/{model_name}/Production")
    except Exception:
        # No "Production" model yet — promote the most recent version.
        client = MlflowClient()
        version = client.search_model_versions(
            f"name='{model_name}'")[0].version

        client.transition_model_version_stage(name=model_name,
                                              version=version,
                                              stage="Production")
                        # NOTE(review): truncated fragment — the enclosing
                        # function header and the start of this
                        # parser.add_argument(...) call are missing from view.
                        action='store_true',
                        help="Enable CUDA mode")
    # Hyper-parameter flags; defaults come from module-level constants.
    parser.add_argument("--lr", type=float, default=LEARNING_RATE)
    parser.add_argument("--noise-std", type=float, default=NOISE_STD)
    parser.add_argument("--iters", type=int, default=MAX_ITERS)

    args = parser.parse_args()

    # Select compute device from the --cuda flag.
    device = "cuda" if args.cuda else "cpu"

    # TensorBoard writer tagged with env name, learning rate and noise sigma.
    writer = SummaryWriter(comment="%s-es_lr=%.3e_sigma=%.3e" %
                           (args.env, args.lr, args.noise_std))

    env = gym.make(args.env)

    # Master copy of the policy network; workers build their own copies.
    net = Net(env.observation_space.shape[0], env.action_space.shape[0],
              args.hid)
    print(net)

    # One bounded params queue per worker; a shared queue collects rewards.
    params_queues = [mp.Queue(maxsize=1) for _ in range(PROCESSES_COUNT)]
    rewards_queue = mp.Queue(maxsize=ITERS_PER_UPDATE)
    workers = []

    # Spawn one evaluation worker process per params queue.
    for idx, params_queue in enumerate(params_queues):
        p_args = (args.env, idx, params_queue, rewards_queue, device,
                  args.noise_std, args.hid)
        proc = mp.Process(target=worker_func, args=p_args)
        proc.start()
        workers.append(proc)

    print("All started!")
def build_net(env, seeds, nhid, noise_std):
    """Reconstruct a policy network from its seed history.

    The first seed fixes the initial weights; each subsequent seed is
    replayed as an in-place mutation, reproducing the evolved network.
    """
    first_seed, mutation_seeds = seeds[0], seeds[1:]
    torch.manual_seed(first_seed)
    policy = Net(env.observation_space.shape[0], env.action_space.shape[0],
                 nhid)
    for mutation_seed in mutation_seeds:
        policy = mutate_net(policy, mutation_seed, noise_std, copy_net=False)
    return policy
Ejemplo n.º 10
0
def main():
    """Train a classification model on the configured dataset and register it with MLflow.

    Fits the Net classifier, logs per-class metrics, confusion matrix, ROC/PR
    curves and per-feature evaluations to MLflow, writes the API request
    schema, and makes sure at least one model version is in "Production".
    """
    df = pd.read_csv(config["train"]["data_path"])

    df = preprocess(df)

    # Number of classes and their display names.
    label_nbr = len(df[config["train"]["labels_column"]].unique())
    label_names = config["train"]["labels"]

    # Extract the target AFTER preprocessing so it stays aligned with the
    # features. (A dead pre-preprocess extraction was removed.)
    y = np.array(df[config["train"]["labels_column"]])
    df = df.drop([config["train"]["labels_column"]] +
                 config['train']['to_drop'],
                 axis=1)
    X = np.array(df)

    print(X.shape, y.shape)

    # Fall back to CPU when the configured device is invalid or unavailable.
    try:
        device = torch.device(config["train"]["device"])
    except Exception:
        device = torch.device("cpu")

    classifier = Net(input_dim=df.shape[1],
                     output_dim=label_nbr,
                     hidden_dim=config["train"]["hidden_dim"]).to(device)
    criterion = torch.nn.CrossEntropyLoss()

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=.25,
                                                        random_state=42)

    # NOTE(review): CrossEntropyLoss expects integer class targets, but the
    # labels are cast to float here — presumably Trainer converts them back;
    # confirm before changing.
    X_train = torch.tensor(X_train).float()
    y_train = torch.tensor(y_train).float()
    X_test = torch.tensor(X_test).float()
    y_test = torch.tensor(y_test).float()

    # Create dataloaders with the specified batch_size.
    ds_train = torch.utils.data.TensorDataset(X_train, y_train)
    dataloader_train = torch.utils.data.DataLoader(
        ds_train, batch_size=config["train"]["batch_size"], shuffle=True)

    ds_test = torch.utils.data.TensorDataset(X_test, y_test)
    dataloader_test = torch.utils.data.DataLoader(
        ds_test, batch_size=config["train"]["batch_size"], shuffle=True)

    trainer = Trainer(classifier, device=device, criterion=criterion)
    trainer.train(dataloader_train, dataloader_test, config["train"]["epochs"],
                  config["train"]["log_every"])

    # Eval step: per-class metrics plus overall accuracy.
    y_true, y_pred, scores = get_preds_labels_scores(dataloader_test,
                                                     classifier, device)

    metrics = eval_model_per_class(y_true, y_pred, label_names)
    metrics["accuracy"] = trainer.metric / 100
    # NOTE(review): these values are metrics, not hyper-parameters — consider
    # mlflow.log_metrics(); kept as log_params to preserve behavior.
    mlflow.log_params(metrics)

    mlflow.pytorch.log_model(
        pytorch_model=classifier,
        artifact_path="model",
        registered_model_name=config["mlflow"]["model_name"])

    # Log evaluation plots as artifacts, deleting the local files afterwards.
    conf_matrix_fname = save_confusion_matrix(y_true, y_pred, label_names)
    mlflow.log_artifact(conf_matrix_fname)
    os.remove(conf_matrix_fname)

    roc_curve_fname = save_roc_curve(y_true, scores, label_names)
    mlflow.log_artifact(roc_curve_fname)
    os.remove(roc_curve_fname)

    pr_curve_fname = save_pr_curve(y_true, scores, label_names)
    mlflow.log_artifact(pr_curve_fname)
    os.remove(pr_curve_fname)

    eval_fnames = eval_classification_model_predictions_per_feature(
        config["train"]["data_path"],
        classifier,
        config['train']['labels_column'],
        config['train']['labels'],
        config['train']['to_drop'],
        use_torch=True,
        device=device,
        preprocess=preprocess)
    for eval_fname in eval_fnames:
        mlflow.log_artifact(eval_fname)
        os.remove(eval_fname)

    # Persist the expected API request schema next to the model.
    api_request_model = get_request_features(df)
    with open("request_model.json", "w") as rmodel:
        json.dump(api_request_model, rmodel, indent=4)

    # Checking if there are any production models,
    # so we can put at least one in production.
    model_name = config['mlflow']['model_name']

    try:
        mlflow.pytorch.load_model(f"models:/{model_name}/Production")
    except Exception:
        # No "Production" model yet — promote the most recent version.
        client = MlflowClient()
        version = client.search_model_versions(
            f"name='{model_name}'")[0].version

        client.transition_model_version_stage(name=model_name,
                                              version=version,
                                              stage="Production")