Example #1
def verify_exploit(exploit_dir,
                   service_dir,
                   branch,
                   timeout,
                   config,
                   encrypt=False,
                   log=None):
    if not os.path.isdir(exploit_dir):
        print("[*] Exploit directory '%s' does not exist" % exploit_dir)
        return False, log

    if not os.path.isdir(service_dir):
        print("[*] Service directory '%s' does not exist" % service_dir)
        return False, log

    # Create random flag value
    flag = random_string(10)

    # Start the service
    service_dirname = get_dirname(service_dir)
    service_container_name = "%s-%s" % (service_dirname,
                                        branch.replace('/', '_'))
    result, log = start_service(service_dir, branch, service_container_name,
                                flag, log=log)
    if not result:
        return False, log

    time.sleep(2)  # give the service container a moment to come up

    # Run the exploit
    exploit_dirname = get_dirname(exploit_dir)
    exploit_container_name = "exploit-%s" % branch.replace('/', '_')
    exploit_result, log = run_exploit(exploit_dir, exploit_container_name,
                                      timeout, log=log)

    # Clean up containers
    docker_cleanup(service_container_name)
    docker_cleanup(exploit_container_name)

    log = print_and_log("[*] Exploit returned: %s" % exploit_result, log)
    log = print_and_log("[*] Solution flag: %s" % flag, log)
    if exploit_result == flag:
        print("[*] Exploit worked successfully")
        if encrypt:
            print("[*] Encrypting the verified exploit")
            # Set your own team as the target team; no signer is needed.
            target_team = config["player_team"]
            encrypted_file = encrypt_exploit(exploit_dir, target_team, config)
            if encrypted_file is None:
                print("[*] Failed to encrypt exploit")
            else:
                print("[*] Your exploit is encrypted in %s" % encrypted_file)
                print("[*] Now you may commit and push this encrypted exploit "\
                      "to the corresponding branch of your service repository")
        return True, log
    else:
        log = print_and_log("[*] Exploit returned a wrong flag string", log)
        return False, log
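
A minimal driver for the snippet above, as a sketch: the paths, branch name, and config file are hypothetical placeholders, and verify_exploit is assumed to be importable from the module that defines it.

import json

if __name__ == "__main__":
    with open("config.json") as f:
        config = json.load(f)  # must contain "player_team" when encrypt=True
    ok, log = verify_exploit(
        exploit_dir="./exploit",   # directory holding the exploit image build
        service_dir="./service",   # directory holding the service image build
        branch="issue/42",         # '/' is rewritten to '_' in container names
        timeout=30,                # seconds granted to run_exploit
        config=config,
        encrypt=False,
    )
    print("verified" if ok else "failed")
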
Example #2
def read_data(path):
    """returns path X,Y of data where X[i] is an image and Y[i] is its label"""
    data = {}
    val = {"no": -1, "undetermined": 0, "yes": 1}  # intended label values (unused here; assign_vals decides)
    print("started reading data")
    start_time = time.time()
    labels = []
    for (dirpath, dirnames, filenames) in os.walk(path):
        labels.extend(dirnames)
        break
    label_val = assign_vals(labels)
    for (dirpath, dirnames, filenames) in os.walk(path):
        label = utils.get_dirname(dirpath)
        if label not in label_val:
            continue
        data[label] = filenames
    X = []
    Y = []
    for label in data:
        for img_name in data[label]:
            path_to_img = utils.join_list([path, label, img_name])
            X.append(cv2.imread(path_to_img, 0))
            Y.append(label_val[label])
    elapsed_time = time.time() - start_time
    minutes = elapsed_time // 60
    seconds = elapsed_time % 60
    print("finished reading data in %d min and %d seconds" %
          (minutes, seconds))
    X = np.array(X)
    Y = np.array(Y)
    return X, Y
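
A sketch of a call site for read_data; the directory layout (one subfolder per label, e.g. data/yes, data/no) is inferred from the os.walk logic above, and the path is hypothetical.

X, Y = read_data("data")   # data/<label>/<image files>
print(X.shape, Y.shape)    # (N, H, W) grayscale images, (N,) labels
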
Example #3
File: plouf.py Project: Mazrog/plouf
def init():
    """
    Initialize plouf project for this repository, creating plouffile.
    """

    if valid_repo():
        click.confirm(
            'A "%s" has been found in this repository, override it?'
            % plouf_files["config"],
            abort=True, prompt_suffix='')

    data = {
        'name': click.prompt('project name', default=utils.get_dirname()),
        'description': click.prompt('description', default=''),
        'author': click.prompt('author', default=''),
        'version': click.prompt('version', default='0.1.0')
    }

    click.echo(json.dumps(data, indent=2))
    click.confirm('Is this ok?', default=True, abort=True)
    
    try:
        with open(get_pf_path(), 'w') as pf:
            json.dump(data, pf, indent=4)
        
        utils.success('Initialized empty plouf repository.')

    except Exception as e:
        click.echo(
            click.style(str(e), fg="red"),
            err=True
        )

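The snippet shows only the command body; its click decorators are omitted. One hypothetical way to wire it up, assuming a top-level click group in plouf.py:

import click

@click.group()
def cli():
    """plouf command-line entry point (sketch)."""

# Register the snippet's init() so it runs as `plouf init`.
cli.add_command(click.command("init")(init))

if __name__ == "__main__":
    cli()
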
Example #4
    def make_folder(cls, ref):
        if cls.exists(ref):
            if cls.is_folder(ref):
                return
            raise OSError("[Errno 20] Not a directory: '%s'" % ref)
        parent = utils.get_dirname(ref)
        if not cls.exists(parent):
            raise OSError("[Errno 2] No such file or directory: '%s'" % parent)
        if not cls.is_folder(parent):
            raise OSError("[Errno 20] Not a directory: '%s'" % parent)
        client = cls._get_client(ref)
        path = str(ref.path)
        if cls.debug:
            dprint(path)
        client.mkdir(path)
Example #5
File: webdav.py Project: zendbit/peppy
    def make_folder(cls, ref):
        if cls.exists(ref):
            if cls.is_folder(ref):
                return
            raise OSError("[Errno 20] Not a directory: '%s'" % ref)
        parent = utils.get_dirname(ref)
        if not cls.is_folder(parent):
            raise OSError("[Errno 20] Not a directory: '%s'" % parent)
        ref, client = cls._get_client(ref)
        path = str(ref.path)
        if cls.debug:
            dprint(path)
        responses = client.mkcol(path)
        # It's also possible (but not required) the parent could be cached, so
        # clean out its cache as well
        if cls.debug:
            dprint(parent)
        cls._purge_cache(parent)
Example #6
File: webdav.py Project: zendbit/peppy
    def make_file(cls, ref):
        folder_path = utils.get_dirname(ref)
        file_path = utils.get_filename(ref)

        dest_exists = cls.exists(folder_path)
        if dest_exists:
            dest_exists = cls.exists(ref)
            if dest_exists:
                raise OSError("[Errno 17] File exists: '%s'" % ref)
            elif not cls.is_folder(folder_path):
                raise OSError("[Errno 20] Not a directory: '%s'" % folder_path)
        else:
            cls.make_folder(folder_path)

        fh = cls.temp_file_class(ref, cls._save_file)
        return fh
Example #7
File: webdav.py Project: robmcmullen/peppy
    def make_file(cls, ref):
        folder_path = utils.get_dirname(ref)
        file_path = utils.get_filename(ref)

        dest_exists = cls.exists(folder_path)
        if dest_exists:
            dest_exists = cls.exists(ref)
            if dest_exists:
                raise OSError("[Errno 17] File exists: '%s'" % ref)
            elif not cls.is_folder(folder_path):
                raise OSError("[Errno 20] Not a directory: '%s'" % folder_path)
        else:
            cls.make_folder(folder_path)

        fh = cls.temp_file_class(ref, cls._save_file)
        return fh
Example #8
    def make_file(cls, ref):
        folder_path = utils.get_dirname(ref)
        file_path = utils.get_filename(ref)

        dest_exists = cls.exists(folder_path)
        if dest_exists:
            dest_exists = cls.exists(ref)
            if dest_exists:
                raise OSError("[Errno 17] File exists: '%s'" % ref)
            elif not cls.is_folder(folder_path):
                raise OSError("[Errno 20] Not a directory: '%s'" % folder_path)
        else:
            try:
                cls.make_folder(folder_path)
            except IOError as e:
                raise OSError(e)
Example #9
File: webdav.py Project: robmcmullen/peppy
    def make_folder(cls, ref):
        if cls.exists(ref):
            if cls.is_folder(ref):
                return
            raise OSError("[Errno 20] Not a directory: '%s'" % ref)
        parent = utils.get_dirname(ref)
        if not cls.is_folder(parent):
            raise OSError("[Errno 20] Not a directory: '%s'" % parent)
        ref, client = cls._get_client(ref)
        path = str(ref.path)
        if cls.debug:
            dprint(path)
        responses = client.mkcol(path)
        # It's also possible (but not required) the parent could be cached, so
        # clean out its cache as well
        if cls.debug:
            dprint(parent)
        cls._purge_cache(parent)
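
Note that utils.get_dirname is not one function across these examples: in examples #1 and #2 it returns the last path component (the directory's own name), while in the WebDAV snippets and example #14 it returns the parent of the given path. Both readings as hypothetical sketches over plain string paths:

import os

def get_dirname_last_component(path):
    """Last path component, as used in examples #1-#2 (hypothetical)."""
    return os.path.basename(os.path.normpath(path))

def get_dirname_parent(path):
    """Parent directory, as used in the WebDAV examples (hypothetical)."""
    return os.path.dirname(os.path.normpath(path))

assert get_dirname_last_component("/srv/service-a/") == "service-a"
assert get_dirname_parent("/srv/service-a/flag.txt") == "/srv/service-a"
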
Example #10
def main():
    args = docopt(__doc__)

    embedding_dim = int(args['--dim'])
    max_context = int(args['--max-context'])
    neg_sample_factor = int(args['--neg-sample-factor'])

    batch_size = int(args['--batch'])
    lr = float(args['--lr'])
    epochs = int(args['--epochs'])

    np.random.seed(int(args['--seed']))
    torch.manual_seed(int(args['--seed']))
    torch.cuda.manual_seed_all(int(args['--seed']))
    device = torch.device(int(args['--device']))
    print(f"{device} will be used")
    num_workers = int(args['--num-workers'])
    fpath = args['--file']
    backup_interval = int(args['--backup-interval'])
    dname = args['--dirname']

    dset = FixedLengthContextDataset(fpath, max_context, neg_sample_factor)
    vocabulary_size = dset.num_authors

    # Symmetric vectors are used to compute cosine similarity
    if args['symmetric']:
        model = SymmetricEmbedding(vocabulary_size, embedding_dim)
    # Word2Vec skip-gram: asymmetric vectors are used to compute cosine similarity
    elif args['skipgram']:
        model = SkipGram(vocabulary_size, embedding_dim)

    if dname is None:
        tmp = 'symmetric' if args['symmetric'] else 'skipgram'
        dname = get_dirname(f'embedding_{tmp}')
    else:
        os.makedirs(dname)

    if torch.cuda.is_available():
        model = model.to(device)
    loader = DataLoader(dset, batch_size, num_workers=num_workers)
    train(model, loader, dname, epochs, lr, backup_interval, device)
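
docopt builds args from the module docstring, which the snippet does not include. A usage block consistent with the options read above might look like this sketch; the option names come from the code, but the script name and defaults are hypothetical.

"""Train author embeddings.

Usage:
  train_embedding.py (symmetric|skipgram) [options]

Options:
  --dim=<d>                Embedding dimension [default: 100].
  --max-context=<n>        Maximum context length [default: 10].
  --neg-sample-factor=<k>  Negative samples per positive pair [default: 5].
  --batch=<b>              Batch size [default: 256].
  --lr=<lr>                Learning rate [default: 0.001].
  --epochs=<e>             Training epochs [default: 10].
  --seed=<s>               Random seed [default: 0].
  --device=<i>             CUDA device index [default: 0].
  --num-workers=<w>        DataLoader worker count [default: 4].
  --file=<path>            Path to the context dataset.
  --backup-interval=<n>    Epochs between checkpoints [default: 5].
  --dirname=<dir>          Output directory (auto-named if omitted).
"""
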
Example #11
def read_data(path):
    """returns path X,Y of data where X[i] is an image and Y[i] is its label"""
    data = {}
    print("started reading data")
    start_time = time.time()
    labels = []
    for (dirpath, dirnames, filenames) in walk(path):
        labels.extend(dirnames)
        break
    label_val = assign_vals(labels)
    cnt_all = 0
    for (dirpath, dirnames, filenames) in walk(path):
        label = utils.get_dirname(dirpath)
        if label not in label_val: continue
        data[label] = filenames
        cnt_all += len(filenames)
    X = []
    Y = []
    cnt = lst = 0
    for label in data:
        for img_name in data[label]:
            path_to_img = utils.join_list([path, label, img_name])
            X.append(cv2.imread(path_to_img, 0))
            Y.append(label_val[label])
            cnt += 1
            p = int(cnt * 100.0 / cnt_all)
            if p >= lst + 5:  # report every ~5% (>= so small datasets don't skip past)
                print('%d%% done' % p, cnt)
                lst = p

    elapsed_time = time.time() - start_time
    minutes = elapsed_time // 60
    seconds = elapsed_time % 60
    print("finished reading data in %d min and %d seconds" %
          (minutes, seconds))
    #X = np.array(X)
    Y = np.array(Y)
    return X, Y
Example #12
    def build_tree(self):
        """
        Build the tree using indentation level.

        A change in indentation indicates a change in hierarchy level; a line
        ending in '/' denotes a directory, otherwise a regular file.

        The question in this loop is "where does this new line go?", i.e.
        "who is the parent of the new line?"
        """

        parent_node = self.virtual_root
        prev_indent = -1

        for line in self.input:

            cur_indent = utils.get_indent_count(line, self.indent_size)
            distance = cur_indent - prev_indent
            # who is the parent?
            parent_node = self._find_new_parent(parent_node, distance)
            filename = (utils.get_dirname(line)
                        if utils.is_dir(line)
                        else utils.get_filename(line))

            child = dict(
                parent=parent_node,
                children=[] if utils.is_dir(line) else None,
                data={
                    'filename': filename,
                    'basedir': os.path.join(parent_node['data']['basedir'],
                                            filename)
                },
            )

            parent_node['children'].append(child)
            prev_indent = cur_indent
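
The loop relies on several utils helpers the snippet does not show. Plausible minimal reconstructions, labeled as assumptions, based on how they are called above:

def get_indent_count(line, indent_size):
    """Indentation level of `line` (hypothetical reconstruction)."""
    return (len(line) - len(line.lstrip(" "))) // indent_size

def is_dir(line):
    """A trailing '/' marks a directory, per the docstring above."""
    return line.rstrip().endswith("/")

def get_dirname(line):
    """Directory name with the trailing '/' removed (hypothetical)."""
    return line.strip().rstrip("/")

def get_filename(line):
    """Plain file name (hypothetical)."""
    return line.strip()
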
Example #13
def main():
    args = docopt(__doc__)
    train_embedding = not args['--no-train-embedding']
    handle_foreign = args['--handle-foreign']
    enable_all_pools = args['--enable-all-pools']

    np.random.seed(int(args['--seed']))
    torch.manual_seed(int(args['--seed']))
    torch.cuda.manual_seed_all(int(args['--seed']))

    hidden = int(args['--hidden'])
    dropout = float(args['--dropout'])
    batch_size = int(args['--batch'])
    lr = float(args['--lr'])
    emb_lr = float(args['--emb-lr'])
    weight_decay = float(args['--weight-decay'])
    epochs = int(args['--epochs'])
    device = torch.device(int(args['--device']))
    print(f"{device} will be used")
    ratio = float(args['--ratio'])
    threshold = float(args['--threshold'])
    dname = args['--dirname']

    train_dset = QueryDataset(split='train', ratio=ratio,
                              equally_handle_foreign_authors=handle_foreign,
                              use_paper_author=args['--use-paper-author'],
                              oversample_false_collabs=args['--oversample-false-collabs'])
    valid_dset = QueryDataset(split='valid', ratio=ratio,
                              equally_handle_foreign_authors=handle_foreign)
    train_loader = DataLoader(train_dset, batch_size=1, num_workers=1, shuffle=True)
    valid_loader = DataLoader(valid_dset, batch_size=1, num_workers=1, shuffle=False)

    embedding_mode, embedding = load_embedding(
        args['--embedding'], train_embedding, device)
    classifier = Classifier(embedding, hidden, dropout, args['--deepset'],
                            equally_handle_foreign_authors=handle_foreign,
                            enable_all_pools=enable_all_pools)

    if torch.cuda.is_available():
        classifier.to(device)

    emb_params = set(embedding.parameters())
    cls_params = set(classifier.parameters()).difference(emb_params)

    optimizer1 = optim.SparseAdam(emb_params, lr=emb_lr)
    optimizer2 = optim.Adam(cls_params, lr=lr, weight_decay=weight_decay)

    train_embedding = 'on' if train_embedding else 'off'
    if dname == 'None':  # docopt yields the literal string 'None' when --dirname is unset
        mode = f'{classifier.savename}_emb-{embedding_mode}'\
               f'_trainemb-{train_embedding}'
        dname = get_dirname(mode)
    else:
        os.makedirs(dname, exist_ok=True)
    path = os.path.join(dname, 'log.txt')
    with open(path, 'a') as f:
        f.write(repr(args) + '\n')
    backup_path = os.path.join(dname, 'classifier.pth')

    # TODO: Add checkpoint training feature

    pbar = tqdm(total=epochs, initial=0,
                bar_format="{desc:<5}{percentage:3.0f}%|{bar:10}{r_bar}")
    best_acc = 0
    for epoch in range(epochs):
        avg_loss, train_acc, val_acc, precision, recall, best_model = train_classifier(
            train_loader, valid_loader, classifier,
            [optimizer1, optimizer2], device, epoch, batch_size, dname, threshold)
        if val_acc > best_acc:
            torch.save(best_model.state_dict(), backup_path)
            best_acc = val_acc
        pbar.set_description(
            f'Train Loss: {avg_loss:.6f}, Train Acc:{train_acc:.2f} Valid Acc: {val_acc:.2f}% Precision: {precision:.2f} Recall: {recall:.2f}')
        pbar.update(1)
Example #14
    # Build all train dataset
    merged = utils.merge_dataframes(final_dfs)
    # Sort by time
    merged = utils.sort_by_month(merged, month_col='MONTH', drop=False)
    utils.drop_inf(merged)
    # Normalize dataset
    print "NaNs before normalizing dataset", merged.isnull().sum().sum()
    merged_scaled, scaler = utils.normalize_dataset(merged[day_month_cols])
    merged[day_month_cols] = merged_scaled
    print "NaNs after normalizing dataset", merged.isnull().sum().sum()

    if train_size == 1.0:
        train, test = merged, None
    else:
        bound = int(train_size * merged.shape[0])
        train, test = merged[:bound], merged[bound:]

    dirname = utils.get_dirname(namespace.output)
    out_fname = utils.get_fname(namespace.output).split('.')[0]
    train_fname = utils.get_path(out_fname + '_train.csv', dirname)
    test_fname = utils.get_path(out_fname + '_test.csv', dirname)
    scaler_fname = utils.get_path(out_fname + '_scaler', dirname)
    print "Writing train to %s" % train_fname
    train.to_csv(train_fname, index=False)
    if test is not None:
        print "Writing test to %s" % test_fname
        test.to_csv(test_fname, index=False)

    print "Dumping scaler to %s" % scaler_fname
    utils.dump_scaler(scaler, scaler_fname)
    print "Done."