Example #1
def main():

    args = argparse()
    model = args.model
    output_basename = args.basename
    path = set_paths(output_basename, model)

    sim_param = dict(end=args.end, run=args.run, desktop=args.desktop, model=model, load=args.load)
    eng, model_name = start_matlab(path, sim_param, command=None)

    print("\n\033[32;1;4mWelcome to the PILIA python interfacer! Available commands are:")
    print("{:<7} - displays help\n{:<7} - update workspace with new configuration variables\n{:<7} - post-processes data\n{:<7} - exits the prompt\033[30;0;4m\n".format("help", "update", "pp", "exit"))

    while 1:
        print(">>>  ", end="", flush=True)
        command = input().split(sep=" ")
        if command[0] == 'help':
            print("You may configure the parameters in your CONF folder, then update the model to take the changes into account.")
        elif command[0] == 'run': run(sim_param, model_name, eng, command)
        elif command[0] == 'update': update(eng)
        elif command[0] == 'pp': postprocess(eng, path, output_basename, command, sim_param)
        elif command[0] == 'exec': eng.eval(" ".join(command[1:]))
        elif command[0] == 'exit':
            # os.system(r'del -Force -Recurse .\PILIA\PROJECTS\tolosat_adcs_kalman\V1\CONF\slprj')
            exit()
Example #2
def main(_):
    init()
    args = argparse()
    file_queue = tf.train.string_input_producer([args.inputpath])
    inputs = data_input.get_batch(file_queue, [args.height, args.width], 64, 4, 5, is_training=False)
    output_height = args.height * args.scale
    output_width = args.width * args.scale
    if args.scale < 0:
        assert args.height % args.scale == 0
        assert args.width % args.scale == 0
        output_height = args.height // args.scale
        output_width = args.width // args.scale

    outputs = tf.image.resize_bicubic(inputs, [output_height, output_width])
    # if
    # save_images

    for device_index in range(args.gpus):
        with tf.device("/gpu:{}".format(device_index)):
            pass


if __name__ == "__main__":
    tf.app.run(main)
Example #3
def connection():
    connection_string = argparse()
    if not connection_string:
        import dronekit_sitl
        sitl = dronekit_sitl.start_default()
        connection_string = sitl.connection_string()


    # Connect to the Vehicle.
    #   Set `wait_ready=True` to ensure default attributes are populated before `connect()` returns.
    return connection_string
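

# Usage sketch (an assumption, not part of the snippet above): the returned
# string is typically handed to dronekit's connect(); wait_ready=True makes
# connect() block until the Vehicle's default attributes are populated.
from dronekit import connect

vehicle = connect(connection(), wait_ready=True)
print("Connected, vehicle mode: %s" % vehicle.mode.name)
vehicle.close()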
Example #4
def main():
    args = argparse()
    
    file_list = find_files(args.f, args.ext, args.r)

    todos = []
    for f_name in file_list:
        file_todos = find_todos(f_name)
        if file_todos:
            todos.append(file_todos)
    
    print_todos(args, todos)
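

# Hypothetical sketch (find_todos itself is not shown in this snippet): a
# minimal version scans a file for "TODO" markers and returns the file name
# together with the matching line numbers and text.
def find_todos_sketch(f_name):
    hits = []
    with open(f_name, errors="ignore") as handle:
        for line_no, line in enumerate(handle, start=1):
            if "TODO" in line:
                hits.append((f_name, line_no, line.strip()))
    return hits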
Example #5
def main():
    args = argparse()

    file_list = find_files(args.f, args.ext, args.r)

    todos = []
    for f_name in file_list:
        file_todos = find_todos(f_name)
        if file_todos:
            todos.append(file_todos)

    print_todos(args, todos)
Example #6
def bc_argparser(description="Behavioral Cloning Experiment"):
    """Create an argparse.ArgumentParser for behavioral cloning-related tasks"""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('--note', help='w/e', type=str, default=None)
    parser.add_argument('--env_id', help='environment identifier', default='Hopper-v2')
    parser.add_argument('--horizon', help='maximum number of timesteps in an episode',
                        type=int, default=None)
    parser.add_argument('--seed', help='RNG seed', type=int, default=0)
    parser.add_argument('--checkpoint_dir', help='directory to save the models',
                        default=None)
    parser.add_argument('--log_dir', help='directory to save the log files',
                        default='data/logs')
    parser.add_argument('--summary_dir', help='directory to save the summaries',
                        default='data/summaries')
    parser.add_argument('--task', help='task to carry out', type=str,
                        choices=['clone',
                                 'evaluate_bc_policy'],
                        default='clone')
    parser.add_argument('--expert_path', help='.npz archive containing the demos',
                        type=str, default=None)
    parser.add_argument('--num_demos', help='number of expert demo trajs for imitation',
                        type=int, default=None)
    parser.add_argument('--save_frequency', help='save model every xx iterations',
                        type=int, default=100)
    parser.add_argument('--num_iters', help='cumulative number of iterations since launch',
                        type=int, default=int(1e6))
    parser.add_argument('--batch_size', help='minibatch size', type=int, default=64)
    parser.add_argument('--lr', help='adam learning rate', type=float, default=3e-4)
    parser.add_argument('--clip_norm', type=float, default=None)
    boolean_flag(parser, 'render', help='whether to render the interaction traces', default=False)
    parser.add_argument('--num_trajs', help='number of trajectories to evaluate/gather',
                        type=int, default=10)
    parser.add_argument('--exact_model_path', help='exact path of the model',
                        type=str, default=None)
    parser.add_argument('--model_ckpt_dir', help='checkpoint directory containing the models',
                        type=str, default=None)
    parser.add_argument('--demos_dir', type=str, help='directory to save the demonstrations',
                        default='data/expert_demonstrations')
    boolean_flag(parser, 'rmsify_obs', default=True)
    parser.add_argument('--hid_widths', nargs='+', type=int, default=[64, 64])
    parser.add_argument('--hid_nonlin', type=str, default='leaky_relu',
                        choices=['relu', 'leaky_relu', 'prelu', 'elu', 'selu', 'tanh'])
    parser.add_argument('--hid_w_init', type=str, default='he_normal',
                        choices=['he_normal', 'he_uniform', 'xavier_normal', 'xavier_uniform'])
    return parser
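

# Usage sketch (an assumption, not shown in the original): the returned parser
# is consumed like any argparse.ArgumentParser.
if __name__ == '__main__':
    _args = bc_argparser().parse_args()
    print(_args.task, _args.env_id, _args.seed)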
Example #7
def main():
    description = 'Exploit search and download utility'
    if pythonVersion > 2.6:
        parser = argparse.ArgumentParser(description)
        addArgumentCall = parser.add_argument
    else:
        parser = argparse.OptionParser(description)
        addArgumentCall = parser.add_option
    #
    if pythonVersion > 2.6:
        addArgumentCall(
            'query',
            metavar='query',
            type=str,
            nargs='*',
            help=
            'Exploit search query. See https://vulners.com/help for the detailed manual.'
        )
    # Arguments
    addArgumentCall(
        '-t',
        '--title',
        action='store_true',
        help=
        "Search JUST the exploit title (Default is description and source code)."
    )
    addArgumentCall('-j',
                    '--json',
                    action='store_true',
                    help='Show result in JSON format.')
    addArgumentCall(
        '-m',
        '--mirror',
        action='store_true',
        help=
        'Mirror (aka copies) search result exploit files to the subdirectory with your search query name.'
    )
    addArgumentCall('-c',
                    '--count',
                    nargs=1,
                    type=int,
                    default=10,
                    help='Search limit. Default 10.')
    if LOCAL_SEARCH_AVAILABLE:
        addArgumentCall(
            '-l',
            '--local',
            action='store_true',
            help=
            'Perform search in the local database instead of searching online.'
        )
        addArgumentCall(
            '-u',
            '--update',
            action='store_true',
            help=
            'Update getsploit.db database. Will be downloaded in the script path.'
        )

    if pythonVersion > 2.6:
        options = parser.parse_args()
        searchQuery = " ".join(options.query)
    else:
        options, args = parser.parse_args()
        searchQuery = " ".join(args)

    if isinstance(options.count, list):
        options.count = options.count[0]

    # Update goes first
    if LOCAL_SEARCH_AVAILABLE and options.update:
        downloadVulnersGetsploitDB(DBPATH)
        print(
            "Database download complete. Now you may search exploits using --local key './getsploit.py -l wordpress 4.7'"
        )
        exit()

    # Check that there is a query
    if not searchQuery:
        print(
            "No search query provided. Type software name and version to find exploit."
        )
        exit()

    # Select the appropriate search method for the search: local or remote
    if LOCAL_SEARCH_AVAILABLE and options.local:
        if not os.path.exists(DBFILE):
            print(
                "There is no local database file near getsploit. Run './getsploit.py --update'"
            )
            exit()
        finalQuery, searchResults = exploitLocalSearch(
            searchQuery,
            lookupFields=['title'] if options.title else None,
            limit=options.count)
    else:
        finalQuery, searchResults = exploitSearch(
            searchQuery,
            lookupFields=['title'] if options.title else None,
            limit=options.count)

    outputTable = Texttable()
    outputTable.set_cols_dtype(['t', 't', 't'])
    outputTable.set_cols_align(['c', 'l', 'c'])
    outputTable.set_cols_width(['20', '30', '100'])
    tableRows = [['ID', 'Exploit Title', 'URL']]
    jsonRows = []
    for bulletinSource in searchResults.get('search'):
        bulletin = bulletinSource.get('_source')
        bulletinUrl = bulletin.get('vref') or 'https://vulners.com/%s/%s' % (
            bulletin.get('type'), bulletin.get('id'))
        tableRows.append(
            [bulletin.get('id'),
             bulletin.get('title'), bulletinUrl])
        if options.json:
            jsonRows.append({
                'id': bulletin.get('id'),
                'title': bulletin.get('title'),
                'url': bulletinUrl
            })
        if options.mirror:
            pathName = './%s' % slugify(searchQuery)
            # Put results in the dir
            if not os.path.exists(pathName):
                os.mkdir(pathName)
            with open("./%s/%s.txt" % (pathName, slugify(bulletin.get('id'))),
                      'w') as exploitFile:
                exploitData = bulletin.get('sourceData') or bulletin.get(
                    'description')
                if pythonVersion < 3.0:
                    exploitData = exploitData.encode('utf-8').strip()
                exploitFile.write(exploitData)
    if options.json:
        # Json output
        print(json.dumps(jsonRows))
    else:
        # Text output
        print("Total found exploits: %s" % searchResults.get('total'))
        if pythonVersion < 3:
            quoteStringHandler = urllib.quote_plus
        else:
            quoteStringHandler = urllib.parse.quote_plus
        print("Web-search URL: https://vulners.com/search?query=%s" %
              quoteStringHandler(finalQuery))
        # Set max coll width by len of the url for better copypaste
        maxWidth = max(len(element[2]) for element in tableRows)
        outputTable.set_cols_width([20, 30, maxWidth])
        outputTable.add_rows(tableRows)
        if pythonVersion < 3.0:
            # Just pass non-ascii
            print(outputTable.draw().decode('ascii', 'ignore'))
        else:
            # Any better solution here?
            print(outputTable.draw().encode('ascii', 'ignore').decode())
Example #8
def main():

    parser = argparse.ArgumentParser(
        description="experiments of supervision rate on dNN classifier",
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        "outnpz",
        help=
        "filename of the output file (warning: file will be saved as [filename].npz)"
    )
    parser.add_argument(
        "-e",
        "--number-epoch",
        dest="num_epochs",
        type=int,
        default=100,
        help="number of epoch",
    )
    parser.add_argument(
        "-n",
        "--number-experiments",
        dest="num_exp",
        type=int,
        default=10,
        help="number of experiments",
    )
    parser.add_argument(
        "-p",
        "--validation-proportion",
        dest="prop_valid",
        type=int,
        default=20,
        help=
        "proportion of validation data for the split validation/train data (in %)",
    )
    parser.add_argument(
        "-b",
        "--size-minibatch",
        dest="size_minibatch",
        type=int,
        default=1000,
        help="size of minibatch",
    )

    args = parser.parse_args()

    num_epochs = args.num_epochs
    num_exp = args.num_exp
    prop_valid = args.prop_valid
    size_minibatch = args.size_minibatch

    print("Set network")
    input_var = T.tensor4('inputs')
    target_var = T.tensor4('targets')
    class_var = T.ivector('classes')

    network_enc, network_class = build_lae(input_var)
    params_init_network_enc = lasagne.layers.get_all_param_values(network_enc)
    params_init_network_class = lasagne.layers.get_all_param_values(
        network_class)

    print("number of params for enc", lasagne.layers.count_params(network_enc))
    print("number of params for class",
          lasagne.layers.count_params(network_class))

    # definition of what is "train" for encoder
    reconstruction_enc = lasagne.layers.get_output(network_enc)
    se_enc = lasagne.objectives.squared_error(reconstruction_enc, target_var)
    mse_enc = lasagne.objectives.aggregate(se_enc)
    params_enc = lasagne.layers.get_all_params(network_enc, trainable=True)
    updates_enc = lasagne.updates.nesterov_momentum(mse_enc,
                                                    params_enc,
                                                    learning_rate=0.1,
                                                    momentum=0.9)

    train_fn_enc = theano.function([input_var, target_var],
                                   mse_enc,
                                   updates=updates_enc)

    # definition of what is "train" for classifier
    reconstruction_enc = lasagne.layers.get_output(network_enc)
    prediction_class = lasagne.layers.get_output(network_class)
    ce_class = lasagne.objectives.categorical_crossentropy(
        prediction_class, class_var)
    ace_class = lasagne.objectives.aggregate(ce_class)
    params_class = lasagne.layers.get_all_params(network_class, trainable=True)
    updates_class = lasagne.updates.nesterov_momentum(ace_class,
                                                      params_class,
                                                      learning_rate=0.1,
                                                      momentum=0.9)

    train_fn_class = theano.function([input_var, class_var],
                                     ace_class,
                                     updates=updates_class)

    test_class_prediction = lasagne.layers.get_output(network_class,
                                                      deterministic=True)
    test_class_ce = lasagne.objectives.categorical_crossentropy(
        test_class_prediction, class_var)
    test_class_ace = lasagne.objectives.aggregate(test_class_ce)
    test_class_acc = T.mean(T.eq(T.argmax(test_class_prediction, axis=1),
                                 class_var),
                            dtype=theano.config.floatX)

    test_enc_reconstruction = lasagne.layers.get_output(network_enc,
                                                        deterministic=True)
    test_enc_se = lasagne.objectives.squared_error(test_enc_reconstruction,
                                                   target_var)
    test_enc_mse = lasagne.objectives.aggregate(test_enc_se)

    val_fn_class = theano.function([input_var, class_var],
                                   [test_class_ace, test_class_acc])
    val_fn_enc = theano.function([input_var, target_var], test_enc_mse)

    overall_time = time.time()
    # mnist dataset
    print("Loading mnist data...")
    X_train, y_train, X_test, y_test = load_dataset_mnist()
    seqm = np.arange(100, -10, -10)
    seqn = np.arange(num_exp)

    # 	m = 0

    ### results for NS training ###
    OptNbSample_ns = np.zeros([len(seqm), len(seqn)])
    OptNbEpoch_ns = np.zeros([len(seqm), len(seqn)])

    OptMseTrain_ns = np.zeros([len(seqm), len(seqn)])
    OptMseValid_ns = np.zeros([len(seqm), len(seqn)])
    TensorMseTrain_ns = np.zeros([len(seqm), len(seqn), num_epochs])
    TensorMseValid_ns = np.zeros([len(seqm), len(seqn), num_epochs])

    ### results for S training ###
    OptNbSample_s = np.zeros([len(seqm), len(seqn)])
    OptNbEpoch_s = np.zeros([len(seqm), len(seqn)])

    OptAccTrain_s = np.zeros([len(seqm), len(seqn)])
    OptAceTrain_s = np.zeros([len(seqm), len(seqn)])
    OptMseValid_s = np.zeros([len(seqm), len(seqn)])
    OptAccValid_s = np.zeros([len(seqm), len(seqn)])
    OptAceValid_s = np.zeros([len(seqm), len(seqn)])
    # 	TensorMseTrain_s = np.zeros( [len(seqm), len(seqn), num_epochs] )
    TensorMseValid_s = np.zeros([len(seqm), len(seqn), num_epochs])
    TensorAceTrain_s = np.zeros([len(seqm), len(seqn), num_epochs])
    TensorAceValid_s = np.zeros([len(seqm), len(seqn), num_epochs])
    # 	TensorAccTrain_s = np.zeros( [len(seqm), len(seqn), num_epochs] )
    TensorAccValid_s = np.zeros([len(seqm), len(seqn), num_epochs])

    ### results Test ###
    ArrayAccTest = np.zeros([len(seqm), len(seqn)])
    ArrayAceTest = np.zeros([len(seqm), len(seqn)])
    ArrayMseTest = np.zeros([len(seqm), len(seqn)])

    for m in np.arange(len(seqm)):

        prop_train_s = seqm[m]
        print("learning supervision rate", prop_train_s, "%")
        for n in seqn:
            print("re-initialize network parameters ... ")
            lasagne.layers.set_all_param_values(network_enc,
                                                params_init_network_enc)
            lasagne.layers.set_all_param_values(network_class,
                                                params_init_network_class)

            print("experiment:", n + 1, "/", len(seqn))
            T_ind = np.arange(len(y_train))
            np.random.shuffle(T_ind)
            X_train = X_train[T_ind]
            y_train = y_train[T_ind]

            if prop_valid <= 0 or prop_valid >= 100:
                print(
                    "WARNING: validation/Training proportion cannot be 0% or 100% : setting default 20%...."
                )
                prop_valid = 20

            nb_train_s = np.floor((prop_train_s / 100) * len(X_train)).astype(
                int)  # part used for the supervised learning
            nb_train_ns = np.floor(
                (1 - (prop_train_s / 100)) * len(X_train)
            ).astype(
                int
            )  # part used for the autoencoder (the rest for training the classifier)
            if nb_train_s != 0:
                # supervised / non-supervised split
                X_train_ns, X_train_s = X_train[:-nb_train_s], X_train[
                    -nb_train_s:]
                y_train_ns, y_train_s = y_train[:-nb_train_s], y_train[
                    -nb_train_s:]
            elif nb_train_s == 0:  # if supervision Rate 100% -> p = 0 -> the selection of indices [-0:] and [:-0] are permuted
                X_train_s, X_train_ns = X_train[:-nb_train_s], X_train[
                    -nb_train_s:]
                y_train_s, y_train_ns = y_train[:-nb_train_s], y_train[
                    -nb_train_s:]

            nb_valid_s = np.floor(
                (prop_valid / 100) * len(X_train_s)).astype(int)
            nb_valid_ns = np.floor(
                (prop_valid / 100) * len(X_train_ns)).astype(int)

            if nb_valid_s != 0:
                # train/validation split
                X_train_s, X_val_s = X_train_s[:-nb_valid_s], X_train_s[
                    -nb_valid_s:]
                y_train_s, y_val_s = y_train_s[:-nb_valid_s], y_train_s[
                    -nb_valid_s:]
            elif nb_valid_s == 0:  # if supervision Rate 100% -> p = 0 -> the selection of indices [-0:] and [:-0] are permuted
                X_val_s, X_train_s = X_train_s[:-nb_valid_s], X_train_s[
                    -nb_valid_s:]
                y_val_s, y_train_s = y_train_s[:-nb_valid_s], y_train_s[
                    -nb_valid_s:]
            print("number images for supervised learning (train/val):",
                  nb_train_s, "(", len(X_train_s), "/", len(X_val_s), ")")

            if nb_valid_ns != 0:
                # train/validation split
                X_train_ns, X_val_ns = X_train_ns[:-nb_valid_ns], X_train_ns[
                    -nb_valid_ns:]
                y_train_ns, y_val_ns = y_train_ns[:-nb_valid_ns], y_train_ns[
                    -nb_valid_ns:]
            elif nb_valid_ns == 0:  # if supervision Rate 100% -> p = 0 -> the selection of indices [-0:] and [:-0] are permuted
                X_val_ns, X_train_ns = X_train_ns[:-nb_valid_ns], X_train_ns[
                    -nb_valid_ns:]
                y_val_ns, y_train_ns = y_train_ns[:-nb_valid_ns], y_train_ns[
                    -nb_valid_ns:]
            print("number images for non-supervised learning (train/val):",
                  nb_train_ns, "(", len(X_train_ns), "/", len(X_val_ns), ")")

            # 			print(len(X_train_class), len(X_train_enc))

            # 			print("Starting training...")
            # iteration over epochs:
            training_time = time.time()
            MseTrain_lowest = sys.float_info.max
            MseVal_lowest = sys.float_info.max
            best_nbsample = 0
            params_nn_ns_best = lasagne.layers.get_all_param_values(
                network_enc)

            for e_ns in range(num_epochs):

                train_mse = 0
                train_batches = 0
                val_mse = 0
                val_batches = 0
                start_time = time.time()

                ### shuffle indices of train/valid data

                ind_train_ns = np.arange(len(y_train_ns))
                np.random.shuffle(ind_train_ns)
                X_train_ns = X_train_ns[ind_train_ns]
                y_train_ns = y_train_ns[ind_train_ns]

                #### batch TRAIN ENCODER ####
                for batch in iterate_minibatches(X_train_ns,
                                                 X_train_ns,
                                                 y_train_ns,
                                                 size_minibatch,
                                                 shuffle=True):
                    inputs, targets, classes = batch
                    train_mse += train_fn_enc(inputs, targets)
                    train_batches += 1

                MseTrain = 0
                if train_batches != 0:
                    MseTrain = (train_mse / train_batches)

                #### batch VALID ENCODER ####
                for batch in iterate_minibatches(X_val_ns,
                                                 X_val_ns,
                                                 y_val_ns,
                                                 size_minibatch,
                                                 shuffle=True):
                    inputs, targets, classes = batch
                    val_mse += val_fn_enc(inputs, targets)
                    val_batches += 1

                MseVal = 0
                if val_batches != 0:
                    MseVal = (val_mse / val_batches)
                t = time.time() - overall_time
                hours, minutes, seconds = t // 3600, (
                    t - 3600 *
                    (t // 3600)) // 60, (t - 3600 *
                                         (t // 3600)) - (60 *
                                                         ((t - 3600 *
                                                           (t // 3600)) // 60))
                print("-----UnSupervised-----")
                print("Total Time :",
                      "\t%dh%dm%ds" % (hours, minutes, seconds))
                print("")
                print("Epoch: ", e_ns + 1, "/", num_epochs,
                      "\tn:%d/%d" % (n + 1, len(seqn)),
                      "\tt: {:.3f}s".format(time.time() - start_time),
                      "\ts: %d" % (prop_train_s), "%")
                print("\t training recons MSE:\t{:.6f} ".format(MseTrain))
                print("\t validation recons MSE:\t{:.6f}".format(MseVal))
                print("")

                TensorMseTrain_ns[m][n][e_ns] = MseTrain
                TensorMseValid_ns[m][n][e_ns] = MseVal

                if MseVal < MseVal_lowest:
                    MseVal_lowest = MseVal
                    OptMseValid_ns[m][n] = MseVal
                    OptMseTrain_ns[m][n] = MseTrain
                    OptNbSample_ns[m][n] = e_ns * len(X_train_ns)
                    OptNbEpoch_ns[m][n] = e_ns

                    params_nn_ns_best = lasagne.layers.get_all_param_values(
                        network_enc)
                    params_nn_s_best = lasagne.layers.get_all_param_values(
                        network_class)

            lasagne.layers.set_all_param_values(network_enc, params_nn_ns_best)

            AceTrain_lowest = sys.float_info.max
            AceVal_lowest = sys.float_info.max
            best_nbsample = 0
            params_nn_s_best = lasagne.layers.get_all_param_values(
                network_class)

            for e_s in range(num_epochs):

                train_ace = 0
                val_ace = 0
                val_acc = 0
                val_mse = 0
                train_batches = 0
                val_batches = 0
                start_time = time.time()

                ### shuffle indices of train/valid data

                ind_train_s = np.arange(len(y_train_s))
                np.random.shuffle(ind_train_s)
                X_train_s = X_train_s[ind_train_s]
                y_train_s = y_train_s[ind_train_s]

                #### batch TRAIN CLASSIFIER ####
                for batch in iterate_minibatches(X_train_s,
                                                 X_train_s,
                                                 y_train_s,
                                                 size_minibatch,
                                                 shuffle=True):
                    inputs, targets, classes = batch
                    train_ace += train_fn_class(inputs, classes)
                    train_batches += 1

                if train_batches != 0:
                    AceTrain = (train_ace / train_batches)
                else:
                    AceTrain = 0

                #### batch VALID CLASSIFIER ####
                for batch in iterate_minibatches(X_val_s,
                                                 X_val_s,
                                                 y_val_s,
                                                 size_minibatch,
                                                 shuffle=True):
                    inputs, targets, classes = batch
                    ace, acc = val_fn_class(inputs, classes)
                    val_ace += ace
                    val_acc += acc
                    val_mse += val_fn_enc(inputs, targets)
                    val_batches += 1

                if val_batches != 0:
                    MseVal = (val_mse / val_batches)
                    AceVal = (val_ace / val_batches)
                    AccVal = (val_acc / val_batches)
                else:
                    MseVal = 0
                    AceVal = 0
                    AccVal = 0

                t = time.time() - overall_time
                # 				hours, minutes, seconds = t//3600, (t - 3600*(t//3600))//60, ((t - 3600*(t//3600)) - 60*(t - (3600*(t//3600)))//60)
                hours, minutes, seconds = t // 3600, (
                    t - 3600 *
                    (t // 3600)) // 60, (t - 3600 *
                                         (t // 3600)) - (60 *
                                                         ((t - 3600 *
                                                           (t // 3600)) // 60))
                print("-----Supervised-----")
                print("Total Time :",
                      "\t%dh%dm%ds" % (hours, minutes, seconds))
                print("")
                print("Epoch: ", e_s + 1, "/", num_epochs,
                      "\tn:%d/%d" % (n + 1, len(seqn)),
                      "\tt: {:.3f}s".format(time.time() - start_time),
                      "\ts: %d" % (prop_train_s), "%")
                # 				print("Epoch :", e_s + 1, "/", num_epochs, "\tt: {:.3f}s".format( time.time() - start_time), "\tSR: {:1f}".format(prop_train_s), "%" )
                # 				print("Epoch :", e_s + 1, "/", num_epochs, "\t{:.3f}s".format( time.time() - start_time))
                print("\t training class ACE:\t{:.6f} ".format(AceTrain))
                print("\t validation class ACE:\t{:.6f}".format(AceVal))
                print("\t validation class MSE:\t{:.6f}".format(MseVal))
                print("\t validation class ACC:\t{:.6f}".format(AccVal))
                print("")
                TensorAceTrain_s[m][n][e_s] = AceTrain
                TensorMseValid_s[m][n][e_s] = MseVal
                TensorAceValid_s[m][n][e_s] = AceVal
                TensorAccValid_s[m][n][e_s] = AccVal

                if AceVal < AceVal_lowest:
                    AceVal_lowest = AceVal
                    OptAceTrain_s[m][n] = AceTrain
                    OptAceValid_s[m][n] = AceVal
                    OptMseValid_s[m][n] = MseVal
                    OptAccValid_s[m][n] = AccVal
                    OptNbSample_s[m][n] = e_s * len(X_train_s)
                    OptNbEpoch_s[m][n] = e_s
                    params_nn_s_best = lasagne.layers.get_all_param_values(
                        network_class)

            lasagne.layers.set_all_param_values(network_enc, params_nn_ns_best)
            lasagne.layers.set_all_param_values(network_class,
                                                params_nn_s_best)
            test_ace = 0
            test_acc = 0
            test_mse = 0
            test_batches = 0
            for batch in iterate_minibatches(X_test,
                                             X_test,
                                             y_test,
                                             size_minibatch,
                                             shuffle=True):
                inputs, targets, classes = batch
                ace, acc = val_fn_class(inputs, classes)
                test_ace += ace
                test_acc += acc
                test_mse += val_fn_enc(inputs, targets)
                test_batches += 1

            MseTest = (test_mse / test_batches)
            AceTest = (test_ace / test_batches)
            AccTest = (test_acc / test_batches)
            ArrayAccTest[m][n] = AccTest
            ArrayAceTest[m][n] = AceTest
            ArrayMseTest[m][n] = MseTest

            # 			print("Epoch :", epoch + 1, "/", num_epochs, "\t{:.3f}s".format( time.time() - training_time))
            print("Test Results | supervision rate ", prop_train_s,
                  "% | experiment ", n, "/", len(seqn))
            print("\t test recons MSE:\t\t{:.6f}".format(MseTest))
            print("\t test class ACE:\t\t{:.6f}".format(AceTest))
            print("\t test class ACC:\t\t{:.2f} %".format(100 * (AccTest)))

# 		print(s, m, n)

    t = time.time() - overall_time
    hours, minutes, seconds = t // 3600, (t - 3600 * (t // 3600)) // 60, (
        t - 3600 * (t // 3600)) - (60 * ((t - 3600 * (t // 3600)) // 60))
    print("Total Time :", "\t%dh%dm%ds" % (hours, minutes, seconds))

    print("saving results ... ")
    diconame = os.path.join('./', 'exp-supervision-rate-without_ns')
    diconame = '%s.%s' % (diconame, 'npz')
    np.savez(diconame,
             OptNbSample_ns=OptNbSample_ns,
             OptNbEpoch_ns=OptNbEpoch_ns,
             OptMseTrain_ns=OptMseTrain_ns,
             OptMseValid_ns=OptMseValid_ns,
             TensorMseTrain_ns=TensorMseTrain_ns,
             TensorMseValid_ns=TensorMseValid_ns,
             OptNbSample_s=OptNbSample_s,
             OptNbEpoch_s=OptNbEpoch_s,
             OptAccTrain_s=OptAccTrain_s,
             OptAceTrain_s=OptAceTrain_s,
             OptMseValid_s=OptMseValid_s,
             OptAccValid_s=OptAccValid_s,
             OptAceValid_s=OptAceValid_s,
             TensorMseValid_s=TensorMseValid_s,
             TensorAceTrain_s=TensorAceTrain_s,
             TensorAceValid_s=TensorAceValid_s,
             TensorAccValid_s=TensorAccValid_s,
             ArrayAccTest=ArrayAccTest,
             ArrayAceTest=ArrayAceTest,
             ArrayMseTest=ArrayMseTest)
Example #9
def main():
    description = """
        Mikrotik exploit from Vault 7 CIA Leaks automation tool
        Takeovers up to RouterOS 6.38.4.

        Usage: mikrot8over IP_ADDRESS
        """
    if six.PY2:
        parser = argparse.ArgumentParser(description)
        addArgumentCall = parser.add_argument
    else:
        parser = argparse.OptionParser(description)
        addArgumentCall = parser.add_option
    #
    if six.PY2:
        addArgumentCall('address', metavar='address', type=str, nargs=1,
                        help='Scan address or IPv4 network in CIDR format')

    # Arguments
    addArgumentCall('-p', '--port', type=int, nargs="*", default=[8291],
                    help='List of the port to scan. Default is 8291')
    addArgumentCall('-t', '--threads', nargs=1, type=int, default=10,
                    help='Number of scan threads. Default is 10 that fits the most of systems')
    addArgumentCall('-o', '--timeout', nargs=1, type=float, default=0.3,
                    help='Socket connection timeout')

    if six.PY2:
        options = parser.parse_args()
        address = " ".join(options.address)
    else:
        options, args = parser.parse_args()
        address = " ".join(args)

    ports = options.port
    threads = options.threads
    timeout = options.timeout

    if not address:
        print(description)
        print("No scan address provided. Exit.")
        exit()

    for port in ports:
        print("Starting scan for IP %s, port %s running in %s threads" % (address, port, threads))

        try:
            targets = ipcalc.Network(address)
            scan_args = ((str(ip), port, timeout) for ip in targets)
        except ValueError as error:
            print("Failed to parse network address %s with %s error" % (address, error))
            exit()

        with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
            results = list(tqdm(executor.map(lambda p: scan_target(*p), scan_args), total=len(targets)))

        output_table = texttable.Texttable()
        output_table.set_cols_dtype(['t', 't', 't'])
        output_table.set_cols_align(['c', 'l', 'c'])
        output_table.set_cols_width(['20', '30', '100'])
        table_rows = [['IP', 'Login', 'Password']]

        vulnerable_results = [result for result in results if result and result['users']]

        for data in vulnerable_results:
            for credentials in data['users']:
                if credentials[1]:
                    table_rows.append([data["ip_address"], credentials[0], credentials[1]])
        output_table.add_rows(table_rows)
        if not six.PY3:
            # Just pass non-ascii
            print(output_table.draw().encode('ascii', 'ignore'))
        else:
            # Any better solution here?
            print(output_table.draw().encode('ascii', 'ignore').decode())
Example #10
    m.term(Partition(index), R(coeffs))
    for index, coeffs in [
"""

output_footer = r"""    ])

"""

def save(path, csf):
    filename = 'output/csf-' + ''.join(map(str, path)) + '.py'
    with open(filename, 'w') as f:
        f.write(output_header.format(path=path))
        for index, coeffs in sorted(csf.iteritems()):
            while coeffs and coeffs[-1] == 0:
                coeffs.pop()
            if coeffs:
                f.write("    ({}, {}),\n".format(list(index), coeffs))
        f.write(output_footer)

# ---------------------------------------------------------

if __name__ == '__main__':
    doctest()
    setup_logging()
    n = argparse()
    for path, csf in compute_csfs(n):
        save(path, csf)

# ---------------------------------------------------------

Example #11
import tensorflow as tf
import argparse

arg = argparse()


class Graph():
    def __init__(self, is_training=True):
        tf.reset_default_graph()
        self.is_training = arg.is_training
        self.hidden_units = arg.hidden_units
        self.input_vocab_size = arg.input_vocab_size
        self.label_vocab_size = arg.label_vocab_size
        self.num_heads = arg.num_heads
        self.num_blocks = arg.num_blocks
        self.max_length = arg.max_length
        self.lr = arg.lr
        self.dropout = arg.dropout_rate

        self.x = tf.placeholder(tf.int32, shape=(None, None))
        self.y = tf.placeholder(tf.int32, shape=(None, None))
        self.de_inp = tf.placeholder(tf.int32, shape=(None, None))

        # with tf.variable_scope("encoder"):
Example #12
def main():
    description = 'Exploit search and download utility'
    if pythonVersion > 2.6:
        parser = argparse.ArgumentParser(description)
        addArgumentCall = parser.add_argument
    else:
        parser = argparse.OptionParser(description)
        addArgumentCall = parser.add_option
    #
    if pythonVersion > 2.6:
        addArgumentCall('query', metavar='query', type=str, nargs='*', help='Exploit search query. See https://vulners.com/help for the detailed manual.')
    # Arguments
    addArgumentCall('-t', '--title', action='store_true',
                        help="Search JUST the exploit title (Default is description and source code).")
    addArgumentCall('-j', '--json', action='store_true',
                        help='Show result in JSON format.')
    addArgumentCall('-m', '--mirror', action='store_true',
                        help='Mirror (aka copies) search result exploit files to the subdirectory with your search query name.')
    addArgumentCall('-c', '--count', nargs=1, type=int, default=10,
                        help='Search limit. Default 10.')
    if LOCAL_SEARCH_AVAILABLE:
        addArgumentCall('-l', '--local', action='store_true',
                        help='Perform search in the local database instead of searching online.')
        addArgumentCall('-u', '--update', action='store_true',
                        help='Update getsploit.db database. Will be downloaded in the script path.')

    if pythonVersion > 2.6:
        options = parser.parse_args()
        searchQuery = " ".join(options.query)
    else:
        options, args = parser.parse_args()
        searchQuery = " ".join(args)

    if isinstance(options.count, list):
        options.count = options.count[0]

    # Update goes first
    if LOCAL_SEARCH_AVAILABLE and options.update:
        downloadVulnersGetsploitDB(DBPATH)
        print("Database download complete. Now you may search exploits using --local key './getsploit.py -l wordpress 4.7'")
        exit()

    # Check that there is a query
    if not searchQuery:
        print("No search query provided. Type software name and version to find exploit.")
        exit()


    # Select the appropriate search method for the search: local or remote
    if LOCAL_SEARCH_AVAILABLE and options.local:
        if not os.path.exists(DBFILE):
            print("There is no local database file near getsploit. Run './getsploit.py --update'")
            exit()
        finalQuery, searchResults = exploitLocalSearch(searchQuery, lookupFields=['title'] if options.title else None, limit = options.count)
    else:
        finalQuery, searchResults = exploitSearch(searchQuery, lookupFields=['title'] if options.title else None, limit = options.count)

    outputTable = Texttable()
    outputTable.set_cols_dtype(['t', 't', 't'])
    outputTable.set_cols_align(['c', 'l', 'c'])
    outputTable.set_cols_width(['20', '30', '100'])
    tableRows = [['ID', 'Exploit Title', 'URL']]
    jsonRows = []
    for bulletinSource in searchResults.get('search'):
        bulletin = bulletinSource.get('_source')
        bulletinUrl = bulletin.get('vref') or 'https://vulners.com/%s/%s' % (bulletin.get('type'), bulletin.get('id'))
        tableRows.append([bulletin.get('id'), bulletin.get('title'), bulletinUrl])
        if options.json:
            jsonRows.append({'id':bulletin.get('id'), 'title':bulletin.get('title'), 'url':bulletinUrl})
        if options.mirror:
            pathName = './%s' % slugify(searchQuery)
            # Put results in the dir
            if not os.path.exists(pathName):
                os.mkdir(pathName)
            with open("./%s/%s.txt" % (pathName,slugify(bulletin.get('id'))), 'w') as exploitFile:
                exploitData = bulletin.get('sourceData') or bulletin.get('description')
                if pythonVersion < 3.0:
                    exploitData = exploitData.encode('utf-8').strip()
                exploitFile.write(exploitData)
    if options.json:
        # Json output
        print(json.dumps(jsonRows))
    else:
        # Text output
        print("Total found exploits: %s" % searchResults.get('total'))
        if pythonVersion < 3:
            quoteStringHandler = urllib.quote_plus
        else:
            quoteStringHandler = urllib.parse.quote_plus
        print("Web-search URL: %s" % 'https://vulners.com/search?query=%s' % quoteStringHandler(finalQuery))
        # Set max coll width by len of the url for better copypaste
        maxWidth = max(len(element[2]) for element in tableRows)
        outputTable.set_cols_width([20, 30, maxWidth])
        outputTable.add_rows(tableRows)
        print(outputTable.draw())
Example #13
    print('')
    print(
        '>>>>> Code by Young Jae Choi @ Phys. Dep. of POSTECH in Korea <<<<<'.
        center(120))
    print(('Code runtime : ' + time).center(120))
    print('')
    print(
        '=================================================================================================='
        .center(120))
    print(
        'Shuffle the sequence of an ASE readable atoms list file.'.center(120))
    print(
        '=================================================================================================='
        .center(120))
    print('')

    ## Argparse
    args = argparse()

    # @ Main
    from ase.io import read, write
    for alist_file in args.alist_files:
        alist = read(alist_file, ':')
        if not isinstance(alist, list):
            alist = [alist]
        from random import shuffle
        shuffle(alist)
        write('sffld-{}'.format(alist_file), alist, format=args.file_format)
        print(' * File sffld-{} has been written.'.format(alist_file))
    print('')
Example #14
def xpo_expert_argparser(description="XPO Expert Experiment"):
    """Create an argparse.ArgumentParser for XPO-expert-related tasks"""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('--note', help='w/e', type=str, default=None)
    parser.add_argument('--env_id', help='environment identifier', default='Hopper-v2')
    boolean_flag(parser, 'from_raw_pixels', default=False)
    parser.add_argument('--horizon', help='maximum number of timesteps in an episode',
                        type=int, default=None)
    parser.add_argument('--seed', help='RNG seed', type=int, default=0)
    parser.add_argument('--checkpoint_dir', help='directory to save the models',
                        default=None)
    parser.add_argument('--log_dir', help='directory to save the log files',
                        default='data/logs')
    parser.add_argument('--summary_dir', help='directory to save the summaries',
                        default='data/summaries')
    boolean_flag(parser, 'render', help='whether to render the interaction traces', default=False)
    boolean_flag(parser, 'record', help='whether to record the interaction traces', default=False)
    parser.add_argument('--video_dir', help='directory to save the video recordings',
                        default='data/videos')
    parser.add_argument('--task', help='task to carry out', type=str,
                        choices=['train_xpo_expert',
                                 'evaluate_xpo_expert',
                                 'gather_xpo_expert'],
                        default='train_xpo_expert')
    parser.add_argument('--algo', help='pick an algorithm', type=str,
                        choices=['ppo', 'trpo'], default='ppo')
    parser.add_argument('--save_frequency', help='save model every xx iterations',
                        type=int, default=100)
    parser.add_argument('--num_iters', help='cumulative number of iters since launch',
                        type=int, default=int(1e6))
    parser.add_argument('--timesteps_per_batch', help='number of interactions per iteration',
                        type=int, default=1024)
    parser.add_argument('--batch_size', help='minibatch size', type=int, default=64)
    parser.add_argument('--optim_epochs_per_iter', type=int, default=10,
                        help='optimization epochs per iteration')
    parser.add_argument('--lr', help='adam learning rate', type=float, default=3e-4)
    boolean_flag(parser, 'sample_or_mode', default=True,
                 help='whether to pick actions by sampling or taking the mode')
    parser.add_argument('--num_trajs', help='number of trajectories to evaluate/gather',
                        type=int, default=10)
    parser.add_argument('--exact_model_path', help='exact path of the model',
                        type=str, default=None)
    parser.add_argument('--model_ckpt_dir', help='checkpoint directory containing the models',
                        type=str, default=None)
    parser.add_argument('--demos_dir', type=str, help='directory to save the demonstrations',
                        default='data/expert_demonstrations')
    boolean_flag(parser, 'rmsify_obs', default=True)
    parser.add_argument('--nums_filters', nargs='+', type=int, default=[8, 16])
    parser.add_argument('--filter_shapes', nargs='+', type=int, default=[8, 4])
    parser.add_argument('--stride_shapes', nargs='+', type=int, default=[4, 2])
    parser.add_argument('--hid_widths', nargs='+', type=int, default=[64, 64])
    parser.add_argument('--hid_nonlin', type=str, default='leaky_relu',
                        choices=['relu', 'leaky_relu', 'prelu', 'elu', 'selu', 'tanh'])
    parser.add_argument('--hid_w_init', type=str, default='he_normal',
                        choices=['he_normal', 'he_uniform', 'xavier_normal', 'xavier_uniform'])
    boolean_flag(parser, 'gaussian_fixed_var', default=True)
    boolean_flag(parser, 'with_layernorm', default=False)
    parser.add_argument('--cg_iters', type=int, default=10,
                        help='number of conjugate gradient iterations')
    parser.add_argument('--cg_damping', type=float, default=0.1, help='conjugate gradient damping')
    parser.add_argument('--vf_iters', type=int, default=10,
                        help='number of iterations for value function adam optimization')
    parser.add_argument('--vf_lr', type=float, default=3e-4,
                        help='value function adam learning rate')
    parser.add_argument('--max_kl', type=float, default=0.01)
    parser.add_argument('--ent_reg_scale', help='scale of the policy entropy term',
                        type=float, default=0.)
    parser.add_argument('--clipping_eps', help='ppo annealed clipping parameter epsilon',
                        type=float, default=3e-1)
    parser.add_argument('--gamma', help='discount factor', type=float, default=0.995)
    parser.add_argument('--gae_lambda', help='gae lambda parameter', type=float, default=0.99)
    parser.add_argument('--schedule', type=str, default='constant',
                        choices=['constant', 'linear'])
    return parser
Example #15
def gail_argparser(description="GAIL Experiment"):
    """Create an argparse.ArgumentParser for GAIL-related tasks"""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('--note', help='w/e note', type=str, default=None)
    parser.add_argument('--env_id', help='environment identifier', default='Hopper-v2')
    boolean_flag(parser, 'from_raw_pixels', default=False)
    parser.add_argument('--horizon', help='maximum number of timesteps in an episode',
                        type=int, default=None)
    parser.add_argument('--seed', help='RNG seed', type=int, default=0)
    parser.add_argument('--checkpoint_dir', help='directory to save the models',
                        default=None)
    parser.add_argument('--log_dir', help='directory to save the log files',
                        default='data/logs')
    parser.add_argument('--summary_dir', help='directory to save the summaries',
                        default='data/summaries')
    boolean_flag(parser, 'render', help='whether to render the interaction traces', default=False)
    boolean_flag(parser, 'record', help='whether to record the interaction traces', default=False)
    parser.add_argument('--video_dir', help='directory to save the video recordings',
                        default='data/videos')
    parser.add_argument('--task', help='task to carry out', type=str,
                        choices=['imitate_via_gail',
                                 'evaluate_gail_policy'],
                        default='imitate_via_gail')
    parser.add_argument('--save_frequency', help='save model every xx iterations',
                        type=int, default=100)
    parser.add_argument('--num_iters', help='cumulative number of iters since launch',
                        type=int, default=int(1e6))
    parser.add_argument('--timesteps_per_batch', help='number of interactions per iteration',
                        type=int, default=1024)
    parser.add_argument('--batch_size', help='minibatch size', type=int, default=32)
    boolean_flag(parser, 'sample_or_mode', default=True,
                 help='whether to pick actions by sampling or taking the mode')
    parser.add_argument('--num_trajs', help='number of trajectories to evaluate/gather',
                        type=int, default=10)
    parser.add_argument('--exact_model_path', help='exact path of the model',
                        type=str, default=None)
    parser.add_argument('--model_ckpt_dir', help='checkpoint directory containing the models',
                        type=str, default=None)
    parser.add_argument('--expert_path', help='.npz archive containing the demos',
                        type=str, default=None)
    parser.add_argument('--num_demos', help='number of expert demo trajs for imitation',
                        type=int, default=None)
    parser.add_argument('--g_steps', type=int, default=3)
    parser.add_argument('--d_steps', type=int, default=1)
    parser.add_argument('--d_lr', type=float, default=3e-4)
    boolean_flag(parser, 'non_satur_grad', help='whether to use non-saturating gradients in d')
    boolean_flag(parser, 'rmsify_obs', default=True)
    parser.add_argument('--pol_nums_filters', nargs='+', type=int, default=[8, 16])
    parser.add_argument('--pol_filter_shapes', nargs='+', type=int, default=[8, 4])
    parser.add_argument('--pol_stride_shapes', nargs='+', type=int, default=[4, 2])
    parser.add_argument('--pol_hid_widths', nargs='+', type=int, default=[64, 64])
    parser.add_argument('--d_nums_filters', nargs='+', type=int, default=[8, 16])
    parser.add_argument('--d_filter_shapes', nargs='+', type=int, default=[8, 4])
    parser.add_argument('--d_stride_shapes', nargs='+', type=int, default=[4, 2])
    parser.add_argument('--d_hid_widths', nargs='+', type=int, default=[64, 64])
    parser.add_argument('--hid_nonlin', type=str, default='leaky_relu',
                        choices=['relu', 'leaky_relu', 'prelu', 'elu', 'selu', 'tanh'])
    parser.add_argument('--hid_w_init', type=str, default='he_normal',
                        choices=['he_normal', 'he_uniform', 'xavier_normal', 'xavier_uniform'])
    boolean_flag(parser, 'gaussian_fixed_var', default=True)
    boolean_flag(parser, 'with_layernorm', default=False)
    parser.add_argument('--cg_iters', type=int, default=10,
                        help='number of conjugate gradient iterations')
    parser.add_argument('--cg_damping', type=float, default=0.1, help='conjugate gradient damping')
    parser.add_argument('--vf_iters', type=int, default=10,
                        help='number of iterations for value function adam optimization')
    parser.add_argument('--vf_lr', type=float, default=3e-4,
                        help='value function adam learning rate')
    parser.add_argument('--max_kl', type=float, default=0.01)
    parser.add_argument('--pol_ent_reg_scale', type=float, default=0.,
                        help='scale of the policy entropy term')
    parser.add_argument('--d_ent_reg_scale', type=float, default=0.,
                        help='scale of the discriminator entropy term')
    boolean_flag(parser, 'label_smoothing', default=True)
    boolean_flag(parser, 'one_sided_label_smoothing', default=True)
    parser.add_argument('--gamma', help='discount factor', type=float, default=0.995)
    parser.add_argument('--gae_lambda', help='gae lambda parameter', type=float, default=0.97)
    parser.add_argument('--clip_norm', type=float, default=None)
    return parser
Example #16
    parser = argparse.ArgumentParser()
    parser.add_argument('--timing', action='store_true')
    parser.add_argument('--cprofile', action='store_true')
    parser.add_argument('--pyinstrument', action='store_true')
    return parser.parse_args()

def make_benchmark_cmd(bench):
    fcn = benchmarks[bench][0]
    args = benchmarks[bench][1]
    args_str = ", ".join([k+"="+str(args[k]) for k in args])
    cmd_str = fcn+"("+args_str+")"
    return cmd_str
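
# Worked example (an assumption about the shape of the benchmarks table, which
# is defined elsewhere in this file): with an entry such as
#     benchmarks = {"morse": ("morse_run", {"n_run": 1, "n_steps": 1000})}
# make_benchmark_cmd("morse") returns something like "morse_run(n_run=1, n_steps=1000)".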

import sys
if __name__ == "__main__":
    opts = argparse()
    if opts.timing:
        import timeit
        morse_run(1, 1000)
        for bench in benchmarks:
            cmd = make_benchmark_cmd(bench)
            fcn = benchmarks[bench][0]
            print bench, ":", timeit.timeit(cmd, 
                                            'from __main__ import ' + fcn, 
                                            number=5)
    if opts.cprofile:
        import cProfile
        for bench in benchmarks:
            cProfile.run(make_benchmark_cmd(bench), bench + ".pstats")
    if opts.pyinstrument:
        print "pyinstrument"
Example #17
                coeffs.pop()
            if coeffs:
                f.write("    ({}, {}),\n".format(list(index), coeffs))
        f.write(left_output_footer)
        f.write(right_output_header.format(path=path))
        tmp = defaultdict(lambda: [0]*(1+n*(n-1)//2))
        for ((lperm, deg), coeff) in right.iteritems():
            tmp[cycle_type[lperm]][deg] = coeff
        for index, coeffs in sorted(tmp.iteritems()):
            while coeffs and coeffs[-1] == 0:
                coeffs.pop()
            if coeffs:
                f.write("    ({}, {}),\n".format(list(index), coeffs))
        f.write(right_output_footer)

# ---------------------------------------------------------

if __name__ == '__main__':
    doctest()
    setup_logging()
    path = argparse()
    logger.info('starting left computation for path %s', path)
    left = compute_left(path)
    logger.info('starting right computation for path %s', path)
    right = compute_right(path)
    save(path, left, right)
    logger.info('done with path %s', path)

# ---------------------------------------------------------

Example #18
def main():

    description = 'Exploit search and download utility'
    if six.PY2:
        parser = argparse.ArgumentParser(description)
        addArgumentCall = parser.add_argument
    else:
        parser = argparse.OptionParser(description)
        addArgumentCall = parser.add_option
    #
    if six.PY2:
        addArgumentCall('query', metavar='query', type=str, nargs='*', help='Exploit search query. See https://vulners.com/help for the detailed manual.')

    # Arguments
    addArgumentCall('-t', '--title', action='store_true',
                        help="Search JUST the exploit title (Default is description and source code).")
    addArgumentCall('-j', '--json', action='store_true',
                        help='Show result in JSON format.')
    addArgumentCall('-m', '--mirror', action='store_true',
                        help='Mirror (aka copies) search result exploit files to the subdirectory with your search query name.')
    addArgumentCall('-c', '--count', nargs=1, type=int, default=10,
                        help='Search limit. Default 10.')
    if LOCAL_SEARCH_AVAILABLE:
        addArgumentCall('-l', '--local', action='store_true',
                        help='Perform search in the local database instead of searching online.')
        addArgumentCall('-u', '--update', action='store_true',
                        help='Update getsploit.db database. Will be downloaded in the script path.')

    if six.PY2:
        options = parser.parse_args()
        searchQuery = " ".join(options.query)
    else:
        options, args = parser.parse_args()
        searchQuery = " ".join(args)

    if isinstance(options.count, list):
        options.count = options.count[0]


    # If it's not a local search, init API connection
    if not options.local:
        if not os.path.exists(KEYFILE):
            print("To use getsploit you need to obtain Vulners API key at https://vulners.com")
            api_key = six.moves.input("Please, enter API key: ")
        else:
            api_key = open(KEYFILE, 'r').readlines()[0].strip()
        try:
            vulners_lib = sploitVulners(api_key=api_key)
        except ValueError as exc:
            if "Wrong Vulners API key" in "%s" % exc and os.path.exists(KEYFILE):
                os.unlink(KEYFILE)
            raise exc

        vulners_lib._Vulners__opener.headers.update({'User-Agent': 'Vulners Getsploit %s' % __version__})

        # Vulners key is OK, save it to the file
        with open(KEYFILE, 'w') as key_file:
            key_file.write(api_key)

    # Update goes first
    if LOCAL_SEARCH_AVAILABLE and options.update:
        vulners_lib.downloadGetsploitDb(os.path.join(DBPATH, "getsploit.db.zip"))
        print("Database download complete. Now you may search exploits using --local key './getsploit -l wordpress 4.7'")
        exit()

    # Check that there is a query
    if not searchQuery:
        print("No search query provided. Type software name and version to find exploit.")
        exit()


    # Select the appropriate search method: local or remote
    if LOCAL_SEARCH_AVAILABLE and options.local:
        if not os.path.exists(DBFILE):
            print("There is no local database file near getsploit. Run './getsploit --update'")
            exit()
        finalQuery, searchResults = exploitLocalSearch(searchQuery, lookupFields=['title'] if options.title else None, limit = options.count)
    else:
        finalQuery, searchResults = vulners_lib.searchExploit(searchQuery, lookup_fields=['title'] if options.title else None, limit = options.count)

    outputTable = texttable.Texttable()
    outputTable.set_cols_dtype(['t', 't', 't'])
    outputTable.set_cols_align(['c', 'l', 'c'])
    outputTable.set_cols_width(['20', '30', '100'])
    tableRows = [['ID', 'Exploit Title', 'URL']]
    jsonRows = []
    for bulletin in searchResults:
        bulletinUrl = bulletin.get('vref') or 'https://vulners.com/%s/%s' % (bulletin.get('type'), bulletin.get('id'))
        tableRows.append([bulletin.get('id'), bulletin.get('title'), bulletinUrl])
        if options.json:
            jsonRows.append({'id':bulletin.get('id'), 'title':bulletin.get('title'), 'url':bulletinUrl})
        if options.mirror:
            pathName = './%s' % slugify(searchQuery)
            # Put the results in the dir
            if not os.path.exists(pathName):
                os.mkdir(pathName)
            with open("./%s/%s.txt" % (pathName, slugify(bulletin.get('id'))), 'w') as exploitFile:
                exploitData = bulletin.get('sourceData') or bulletin.get('description')
                if not six.PY3:
                    exploitData = exploitData.encode('utf-8').strip()
                exploitFile.write(exploitData)
    if options.json:
        # Json output
        print(json.dumps(jsonRows))
    else:
        # Text output
        print("Total found exploits: %s" % len(searchResults))
        if not six.PY3:
            quoteStringHandler = urllib.quote_plus
        else:
            quoteStringHandler = urllib.parse.quote_plus
        print("Web-search URL: https://vulners.com/search?query=%s" % quoteStringHandler(finalQuery))
        # Set max column width to the URL length for easier copy-paste
        maxWidth = max(len(element[2]) for element in tableRows)
        outputTable.set_cols_width([20, 30, maxWidth])
        outputTable.add_rows(tableRows)
        if not six.PY3:
            # Just pass non-ascii
            print(outputTable.draw().encode('ascii', 'ignore'))
        else:
            # Any better solution here?
            print(outputTable.draw().encode('ascii', 'ignore').decode())
Exemplo n.º 19
0
def sam_argparser(description="SAM Experiment"):
    """Create an argparse.ArgumentParser for SAM-related tasks"""
    parser = argparse(description)
    parser.add_argument('--note', help='w/e note', type=str, default=None)
    parser.add_argument('--env_id', help='environment identifier', default='Hopper-v2')
    boolean_flag(parser, 'from_raw_pixels', default=False)
    parser.add_argument('--horizon', help='maximum number of timesteps in an episode',
                        type=int, default=None)
    parser.add_argument('--seed', help='RNG seed', type=int, default=0)
    parser.add_argument('--checkpoint_dir', help='directory to save the models',
                        default=None)
    parser.add_argument('--log_dir', help='directory to save the log files',
                        default='data/logs')
    parser.add_argument('--summary_dir', help='directory to save the summaries',
                        default='data/summaries')
    boolean_flag(parser, 'render', help='whether to render the interaction traces', default=False)
    boolean_flag(parser, 'record', help='whether to record the interaction traces', default=False)
    parser.add_argument('--video_dir', help='directory to save the video recordings',
                        default='data/videos')
    parser.add_argument('--task', help='task to carry out', type=str,
                        choices=['imitate_via_sam',
                                 'evaluate_sam_policy'],
                        default=None)
    parser.add_argument('--save_frequency', help='save model every xx iterations',
                        type=int, default=100)
    boolean_flag(parser, 'preload', help='whether to preload with trained tensors', default=False)
    parser.add_argument('--num_iters', help='cumulative number of iters since launch',
                        type=int, default=int(1e6))
    parser.add_argument('--timesteps_per_batch', help='number of interactions per iteration',
                        type=int, default=16)
    parser.add_argument('--batch_size', help='minibatch size', type=int, default=32)
    parser.add_argument('--window', help='window size for optional d training on recent data',
                        type=int, default=None)
    parser.add_argument('--num_trajs', help='number of trajectories to evaluate/gather',
                        type=int, default=10)
    parser.add_argument('--exact_model_path', help='exact path of the model',
                        type=str, default=None)
    parser.add_argument('--model_ckpt_dir', help='checkpoint directory containing the models',
                        type=str, default=None)
    parser.add_argument('--expert_path', help='.npz archive containing the demos',
                        type=str, default=None)
    parser.add_argument('--num_demos', help='number of expert demo trajs for imitation',
                        type=int, default=None)
    parser.add_argument('--g_steps', type=int, default=3)
    parser.add_argument('--d_steps', type=int, default=1)
    parser.add_argument('--actor_lr', type=float, default=3e-4)
    parser.add_argument('--critic_lr', type=float, default=3e-4)
    parser.add_argument('--d_lr', type=float, default=3e-4)
    boolean_flag(parser, 'non_satur_grad', help='whether to use non-saturating gradients in d')
    parser.add_argument('--actorcritic_nums_filters', nargs='+', type=int, default=[8, 16])
    parser.add_argument('--actorcritic_filter_shapes', nargs='+', type=int, default=[8, 4])
    parser.add_argument('--actorcritic_stride_shapes', nargs='+', type=int, default=[4, 2])
    parser.add_argument('--actorcritic_hid_widths', nargs='+', type=int, default=[32, 32])
    parser.add_argument('--d_nums_filters', nargs='+', type=int, default=[8, 16])
    parser.add_argument('--d_filter_shapes', nargs='+', type=int, default=[8, 4])
    parser.add_argument('--d_stride_shapes', nargs='+', type=int, default=[4, 2])
    parser.add_argument('--d_hid_widths', nargs='+', type=int, default=[32, 32])
    parser.add_argument('--hid_nonlin', type=str, default='leaky_relu',
                        choices=['relu', 'leaky_relu', 'prelu', 'elu', 'selu', 'tanh'])
    parser.add_argument('--hid_w_init', type=str, default='he_normal',
                        choices=['he_normal', 'he_uniform', 'xavier_normal', 'xavier_uniform'])
    parser.add_argument('--ac_branch_in', type=int, default=1)
    boolean_flag(parser, 'with_layernorm', default=False)
    parser.add_argument('--d_ent_reg_scale', type=float, default=0.,
                        help='scale of the discriminator entropy term')
    boolean_flag(parser, 'label_smoothing', default=True)
    boolean_flag(parser, 'one_sided_label_smoothing', default=True)
    boolean_flag(parser, 'rmsify_rets', default=True)
    boolean_flag(parser, 'enable_popart', default=True)
    boolean_flag(parser, 'rmsify_obs', default=True)
    parser.add_argument('--gamma', help='discount factor', type=float, default=0.995)
    parser.add_argument('--mem_size', type=int, default=int(1e6))
    boolean_flag(parser, 'reset_with_demos', default=False)
    boolean_flag(parser, 'add_demos_to_mem', default=False)
    boolean_flag(parser, 'prioritized_replay', default=False)
    parser.add_argument('--alpha', help='how much prioritized', type=float, default=0.3)
    boolean_flag(parser, 'ranked', default=False)
    boolean_flag(parser, 'unreal', default=False)
    parser.add_argument('--beta', help='importance weights usage', default=1.0, type=float)
    parser.add_argument('--reward_scale', type=float, default=1.)
    parser.add_argument('--clip_norm', type=float, default=None)
    parser.add_argument('--noise_type', help='choices: adaptive-param_xx, normal_xx, ou_xx, none',
                        type=str, default='adaptive-param_0.2, ou_0.1, normal_0.1')
    parser.add_argument('--rew_aug_coeff', type=float, default=0.)
    parser.add_argument('--param_noise_adaption_frequency', type=float, default=50)
    parser.add_argument('--polyak', type=float, default=0.001, help='target networks tracking')
    parser.add_argument('--q_actor_loss_scale', type=float, default=1.)
    parser.add_argument('--d_actor_loss_scale', type=float, default=0.)
    parser.add_argument('--wd_scale', help='critic wd scale', type=float, default=0.001)
    parser.add_argument('--td_loss_1_scale', type=float, default=1.)
    parser.add_argument('--td_loss_n_scale', type=float, default=1.)
    boolean_flag(parser, 'n_step_returns', default=True)
    parser.add_argument('--n', help='number of steps for the TD lookahead', type=int, default=10)
    parser.add_argument('--training_steps_per_iter', type=int, default=50)
    parser.add_argument('--eval_steps_per_iter', type=int, default=100)
    parser.add_argument('--eval_frequency', type=int, default=500)
    return parser
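
A minimal usage sketch, assuming the local argparse(description) helper returns a standard ArgumentParser and that boolean_flag is the usual helper that registers paired --flag/--no-flag options (both assumptions, since neither is defined in this snippet):

# Hypothetical usage; argument values shown are illustrative.
args = sam_argparser("SAM experiment").parse_args()
print(args.env_id, args.task, args.num_demos)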
Exemplo n.º 20
0
def main():
    cli_main(argparse())
Exemplo n.º 21
0
    parser.add_argument('--fasta', required=True, help='input Fasta file', type=str)
    parser.add_argument('--minimum', required=True, help='minimum GC content to be included', type=int)
    parser.add_argument('--maximum', required=True, help='maximum GC content to be included', type=int)
    parser.add_argument('--outfile', required=False, help='name of the outfile', type=str)
    parser.add_argument('--debug', required=False, default=False, type=bool)
    parser.add_argument('--version', '-v', action='version', version='%(prog)s 1.0')
    args = parser.parse_args()

    return args

####################################################################
#
# main
#
####################################################################
args = argparse()

fasta = args.fasta
minimum = args.minimum
maximum = args.maximum

# log lists
seq_lt_minimum = []
seq_mt_maximum = []
seq_in_range = []

seq_records_in_range = []
# loop thru fasta
for seq_record in SeqIO.parse(fasta, "fasta"):
    seq = seq_record.seq
    seqId = seq_record.id
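
The loop is cut off here; under the assumption that Bio.SeqUtils.GC is used to compute the GC percentage, a hypothetical continuation of the filtering step could look like the sketch below (the branch targets follow from the list names above):

    gc = GC(seq)  # assumes: from Bio.SeqUtils import GC, returning percent GC
    if gc < minimum:
        seq_lt_minimum.append(seqId)
    elif gc > maximum:
        seq_mt_maximum.append(seqId)
    else:
        seq_in_range.append(seqId)
        seq_records_in_range.append(seq_record)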
Exemplo n.º 22
0
    # sleep so we can see the change in map
    time.sleep(30)

    print "Reaching the targetpoint"


def RTL_Mode():

    print("PreRTL......")
    vehicle.mode = VehicleMode("RTL")

    #Close vehicle object before exiting script


if __name__ == "__main__":
    argparse()
    connection_string1 = connection()

    print("\nConnecting to vehicle on: %s" % connection_string1)
    vehicle = connect(connection_string1, wait_ready=True)
    #        global vehicle

    vehicle.wait_ready('autopilot_version')

    v_attributes()
    arm()
    # GUIDED_Mode()
    # RTL_Mode()
    print "Close vehicle object"
    vehicle.close()
Exemplo n.º 23
0
def main():
    description = 'Exploit search and download utility'
    if pythonVersion > 2.6:
        parser = argparse.ArgumentParser(description)
        addArgumentCall = parser.add_argument
    else:
        parser = argparse(description)
        addArgumentCall = parser.add_option
    #
    if pythonVersion > 2.6:
        addArgumentCall(
            'query',
            metavar='query',
            type=str,
            nargs='+',
            help=
            'Exploit search query. See https://vulners.com/help for the detailed manual.'
        )
    # Arguments
    addArgumentCall(
        '-t',
        '--title',
        action='store_true',
        help=
        "Search JUST the exploit title (Default is description and source code)."
    )
    addArgumentCall('-j',
                    '--json',
                    action='store_true',
                    help='Show result in JSON format.')
    addArgumentCall(
        '-m',
        '--mirror',
        action='store_true',
        help=
        'Mirror (aka copies) search result exploit files to the subdirectory with your search query name.'
    )

    if pythonVersion > 2.6:
        options = parser.parse_args()
        searchQuery = " ".join(options.query)
    else:
        options, args = parser.parse_args()
        searchQuery = " ".join(args)

    finalQuery, searchResults = exploitSearch(
        searchQuery, lookupFields=['title'] if options.title else None)
    outputTable = Texttable()
    outputTable.set_cols_dtype(['t', 't', 't'])
    outputTable.set_cols_align(['c', 'l', 'c'])
    outputTable.set_cols_width(['20', '30', '100'])
    tableRows = [['ID', 'Exploit Title', 'URL']]
    jsonRows = []
    for bulletinSource in searchResults.get('search'):
        bulletin = bulletinSource.get('_source')
        bulletinUrl = 'https://vulners.com/%s/%s' % (bulletin.get('type'),
                                                     bulletin.get('id'))
        tableRows.append(
            [bulletin.get('id'),
             bulletin.get('title'), bulletinUrl])
        if options.json:
            jsonRows.append({
                'id': bulletin.get('id'),
                'title': bulletin.get('title'),
                'url': bulletinUrl
            })
        if options.mirror:
            pathName = './%s' % slugify(searchQuery)
            # Put the results in the dir
            if not os.path.exists(pathName):
                os.mkdir(pathName)
            with open("./%s/%s.txt" % (pathName, slugify(bulletin.get('id'))),
                      'w') as exploitFile:
                exploitData = bulletin.get('sourceData') or bulletin.get(
                    'description')
                if pythonVersion < 3.0:
                    exploitData = exploitData.encode('utf-8').strip()
                exploitFile.write(exploitData)
    if options.json:
        # Json output
        print(json.dumps(jsonRows))
    else:
        # Text output
        print("Total found exploits: %s" % searchResults.get('total'))
        if pythonVersion < 3:
            quoteStringHandler = urllib.quote_plus
        else:
            quoteStringHandler = urllib.parse.quote_plus
        print("Web-search URL: %s" % 'https://vulners.com/search?query=%s' %
              quoteStringHandler(finalQuery))
        # Set max column width to the URL length for easier copy-paste
        maxWidth = max(len(element[2]) for element in tableRows)
        outputTable.set_cols_width([20, 30, maxWidth])
        outputTable.add_rows(tableRows)
        print(outputTable.draw())
Exemplo n.º 24
0
def main():
    cli_main(argparse())