Example #1
    Coordinates = namedtuple('Coordinates',
                             'height width start_x start_y finish_x finish_y')
    with open(args.infile) as file:
        try:
            content = list(map(int, file.read().split()))
        except ValueError:
            # abort here: `content` would be undefined below otherwise
            raise SystemExit('Inputs should be numbers')
    coordinates = Coordinates(*content)
    matrix = [[-1 for j in range(coordinates.width)]
              for i in range(coordinates.height)]
    final_steps_number, matrix = Li_algorithm(matrix, coordinates.start_x,
                                              coordinates.start_y,
                                              coordinates.finish_x,
                                              coordinates.finish_y)
    if final_steps_number != -1:
        print('You should spend at least {} '
              'steps in order to reach {} from {}'.format(
                  final_steps_number, coordinates[4:6], coordinates[2:4]))
    else:
        print('Sorry, {} can\'t be reached from {}'.format(
            coordinates[4:6], coordinates[2:4]))
    with open(args.outfile, 'w') as file:
        file.write(str(final_steps_number))


if __name__ == '__main__':
    start = time.time()
    arguments = parse_arguments()
    main(arguments)
    print('Time taken: {:.5f}'.format(time.time() - start))
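The snippet calls two helpers that are not shown here: parse_arguments and Li_algorithm. For orientation, below is a minimal sketch of what Li_algorithm could look like, assuming it implements the Lee algorithm (a BFS wave expansion over the grid, with -1 marking unvisited cells); the name and signature are taken from the call above, the body is an assumption.

from collections import deque

def Li_algorithm(matrix, start_x, start_y, finish_x, finish_y):
    # Lee algorithm sketch: BFS wave expansion. Cells equal to -1 are
    # unvisited; obstacles, if any, are assumed to be pre-marked with
    # some other value before the call.
    matrix[start_x][start_y] = 0
    queue = deque([(start_x, start_y)])
    while queue:
        x, y = queue.popleft()
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nx, ny = x + dx, y + dy
            if (0 <= nx < len(matrix) and 0 <= ny < len(matrix[0])
                    and matrix[nx][ny] == -1):
                matrix[nx][ny] = matrix[x][y] + 1
                queue.append((nx, ny))
    # a value still equal to -1 means the finish cell was never reached
    return matrix[finish_x][finish_y], matrix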
Example #2
import sys

from theano.tensor.shared_randomstreams import RandomStreams
from models.dnn import DNN
from models.dropout_nnet import DNN_Dropout
from io_func.model_io import _nnet2file, _cfg2file, _file2nnet
from utils.utils import parse_arguments
from utils.utils import format_results
from utils.learn_rates import _lrate2file, _file2lrate
from utils.network_config import NetworkConfig
from learning.sgd import train_sgd, validate_by_minibatch
from util_func import log, parse_classes

if __name__ == '__main__':

    # check the arguments
    arg_elements = sys.argv[1:]
    arguments = parse_arguments(arg_elements)
    required_arguments = ['train_data', 'valid_data', 'nnet_spec', 'wdir']
    for arg in required_arguments:
        if arg not in arguments:
            print "Error: the argument %s has to be specified" % arg
            sys.exit(1)

    # mandatory arguments
    train_data_spec = arguments['train_data']
    valid_data_spec = arguments['valid_data']
    nnet_spec = arguments['nnet_spec']
    wdir = arguments['wdir']
    multi_label = arguments.get('multi_label', 'false') == "true"

    # parse network configuration from arguments, and initialize data reading
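Examples #2 to #4 index the return value of parse_arguments like a dict keyed by option name. A minimal sketch consistent with that usage, assuming the command line is a flat list of '--key value' pairs (the body is an assumption, not the project's actual implementation):

def parse_arguments(arg_elements):
    # pair up '--key value' elements into a dict,
    # e.g. ['--train-data', 'train.pkl.gz'] -> {'train_data': 'train.pkl.gz'}
    args = {}
    for i in range(0, len(arg_elements), 2):
        key = arg_elements[i].replace('--', '').replace('-', '_')
        args[key] = arg_elements[i + 1]
    return args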
Example #3
File: run_RBM.py Project: magic2du/dlnn
import sys
import time

import numpy
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

from io_func.model_io import _nnet2file, _cfg2file, _file2nnet, log
from models.dnn import DNN
from models.srbm import SRBM
from utils.network_config import NetworkConfig
from utils.rbm_config import RBMConfig
from utils.utils import parse_arguments, save_two_integers, read_two_integers


if __name__ == '__main__':

    # check the arguments
    arg_elements = sys.argv[1:]
    arguments = parse_arguments(arg_elements)
    required_arguments = ['train_data', 'nnet_spec', 'wdir']
    for arg in required_arguments:
        if arg not in arguments:
            print "Error: the argument %s has to be specified" % arg
            sys.exit(1)

    train_data_spec = arguments['train_data']
    nnet_spec = arguments['nnet_spec']
    wdir = arguments['wdir']

    # numpy random generator
    numpy_rng = numpy.random.RandomState(89677)
    theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
    log('> ... initializing the model')

    # parse network configuration from arguments, and initialize data reading
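This example also imports save_two_integers and read_two_integers, which the (truncated) pre-training loop presumably uses to checkpoint its position, e.g. the current layer and epoch. A minimal sketch under that assumption, storing the pair as a two-line text file:

def save_two_integers(integers, filename):
    # e.g. integers = (current_layer, current_epoch)
    with open(filename, 'w') as f:
        f.write('%d\n%d\n' % (integers[0], integers[1]))

def read_two_integers(filename):
    with open(filename) as f:
        return int(f.readline()), int(f.readline())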
Example #4
# imports inferred from the sibling examples above (not shown in the original snippet)
import cPickle
import math
import numpy

from theano.tensor.shared_randomstreams import RandomStreams

from io_func import smart_open
from io_func.model_io import _file2nnet, log
from models.cnn import CNN
from models.dnn import DNN
from utils.utils import parse_arguments, string2bool


def main(arg_elements):

    # check the arguments
    arguments = parse_arguments(arg_elements)
    required_arguments = [
        'data', 'nnet_param', 'nnet_cfg', 'output_file', 'layer_index',
        'batch_size'
    ]
    for arg in required_arguments:
        if arg not in arguments:
            print "Error: the argument %s has to be specified" % arg
            exit(1)

    # mandatory arguments
    data_spec = arguments['data']
    nnet_param = arguments['nnet_param']
    nnet_cfg = arguments['nnet_cfg']
    output_file = arguments['output_file']
    layer_index = int(arguments['layer_index'])
    batch_size = int(arguments['batch_size'])
    argmax = 'argmax' in arguments and string2bool(arguments['argmax'])

    # load network configuration and set up the model
    log('> ... setting up the model and loading parameters')
    numpy_rng = numpy.random.RandomState(89677)
    theano_rng = RandomStreams(numpy_rng.randint(2**30))
    cfg = cPickle.load(smart_open(nnet_cfg, 'rb'))
    cfg.init_activation()
    model = None
    if cfg.model_type == 'DNN':
        model = DNN(numpy_rng=numpy_rng, theano_rng=theano_rng, cfg=cfg)
    elif cfg.model_type == 'CNN':
        model = CNN(numpy_rng=numpy_rng,
                    theano_rng=theano_rng,
                    cfg=cfg,
                    testing=True)

    # load model parameters
    _file2nnet(model.layers, filename=nnet_param)

    # initialize data reading
    cfg.init_data_reading_test(data_spec)

    # get the function for feature extraction
    log('> ... getting the feat-extraction function')
    extract_func = model.build_extract_feat_function(layer_index)

    # store the features for all the data in memory
    # TODO: output the features in a streaming mode
    output_mats = []
    log('> ... generating features from the specified layer')
    while not cfg.test_sets.is_finish():  # loop over the data
        cfg.test_sets.load_next_partition(cfg.test_xy)
        batch_num = int(
            math.ceil(1.0 * cfg.test_sets.cur_frame_num / batch_size))

        for batch_index in xrange(batch_num):  # loop over mini-batches
            start_index = batch_index * batch_size
            # the last mini-batch may be smaller than batch_size
            end_index = min((batch_index + 1) * batch_size,
                            cfg.test_sets.cur_frame_num)
            output = extract_func(
                cfg.test_x.get_value()[start_index:end_index])
            output_mats.append(output)

    output_mat = numpy.concatenate(output_mats)
    if argmax:
        output_mat = output_mat.argmax(axis=1)

    # output the feature representations using pickle
    with smart_open(output_file, 'wb') as f:
        cPickle.dump(output_mat, f, cPickle.HIGHEST_PROTOCOL)

    log('> ... the features are stored in ' + output_file)
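Example #4 also relies on two small helpers whose bodies are not shown, string2bool and smart_open. Minimal sketches consistent with how they are called above (the gzip handling in smart_open is an assumption suggested by its name):

import gzip

def string2bool(s):
    # 'true' / 'True' / '1' -> True, anything else -> False
    return s in ('true', 'True', '1')

def smart_open(filename, mode='rb'):
    # open gzip-compressed files transparently, plain files otherwise
    if filename.endswith('.gz'):
        return gzip.open(filename, mode)
    return open(filename, mode)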