Example #1
def _set_preproc (indata, prefix):
	pname=util.fix_preproc_name_inconsistency(indata[prefix+'name'])
	args=util.get_args(indata, prefix)

	sg('add_preproc', pname, *args)
	sg('attach_preproc', 'TRAIN')
	sg('attach_preproc', 'TEST')
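The util module itself never appears in these listings. As a rough sketch only (an assumption about its behavior, not the actual implementation), the prefix-based get_args(indata, prefix) used above likely collects positional arguments stored under ordered keys such as 'kernel_arg0_size', 'kernel_arg1_width':

def get_args(indata, prefix):
	# hypothetical sketch: gather values for keys shaped like
	# '<prefix>arg<N>_<name>' and return them ordered by position <N>
	found = []
	for key, value in indata.items():
		if key.startswith(prefix + 'arg'):
			pos = int(key[len(prefix) + 3:].split('_', 1)[0])
			found.append((pos, value))
	return [value for _, value in sorted(found, key=lambda item: item[0])]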
Example #2
	def __init__(self, parent, name = None, **args):
		if name is None:
			name = self._make_name()
		if isinstance(parent, Doc):
			# make sure we're unique
			if name in parent.children:
				raise Exception("A box by the name '%s' already exists within children of '%s' parent." % (name, parent.name()))
			self._is_root = False
			parent.children[name] = self
			self.parent = parent
			p_win = parent.win
		else:
			p_win = parent
		my_args = {
			'width': None,
			'height': None,
			'top': 0,
			'left': 0
		}
		my_args = get_args(my_args, args)
		for arg in my_args:
			self._args[arg] = my_args[arg]
		# create our subwin
		win_args = [self._args['top'], self._args['left']]
		if self._args['width'] is not None and self._args['height'] is not None:
			win_args.insert(0, self._args['height'])
			win_args.insert(0, self._args['width'])
		self.win = p_win.subwin(*win_args)
		# set our dimensions and location
		(self._height, self._width) = self.win.getmaxyx()
		self._top = my_args['top']
		self._left = my_args['left']
		self._name = name
		self.refresh()
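The curses-widget examples (this one and several below) use a different, two-argument form, get_args(defaults, args), which merges the caller's keyword arguments over a dict of defaults. A minimal sketch of that variant, again an assumption rather than the real code; the optional third flag seen later in Example #26 is guessed here to admit unknown keys:

def get_args(defaults, args, merge_unknown=False):
	# hypothetical sketch: start from the defaults and let the caller's
	# keyword arguments override them
	merged = dict(defaults)
	for name, value in args.items():
		if name in defaults or merge_unknown:
			merged[name] = value
	return merged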
Example #3
def _get_subkernels (indata, prefix):
	subkernels={}
	prefix=prefix+'subkernel'
	len_prefix=len(prefix)

	# loop through indata (unordered dict) to gather subkernel data
	for key in indata:
		if key.find(prefix)==-1:
			continue

		# get subkernel's number
		try:
			num=key[len_prefix]
		except IndexError:
			raise ValueError('Cannot find number for subkernel: "%s"!' % key)

		# get item's name
		name=key[len_prefix+2:]

		# append new item
		if num not in subkernels:
			subkernels[num]={}
		subkernels[num][name]=indata[key]

	# got all necessary information in new structure, now create a kernel
	# object for each subkernel
	for num, data in subkernels.items():
		fun=eval(data['name']+'Kernel')
		args=util.get_args(data, '')
		subkernels[num]['kernel']=fun(*args)

	return subkernels
Example #4
def main():
    config = util.get_args()
    config.cuda = not config.no_cuda
    torch.cuda.set_device(config.gpu)
    if config.cuda and torch.cuda.is_available():
        print("Using CUDA on GPU ", config.gpu)
    else:
        print("Not using CUDA.")
    net = partnet_model.PARTNET(config)
    net.load_state_dict(
        torch.load(config.save_path + '/partnet_final.pkl',
                   map_location=lambda storage, loc: storage.cuda(config.gpu)))
    if config.cuda:
        net.cuda()
    net.eval()

    if not os.path.exists(config.output_path + 'segmented'):
        os.makedirs(config.output_path + 'segmented')
    print("Loading data ...... ", end='\n', flush=True)

    shape = torch.from_numpy(loadmat(config.data_path +
                                     'demo.mat')['pc']).float()
    # for your own new shape:
    # shape = normalize_shape(shape)
    with torch.no_grad():
        shape = shape.cuda()
        points_feature = net.pointnet(shape)
        root_feature = net.pcEncoder(shape)
        global m
        m = 0
        label = decode_structure(net, root_feature, points_feature, shape)

        #segmented results
        writeply(config.output_path + 'segmented/demo.ply', shape, label)
        print('Successfully output result!')
Example #5
	def __init__(self, parent, **args):
		my_args = {
			'fields': {}
		}
		my_args = get_args(my_args, args)
		Message.__init__(self, parent, **args)
		self._form = Form(self, my_args['fields'])
Example #6
def main():
    # get argument
    args = util.get_args()

    # print argument
    util.print_args(args)

    # run TE (traffic engineering)
    mlus = []
    rcs = []

    # at every T step
    for t in range(args.num_test):
        if t % args.T == 0:
            repetita_args = util.get_repetita_args(args, t)
            print('command:', ' '.join(repetita_args))
            stdout = util.call(repetita_args)
            if stdout:
                print('stdout:', stdout)
                mlu, rc = util.parse_result(t, stdout, args)
                if len(mlu) == args.T:
                    mlus.append(mlu)
                if rc is not None:
                    rcs.append(rc)

    util.save(mlus, rcs, args)
Example #7
 def __init__(self, client, args = {}):
     self.env(
         'histfile',
         os.path.join(os.path.expanduser('~'), '.om_history')
     )
     self.client = client
     self.args = util.get_args(self.args, args)
Example #8
def _get_subkernels(indata, prefix):
    subkernels = {}
    prefix = prefix + 'subkernel'
    len_prefix = len(prefix)

    # loop through indata (unordered dict) to gather subkernel data
    for key in indata:
        if key.find(prefix) == -1:
            continue

        # get subkernel's number
        try:
            num = key[len_prefix]
        except IndexError:
            raise ValueError('Cannot find number for subkernel: "%s"!' % key)

        # get item's name
        name = key[len_prefix + 2:]

        # append new item
        if num not in subkernels:
            subkernels[num] = {}
        subkernels[num][name] = indata[key]

    # got all necessary information in new structure, now create a kernel
    # object for each subkernel
    for num, data in subkernels.items():
        fun = eval(data['name'] + 'Kernel')
        args = util.get_args(data, '')
        subkernels[num]['kernel'] = fun(*args)

    return subkernels
Example #9
 def __init__(self):
     self.maxAcc = 0.0
     self.config = util.get_args()
     self.config.lr_decay = self.config.lr_decay * (1400 //
                                                    self.config.batch_size)
     self.config.lr_decay_begin = self.config.lr_decay_begin * (
         1400 // self.config.batch_size)
     self.config.maxSteps = self.config.epochs * 1400 // self.config.batch_size + 1
     print(self.config.maxSteps, " max steps")
     self.texti = loadData.TextIterator(self.config)
     self.config.text_vocab_size = len(self.texti.word2id)
     embed_weight = np.load("../wordEmb/vector_" +
                            self.config.wordemb_suffix + ".npy")
     self.model = NNManager.Model(self.config, self.config.model_name)
     self.model.emb.emb.weight.data.copy_(torch.from_numpy(embed_weight))
     if self.config.pretrain == 1:
         self.model.load_state_dict(
             torch.load(self.config.pretrain_path, map_location='cpu'))
     self.model.cuda()
     self.optimizer = torch.optim.Adam(
         self.model.parameters(),
         lr=self.config.learning_rate,
         weight_decay=self.config.weight_decay)
     self.lossfunc = torch.nn.CrossEntropyLoss().cuda()
     self.start = time.time()
Example #10
def main():
    args = get_args()
    torch.backends.cudnn.enabled = False
    cudnn.benchmark = False
    torch.multiprocessing.set_sharing_strategy('file_system')

    train_loader = torch.utils.data.DataLoader(TrainDataset(
        args=args,
        transform=transforms.Compose([
            transforms.CenterCrop((224, 224)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=0)

    video_val_loader = torch.utils.data.DataLoader(VideoDataset(
        args=args,
        transform=transforms.Compose([
            transforms.CenterCrop((224, 224)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ]),
        test_mode=True),
                                                   batch_size=args.batch_size,
                                                   shuffle=False,
                                                   num_workers=0)

    print("start training")
    for epoch in range(args.epochs):
        train(train_loader, video_val_loader, args)
Example #11
def run(args):
    (options, args) = util.get_args(args, OPTIONS, prog='index')
    archive = Archive()
    articles = [archive.add(a)
                for a in Slicer.sort(Slicer.objects.all(), desc=True)]
    print("All articles:", articles)
    build_main(options.dest, archive, options)
    print("Indexed articles:",
          [build_index(options.dest, a, archive, options) for a in articles])
Example #12
def _evaluate(indata):
    prefix = "kernel_"
    feats = util.get_features(indata, prefix)
    kfun = eval(indata[prefix + "name"] + "Kernel")
    kargs = util.get_args(indata, prefix)

    prefix = "preproc_"
    pargs = util.get_args(indata, prefix)
    feats = util.add_preproc(indata[prefix + "name"], feats, *pargs)

    prefix = "kernel_"
    kernel = kfun(feats["train"], feats["train"], *kargs)
    km_train = max(abs(indata[prefix + "matrix_train"] - kernel.get_kernel_matrix()).flat)
    kernel.init(feats["train"], feats["test"])
    km_test = max(abs(indata[prefix + "matrix_test"] - kernel.get_kernel_matrix()).flat)

    return util.check_accuracy(indata[prefix + "accuracy"], km_train=km_train, km_test=km_test)
Example #13
	def add_field(self, name, **args):
		my_args = {
			'refresh': True,
			'type': None,
			'caption': '',
			'args': {}
		}
		my_args = get_args(my_args, args)
		if name in self._fields:
			raise Exception("Form %s already contains a field by the name %s." % (self._name, name))
		if my_args['type'] is None:
			raise Exception("Unable to add a field without specifying the class type.")
		metrics = self.get_metrics()
		top = self._inner_top = len(self._fields)
		field = my_args['type'](self, name = name, top = top, **my_args['args'])
		self._fields[name] = field
Example #14
def main4():

    args = get_args()
    if args.data == "nyt":
        vocab_file = "/home/ml/lyu40/PycharmProjects/data/nyt/lda_domains/preprocessed/vocab_100d.p"
        with open(vocab_file, "rb") as f:
            vocab = pickle.load(f, encoding='latin1')
    else:
        vocab_file = '/home/ml/ydong26/data/CNNDM/CNN_DM_pickle_data/vocab_100d.p'
        with open(vocab_file, "rb") as f:
            vocab = pickle.load(f, encoding='latin1')
    config = Config(
        vocab_size=vocab.embedding.shape[0],
        embedding_dim=vocab.embedding.shape[1],
        category_size=args.category_size,
        category_dim=50,
        word_input_size=100,
        sent_input_size=2 * args.hidden,
        word_GRU_hidden_units=args.hidden,
        sent_GRU_hidden_units=args.hidden,
        pretrained_embedding=vocab.embedding,
        word2id=vocab.w2i,
        id2word=vocab.i2w,
    )
    doc = Document(content=[[
        'to', 'the', 'editor', 're', 'for', 'women', 'worried', 'about',
        'fertility', 'egg', 'bank', 'is', 'a', 'new', 'option', 'sept', '00',
        'imagine', 'my', 'joy', 'in', 'reading', 'the', 'morning',
        'newspapers', 'on', 'the', 'day', 'of', 'my', '00th', 'birthday',
        'and', 'finding', 'not', 'one', 'but', 'two', 'articles', 'on', 'how',
        'women', 's', 'fertility', 'drops', 'off', 'precipitously', 'after',
        'age', '00'
    ], [
        'one', 'in', 'the', 'times', 'and', 'one', 'in', 'another', 'newspaper'
    ], ['i', 'sense', 'a', 'conspiracy', 'here'],
                            [
                                'have', 'you', 'been', 'talking', 'to', 'my',
                                'mother', 'in', 'law'
                            ], ['laura', 'heymann', 'washington']],
                   summary=[[
                       'laura', 'heymann', 'letter', 'on', 'sept', '00',
                       'article', 'about', 'using', 'egg', 'bank', 'to',
                       'prolong', 'fertility', 'expresses', 'ironic', 'humor',
                       'about', 'her', 'age', 'and', 'chances', 'of',
                       'becoming', 'pregnant'
                   ]],
                   label=[0.01] * 100,
                   label_idx=[0.01] * 100)
    extract_net = model_all.FullyShare(config)
    label_idx = torch.tensor([2], dtype=torch.float, device='cuda:0').cuda()

    x = prepare_data(doc, vocab.w2i)
    sents = Variable(torch.from_numpy(x)).cuda()

    if label_idx.dim() == 2:
        outputs = extract_net(sents, label_idx[0])
    else:
        outputs = extract_net(sents, label_idx)
Example #15
def _evaluate(indata):
    prefix = 'kernel_'
    feats = util.get_features(indata, prefix)
    kargs = util.get_args(indata, prefix)
    fun = eval(indata[prefix + 'name'] + 'Kernel')
    kernel = fun(feats['train'], feats['train'], *kargs)

    prefix = 'regression_'
    kernel.parallel.set_num_threads(indata[prefix + 'num_threads'])

    try:
        name = indata[prefix + 'name']
        if (name == 'KERNELRIDGEREGRESSION'):
            name = 'KernelRidgeRegression'

        rfun = eval(name)
    except NameError as e:
        print("%s is disabled/unavailable!" % indata[prefix + 'name'])
        return False

    labels = RegressionLabels(double(indata[prefix + 'labels']))
    if indata[prefix + 'type'] == 'svm':
        regression = rfun(indata[prefix + 'C'], indata[prefix + 'epsilon'],
                          kernel, labels)
    elif indata[prefix + 'type'] == 'kernelmachine':
        regression = rfun(indata[prefix + 'tau'], kernel, labels)
    else:
        return False

    regression.parallel.set_num_threads(indata[prefix + 'num_threads'])
    if prefix + 'tube_epsilon' in indata:
        regression.set_tube_epsilon(indata[prefix + 'tube_epsilon'])

    regression.train()

    alphas = 0
    bias = 0
    sv = 0
    if prefix + 'bias' in indata:
        bias = abs(regression.get_bias() - indata[prefix + 'bias'])
    if prefix + 'alphas' in indata:
        for item in regression.get_alphas().tolist():
            alphas += item
        alphas = abs(alphas - indata[prefix + 'alphas'])
    if prefix + 'support_vectors' in indata:
        for item in regression.get_support_vectors().tolist():
            sv += item
        sv = abs(sv - indata[prefix + 'support_vectors'])

    kernel.init(feats['train'], feats['test'])
    classified = max(
        abs(regression.apply().get_labels() - indata[prefix + 'classified']))

    return util.check_accuracy(indata[prefix + 'accuracy'],
                               alphas=alphas,
                               bias=bias,
                               support_vectors=sv,
                               classified=classified)
Example #16
def run(args):
    '''
    List available blogs
    '''
    (options, args) = util.get_args(args, OPTIONS, prog='list')
    slices = Slicer.slices.all()
    if not slices:
        return
    for article in slices:
        print(article, format_extra_data(article, options))
Example #17
def main():
    args = get_args()
    log_file = "/home/ml/lyu40/PycharmProjects/E_Yue/log/" + args.data + "/100/" + args.ext_model + ".tr.log"
    scores = []
    with open(log_file, "r") as f:
        for l in f.readlines()[-10000:]:
            scores.append(float(l.split(' ')[-1].rstrip('\n')))
    print("The average rouge score for the recent 10000 examples: ",
          sum(scores) / len(scores))
Example #18
def main():
    der_arg, input_file, category = get_args("send_jobs.py")
    print "python ../bin/send_jobs.py {0} {1} {2}".format(
        input_file, category, der_arg)
    input_file = "{0}/{1}".format(config.input_dir,
                                  os.path.basename(input_file))
    pieces = split_input_file(input_file, config.pieces)
    send_jobs(input_file, pieces, der_arg, category)
    print "Program exit!"
Example #19
def application(environ, start_response):
    args = util.get_args(environ)
    if route(environ, 'socket.io'):
        return socketio_manage(environ, { '/control': ControlNamespace })
    elif route(environ, 'presentation'):
        return presentation(start_response, args)
    elif route(environ, 'controls'):
        return controls(start_response, args)
    else:
        return util.serve_file(environ, start_response)
Example #20
def _evaluate (indata):
	prefix='kernel_'
	feats=util.get_features(indata, prefix)
	kargs=util.get_args(indata, prefix)
	fun=eval(indata[prefix+'name']+'Kernel')
	kernel=fun(feats['train'], feats['train'], *kargs)

	prefix='regression_'
	kernel.parallel.set_num_threads(indata[prefix+'num_threads'])

	try:
		name = indata[prefix+'name']
		if (name=='KERNELRIDGEREGRESSION'):
			name = 'KernelRidgeRegression'

		rfun=eval(name)
	except NameError as e:
		print("%s is disabled/unavailable!"%indata[prefix+'name'])
		return False

	labels=RegressionLabels(double(indata[prefix+'labels']))
	if indata[prefix+'type']=='svm':
		regression=rfun(
			indata[prefix+'C'], indata[prefix+'epsilon'], kernel, labels)
	elif indata[prefix+'type']=='kernelmachine':
		regression=rfun(indata[prefix+'tau'], kernel, labels)
	else:
		return False

	regression.parallel.set_num_threads(indata[prefix+'num_threads'])
	if prefix+'tube_epsilon' in indata:
		regression.set_tube_epsilon(indata[prefix+'tube_epsilon'])

	regression.train()

	alphas=0
	bias=0
	sv=0
	if prefix+'bias' in indata:
		bias=abs(regression.get_bias()-indata[prefix+'bias'])
	if prefix+'alphas' in indata:
		for item in regression.get_alphas().tolist():
			alphas+=item
		alphas=abs(alphas-indata[prefix+'alphas'])
	if prefix+'support_vectors' in indata:
		for item in regression.get_support_vectors().tolist():
			sv+=item
		sv=abs(sv-indata[prefix+'support_vectors'])

	kernel.init(feats['train'], feats['test'])
	classified=max(abs(
		regression.apply().get_labels()-indata[prefix+'classified']))

	return util.check_accuracy(indata[prefix+'accuracy'], alphas=alphas,
		bias=bias, support_vectors=sv, classified=classified)
Example #21
def test():
    ret = ''
    ret += 'get_args() = ' + str(util.get_args()) + '\n'
    ret += '\n'
    ret += 'get_args_len() = ' + str(util.get_args_len()) + '\n'
    ret += '\n'
    ret += 'get_arg(0) = ' + util.get_arg(0) + '\n'
    ret += 'get_arg(1) = ' + util.get_arg(1) + '\n'
    ret += 'get_arg(2) = ' + util.get_arg(2) + '\n'
    ret += 'get_arg(3, \'-\') = ' + util.get_arg(3, '-') + '\n'
    return ret
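This example exercises yet another flavor: argv-style helpers. A self-contained sketch of what get_args, get_args_len, and get_arg(i, default) could look like under that reading (hypothetical; the real util module is not shown):

import sys

def get_args():
    # hypothetical sketch: all positional command-line arguments
    return sys.argv[1:]

def get_args_len():
    return len(sys.argv) - 1

def get_arg(i, default=''):
    # the i-th argument, or the given default when it is missing
    argv = sys.argv[1:]
    return argv[i] if i < len(argv) else default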
Example #22
	def __init__(self, message, data = None, **args):
		my_args = {
			'email': None, # who to e-mail an error report to
			'email_bcc': None # who to bcc error report to
		}
		args = get_args(my_args, args)
		self.message = message
		self.data = data
		self._args = args
		if my_args['email'] is not None:
			self.send_report(my_args['email'], my_args['email_bcc'])
Example #23
def _evaluate (indata):
	prefix='kernel_'
	feats=util.get_features(indata, prefix)
	kfun=eval(indata[prefix+'name']+'Kernel')
	kargs=util.get_args(indata, prefix)

	prefix='preprocessor_'
	pargs=util.get_args(indata, prefix)
	feats=util.add_preprocessor(indata[prefix+'name'], feats, *pargs)

	prefix='kernel_'
	kernel=kfun(feats['train'], feats['train'], *kargs)
	km_train=max(abs(
		indata[prefix+'matrix_train']-kernel.get_kernel_matrix()).flat)
	kernel.init(feats['train'], feats['test'])
	km_test=max(abs(
		indata[prefix+'matrix_test']-kernel.get_kernel_matrix()).flat)

	return util.check_accuracy(
		indata[prefix+'accuracy'], km_train=km_train, km_test=km_test)
Example #24
def _evaluate(indata):
    prefix = 'kernel_'
    feats = util.get_features(indata, prefix)
    kfun = eval(indata[prefix + 'name'] + 'Kernel')
    kargs = util.get_args(indata, prefix)

    prefix = 'preproc_'
    pargs = util.get_args(indata, prefix)
    feats = util.add_preproc(indata[prefix + 'name'], feats, *pargs)

    prefix = 'kernel_'
    kernel = kfun(feats['train'], feats['train'], *kargs)
    km_train = max(
        abs(indata[prefix + 'matrix_train'] - kernel.get_kernel_matrix()).flat)
    kernel.init(feats['train'], feats['test'])
    km_test = max(
        abs(indata[prefix + 'matrix_test'] - kernel.get_kernel_matrix()).flat)

    return util.check_accuracy(indata[prefix + 'accuracy'],
                               km_train=km_train,
                               km_test=km_test)
Example #25
def main():
    # get argument
    args = util.get_args()

    # print argument
    util.print_args(args)

    # at every T + 1 step
    t = 1995
    repetita_args = util.get_repetita_args(args, t)
    print('command:', ' '.join(repetita_args))
    stdout = util.call(repetita_args)
    print('stdout:', stdout)
Example #26
 def run(self, method, api, params = {}, args = {}):
     args = util.get_args(self.args, args, True)
     api = self._get_uri(api)
     #if args['verbose']:
         #sys.stderr.write('+ API=%s, PARAMS=%s, OPTIONS=%s\n' % (api, self.client.encode(params), self.client.encode(args)))
     retval = {}
     # check to see if we need to parse params (e.g. got them from CLI)
     if args['parse_params']:
         api_params = {}
         for param in params:
             api_params = self.parse_param(param, api_params)
         params = api_params
     try:
         get = None
         if method is None:
             if params:
                 method = 'POST'
             else:
                 method = 'GET'
         if 'GET' in args:
             if isinstance(args['GET'], str):
                 get = args['GET']
             else:
                 get = '&'.join(args['GET'])
         if method.upper() == 'EXEC':
             response = self.client.run(
                 api,
                 params,
                 args['raw_response'],
                 args['full_response'],
                 get,
                 '&'.join(args['POST']),
                 args['FILES']
             )
         else:
             response = self.client.request(
                 method,
                 api,
                 params,
                 args['raw_response'],
                 args['full_response'],
                 get,
                 args['headers'],
                 args['verbose'],
                 args['raw_noformat']
             )
         result = True
         error = None
     except Exception as e:
         result = False
         response = str(e)
Example #27
def _get_machine (indata, prefix, feats):
	if indata[prefix+'type']=='kernel':
		pre='kernel_'
		kargs=util.get_args(indata, pre)
		kfun=eval(indata[pre+'name']+'Kernel')
		machine=kfun(feats['train'], feats['train'], *kargs)

		if indata[pre+'name']=='Linear':
			normalizer=eval(indata[pre+'normalizer']+'()')
			machine.set_normalizer(normalizer)
			machine.init(feats['train'], feats['train'])

		machine.parallel.set_num_threads(indata[prefix+'num_threads'])
	elif indata[prefix+'type']=='knn':
		pre='distance_'
		dargs=util.get_args(indata, pre)
		dfun=eval(indata[pre+'name'])
		machine=dfun(feats['train'], feats['train'], *dargs)
		machine.parallel.set_num_threads(indata[prefix+'num_threads'])
	else:
		machine=None

	return machine
Example #28
def _get_machine(indata, prefix, feats):
    if indata[prefix + 'type'] == 'kernel':
        pre = 'kernel_'
        kargs = util.get_args(indata, pre)
        kfun = eval(indata[pre + 'name'] + 'Kernel')
        machine = kfun(feats['train'], feats['train'], *kargs)

        if indata[pre + 'name'] == 'Linear':
            normalizer = eval(indata[pre + 'normalizer'] + '()')
            machine.set_normalizer(normalizer)
            machine.init(feats['train'], feats['train'])

        machine.parallel.set_num_threads(indata[prefix + 'num_threads'])
    elif indata[prefix + 'type'] == 'knn':
        pre = 'distance_'
        dargs = util.get_args(indata, pre)
        dfun = eval(indata[pre + 'name'])
        machine = dfun(feats['train'], feats['train'], *dargs)
        machine.parallel.set_num_threads(indata[prefix + 'num_threads'])
    else:
        machine = None

    return machine
Example #29
def _get_machine(indata, prefix, feats):
    if indata[prefix + "type"] == "kernel":
        pre = "kernel_"
        kargs = util.get_args(indata, pre)
        kfun = eval(indata[pre + "name"] + "Kernel")
        machine = kfun(feats["train"], feats["train"], *kargs)

        if indata[pre + "name"] == "Linear":
            normalizer = eval(indata[pre + "normalizer"] + "()")
            machine.set_normalizer(normalizer)
            machine.init(feats["train"], feats["train"])

        machine.parallel.set_num_threads(indata[prefix + "num_threads"])
    elif indata[prefix + "type"] == "knn":
        pre = "distance_"
        dargs = util.get_args(indata, pre)
        dfun = eval(indata[pre + "name"])
        machine = dfun(feats["train"], feats["train"], *dargs)
        machine.parallel.set_num_threads(indata[prefix + "num_threads"])
    else:
        machine = None

    return machine
Example #30
def main():
    ts = time.time()
    der_arg, input_file, category = get_args("run_job_each_host.py")
    print "python ../bin/run_job_each_host.py {0} {1} {2}".format(
        input_file, category, der_arg)
    input_file = "{0}/{1}".format(config.input_dir,
                                  os.path.basename(input_file))
    scp_files_into_child(input_file)
    pieces = split_input_file(input_file, config.pieces_in_each_host)
    print "Pieces {0}".format(pieces)
    run_jobs(input_file, pieces, der_arg, category)
    ts2 = time.time()
    print "Took {0} seconds!".format(ts2 - ts)
    print "Program exit!"
Example #31
def dm_analysis(dm_model_path, docs):
    try:
        embeddings = pickle.load(open("analyze_embeddings.p", "rb"))
    except FileNotFoundError:
        args = get_args()
        with open(args.vocab_file, "rb") as f:
            vocab = pickle.load(f, encoding='latin1')
        config = Config(
            vocab_size=vocab.embedding.shape[0],
            embedding_dim=vocab.embedding.shape[1],
            category_size=args.category_size,
            category_dim=50,
            word_input_size=100,
            sent_input_size=2 * args.hidden,
            word_GRU_hidden_units=args.hidden,
            sent_GRU_hidden_units=args.hidden,
            pretrained_embedding=vocab.embedding,
            word2id=vocab.w2i,
            id2word=vocab.i2w,
        )
        dm_model = DomainModel(config)
        dm_model_dict = torch.load(dm_model_path)['state_dict']
        dm_model.load_state_dict(dm_model_dict)

        dm_enc_analyzer = Dm_Enc_Analyzer(dm_model.encoder_list)
        dm_dec_analyzer = Dm_Dec_Analyzer(dm_model.decoder_list)

        # evaluate example articles
        # each doc is a Doc object
        embeddings = []
        probs = []
        for doc in docs:
            try:
                print(doc.content)
                x = prepare_data(doc, vocab.w2i)
                sents = Variable(torch.from_numpy(x))
                label_idx = Variable(
                    torch.from_numpy(np.array([doc.label_idx])))
                embedding = dm_enc_analyzer(sents, label_idx)
                embeddings.append(embedding)

                prob = dm_dec_analyzer(embedding)
                probs.append(prob)
            except Exception:
                print("problem in doing evaluation, skip this doc")

        pickle.dump(embeddings, open("analyze_embeddings.p", "wb"))
        print(probs)
Example #32
def _evaluate(indata):
    prefix = 'kernel_'
    feats = util.get_features(indata, prefix)
    kargs = util.get_args(indata, prefix)
    fun = eval(indata[prefix + 'name'] + 'Kernel')
    kernel = fun(feats['train'], feats['train'], *kargs)

    prefix = 'regression_'
    kernel.parallel.set_num_threads(indata[prefix + 'num_threads'])

    try:
        rfun = eval(indata[prefix + 'name'])
    except NameError:
        print("%s is disabled/unavailable!" % indata[prefix + 'name'])
        return False
Example #33
	def __init__(self, parent, **args):
		my_args = {
			'title': self._title,
			'title_attr': self._title_attr,
			'title_align': self._title_align,
			'msg': self._msg,
			'msg_attr': curses.color_pair(0)
		}
		my_args = get_args(my_args, args)
		self._title = my_args['title']
		self._title_attr = my_args['title_attr']
		self._title_align = my_args['title_align']
		self._msg = my_args['msg']
		self._msg_attr = my_args['msg_attr']
		Box.__init__(self, parent, **args)
Example #34
def _evaluate (indata):
	prefix='kernel_'
	feats=util.get_features(indata, prefix)
	kargs=util.get_args(indata, prefix)
	fun=eval(indata[prefix+'name']+'Kernel')
	kernel=fun(feats['train'], feats['train'], *kargs)

	prefix='regression_'
	kernel.parallel.set_num_threads(indata[prefix+'num_threads'])

	try:
		rfun=eval(indata[prefix+'name'])
	except NameError:
		print("%s is disabled/unavailable!"%indata[prefix+'name'])
		return False
Example #35
def main():
    # num shows dataset size
    # drawpca draws pca results for datasets
    # umap draws scatter plot of umap projection
    # penscatter draws scatter plot on pendulum dataset, we draw scatter plot of x0
    args = util.get_args('num', 'drawpca', 'umap', 'penscatter')
    cfg, lbl = util.get_label_cfg_by_args(args)
    if args.num:
        show_dataset_size(cfg)
    if args.drawpca:
        drawPCA(cfg)
    if args.umap:
        drawUMap(cfg)
    if args.penscatter:
        drawPenScatter(cfg)
Example #36
def _evaluate(indata):
    prefix = "kernel_"
    feats = util.get_features(indata, prefix)
    kargs = util.get_args(indata, prefix)
    fun = eval(indata[prefix + "name"] + "Kernel")
    kernel = fun(feats["train"], feats["train"], *kargs)

    prefix = "regression_"
    kernel.parallel.set_num_threads(indata[prefix + "num_threads"])

    try:
        rfun = eval(indata[prefix + "name"])
    except NameError:
        print("%s is disabled/unavailable!" % indata[prefix + "name"])
        return False
Example #37
def main():
    # run trains the model
    # eval evaluates the model and dumps results into a file
    # prob evaluates probability to find assignment
    # k is the number of experts
    # valid switches model evaluation to the validation set
    # label evaluates labels
    args = util.get_args('run', 'eval', 'prob', 'k5', 'valid', 'label')
    if args.run:
        run_the_training(args)
    if args.eval:
        eval_model(args)
    if args.valid:
        eval_model_on_valid(args)
    if args.label:
        eval_final_label(args)
Example #38
def _evaluate (indata):
	prefix='distance_'
	feats=util.get_features(indata, prefix)

	dfun=eval(indata[prefix+'name'])
	dargs=util.get_args(indata, prefix)
	distance=dfun(feats['train'], feats['train'], *dargs)

	dm_train=max(abs(
		indata[prefix+'matrix_train']-distance.get_distance_matrix()).flat)
	distance.init(feats['train'], feats['test'])
	dm_test=max(abs(
		indata[prefix+'matrix_test']-distance.get_distance_matrix()).flat)

	return util.check_accuracy(
		indata[prefix+'accuracy'], dm_train=dm_train, dm_test=dm_test)
Example #39
def main():
    # test is a dummy argument
    # run trains the model
    # eval evaluates the model
    # prob evaluates probability to find assignment
    # valid means we evaluate the model on the validation set
    # label means we get label predictions
    args = util.get_args('test', 'run', 'eval', 'prob', 'valid', 'label')
    if args.run:
        run_the_training(args)
    if args.eval:
        eval_model(args)
    if args.valid:
        eval_model_on_valid(args)
    if args.label:
        eval_final_label(args)
Example #40
def run(args):
    (options, args) = util.get_args(args, OPTIONS, prog = 'copy-dep', options_func = lambda options:\
                                        (len(options.what) > 1 and (options.what[0] == 'all') and (options.what.remove('all') or options))\
                                             or options)
    if 'all' in options.what:
        print("-> Copying all")
        copy_all()
    else:
        if 'media' in options.what:
            print("-> Copying media")
            copy_media()
        if 'linked' in options.what:
            print("-> Copying linked")
            sys.stderr.write("WARNING: Linked copy doesn't exist.\n")
            # TODO: copy linked
    print(options, args)
Example #41
def main():
    epsilon, discount, alpha, iterations, selfPlay, readValues = get_args(
        sys.argv)
    if selfPlay:
        agent1 = Agent(1, epsilon, discount, alpha)
        agent2 = Agent(-1, epsilon, discount, alpha)
        print(
            "Beginning self play. Corresponding state values will be stored in agent1_values.txt and agent2_values.txt"
        )
        for i in range(iterations):
            print("Iteration %d..." % (i))
            self_play(agent1, agent2)

        agent1.write_qvalues('agent1_values.txt')
        agent2.write_qvalues('agent2_values.txt')

    elif readValues:
        token = 0
        ai = 0
        while True:
            token = input("What piece would you like to be (X/O)")
            if token == "X" or token == "O":
                break
        if token == "X":
            token = 1
            ai = Agent(token * -1,
                       epsilon=.2,
                       discount=.7,
                       alpha=.7,
                       readValues=True,
                       file="./agent2_values.txt")
        else:
            token = -1
            ai = Agent(token * -1,
                       epsilon=.2,
                       discount=.7,
                       alpha=.7,
                       readValues=True,
                       file="./agent1_values.txt")
        human = Human(token)
        if token == 1:
            # human is X
            play_human_vs_ai(human, ai, token)
            ai.write_qvalues("agent2_values.txt")
        else:
            play_human_vs_ai(ai, human, token)
            ai.write_qvalues("agent1_values.txt")
Example #42
def _evaluate (indata, prefix):
	feats=util.get_features(indata, prefix)
	kfun=eval(indata[prefix+'name']+'Kernel')
	kargs=util.get_args(indata, prefix)
	kernel=kfun(*kargs)
	if prefix+'normalizer' in indata:
		kernel.set_normalizer(eval(indata[prefix+'normalizer']+'()'))

	kernel.init(feats['train'], feats['train'])
	km_train=max(abs(
		indata[prefix+'matrix_train']-kernel.get_kernel_matrix()).flat)
	kernel.init(feats['train'], feats['test'])
	km_test=max(abs(
		indata[prefix+'matrix_test']-kernel.get_kernel_matrix()).flat)

	return util.check_accuracy(
		indata[prefix+'accuracy'], km_train=km_train, km_test=km_test)
Example #43
def run_1model(eval_path):

    outfpath = sys.argv[3] if len(sys.argv) == 4 else None

    sys.argv = sys.argv[:2]
    args = util.get_args()
    config = util.initialize_from_env(args.experiment, args.logdir)
    config['eval_path'] = eval_path

    model = cm.CorefModel(config, eval_mode=True)
    with tf.Session() as session:
        model.restore(session, args.latest_checkpoint)
        model.evaluate(session,
                       official_stdout=True,
                       pprint=False,
                       test=True,
                       outfpath=outfpath)
Example #44
def main():
    # kmean means we directly run kmeans on solution space with scaling
    # pcakmean means we run kmeans after running pca
    # speuclid means we do spectral clustering with euclidean distance (might be sparse)
    # spdydx means we do spectral clustering with dy/dx as distance
    # spvio means spectral clustering with constraint violation as distance
    # pen means we handle the pendulum dataset
    # car means we deal with the vehicle dataset
    # drone means we handle the drone obstacle problem
    # dtwo means drone with two obstacles
    # append means we append new k to existing data
    # neighbor is an integer of number of neighbors used for constructing sparse graph
    args = util.get_args('append', 'neighbor10')
    # args = getArgs('kmean', 'pcakmean', 'speuclid', 'spdydx', 'spvio', 'pen', 'car', 'drone', 'dtwo', 'append', 'neighbor10')
    if args.pen:
        cfg = datanames.PENDULUM
        k = [3, 5, 10, 20]
        target_dir = 'data/pen'
    elif args.car:
        cfg = datanames.VEHICLE
        k = [5, 10, 20, 40]
        target_dir = 'data/car'
    elif args.drone:
        cfg = datanames.DRONE
        k = [5, 10, 20, 40, 80, 160]
        target_dir = 'data/drone'
    elif args.dtwo:
        cfg = datanames.DRONETWO
        k = [5, 10, 20, 40, 80]
        target_dir = 'data/dronetwo'
    elif args.done:
        cfg = datanames.DRONEONE
        k = [5, 10, 20, 40, 80]
        target_dir = 'data/droneone'
    else:
        print('You must choose from existing datasets')
        raise SystemExit
    if args.kmean:
        generate_kmean_label(cfg, k, target_dir, args.append)
    if args.pcakmean:
        generate_pca_kmean_label(cfg, k, target_dir, args.append)
    try:
        if args.speuclid or args.spdydx or args.spvio:
            generate_spectral_label(cfg, k, target_dir, args.neighbor, args)
    except Exception:
        pass
Example #45
def main():
    args = util.get_args()
    INPUT_FILE = args.file or 'data.xlsx'
    DB = args.db or 'temp'
    TABLE = args.table or 'temp'

    # Connection params
    file = os.path.join(os.environ['USERPROFILE'], 'Desktop', 'Scripts',
                        INPUT_FILE)
    user = os.environ['PGUSER']
    password = os.environ['PGPASSWORD']

    # dump
    df = pd.read_excel(file)
    engine = create_engine(
        f'postgresql://{user}:{password}@localhost:5432/{DB}')
    df.to_sql(TABLE, engine, if_exists='replace')
Example #46
	def __init__(self, parent, **args):
		my_args = {
			'caption': self._caption,
			'caption_attr': self._caption_attr,
			'default_val': self._default_val,
			'style': 'simple'
		}
		if not isinstance(parent, Form):
			raise Exception("Fields may only be added to forms.")
		my_args = get_args(my_args, args)
		Box.__init__(self, parent, **args)
		self._caption = my_args['caption']
		self._caption_attr = my_args['caption_attr']
		self._default_val = my_args['default_val']
		self._val = my_args['default_val']
		if my_args['style'] not in self.STYLES:
			raise Exception("Unrecognized style %s for field %s." % (my_args['style'], self._name))
		self._style = my_args['style']
Example #47
def _evaluate(indata, prefix):
    feats = util.get_features(indata, prefix)
    kfun = eval(indata[prefix + 'name'] + 'Kernel')
    kargs = util.get_args(indata, prefix)
    kernel = kfun(*kargs)
    if prefix + 'normalizer' in indata:
        kernel.set_normalizer(eval(indata[prefix + 'normalizer'] + '()'))

    kernel.init(feats['train'], feats['train'])
    km_train = max(
        abs(indata[prefix + 'matrix_train'] - kernel.get_kernel_matrix()).flat)
    kernel.init(feats['train'], feats['test'])
    km_test = max(
        abs(indata[prefix + 'matrix_test'] - kernel.get_kernel_matrix()).flat)

    return util.check_accuracy(indata[prefix + 'accuracy'],
                               km_train=km_train,
                               km_test=km_test)
Example #48
def main():
    # get parameters
    args = util.get_args()

    # set random seed
    np.random.seed(args.seed)

    # get benchmark
    functions = benchmark.get_functions(args)
    print(functions)

    # run optimizer
    Results = []
    mto = args.algorithm(functions, args)
    mto.optimize(callback=Results.append)

    # save data
    optimizer.save(Results, args)
Example #49
def _evaluate(indata):
    prefix = 'distance_'
    feats = util.get_features(indata, prefix)

    dfun = eval(indata[prefix + 'name'])
    dargs = util.get_args(indata, prefix)
    distance = dfun(feats['train'], feats['train'], *dargs)

    dm_train = max(
        abs(indata[prefix + 'matrix_train'] -
            distance.get_distance_matrix()).flat)
    distance.init(feats['train'], feats['test'])
    dm_test = max(
        abs(indata[prefix + 'matrix_test'] -
            distance.get_distance_matrix()).flat)

    return util.check_accuracy(indata[prefix + 'accuracy'],
                               dm_train=dm_train,
                               dm_test=dm_test)
Example #50
    def __init__(self, out_class):
        torch.manual_seed(100)
        """ Training Parameters"""
        train_args = vars(get_args())
        train_args['NumEpochs'] = 500
        train_args['BatchSize'] = 16 * 4
        train_args['ValGamma'] = 0.9
        train_args['ValLr'] = 1e-4
        train_args['ValWeightDecay'] = 0.00
        train_args['RunPar'] = True
        train_args['use_gpu'] = [0]

        if train_args['registration']:
            print('Perform Examples of Linear Registration')
            registration(['SAG_IW_TSE_LEFT'])  #, 'SAG_IW_TSE_RIGHT'])
        """ Options """
        options = {
            'sampling': 'random7',
            'use_val': True,
            'img_dir': 'data/registered/',
            'load_list': ['SAG_IW_TSE_LEFT', 'SAG_IW_TSE_RIGHT'],
            'slice_range': [None, None],
            'network_choice': 'present',
            'fusion_method': 'cat',
            'trial_name': 'Pain_uni_present'
        }
        """ Create and save Manager"""
        self.Manager = KneeManager(options=options,
                                   train_args=train_args,
                                   labels=get_label('Fusion_Results/' +
                                                    options['trial_name']),
                                   out_class=out_class)
        print(self.Manager.options)
        print(self.Manager.train_args)

        if not os.path.isdir('Fusion_Results/' +
                             self.Manager.options['trial_name']):
            os.mkdir('Fusion_Results/' + self.Manager.options['trial_name'])

        if not os.path.isdir('cam_print/'):
            os.mkdir('cam_print/')
Example #51
def main3():
    #doc = pkl.load(open("doc_example.p", "rb"))
    #print(doc.content)
    #print(len(doc.content))
    #print("*"*40)
    #print(doc.summary)
    doc = Document(content=[[
        'to', 'the', 'editor', 're', 'for', 'women', 'worried', 'about',
        'fertility', 'egg', 'bank', 'is', 'a', 'new', 'option', 'sept', '00',
        'imagine', 'my', 'joy', 'in', 'reading', 'the', 'morning',
        'newspapers', 'on', 'the', 'day', 'of', 'my', '00th', 'birthday',
        'and', 'finding', 'not', 'one', 'but', 'two', 'articles', 'on', 'how',
        'women', 's', 'fertility', 'drops', 'off', 'precipitously', 'after',
        'age', '00'
    ], [
        'one', 'in', 'the', 'times', 'and', 'one', 'in', 'another', 'newspaper'
    ], ['i', 'sense', 'a', 'conspiracy', 'here'],
                            [
                                'have', 'you', 'been', 'talking', 'to', 'my',
                                'mother', 'in', 'law'
                            ], ['laura', 'heymann', 'washington']],
                   summary=[[
                       'laura', 'heymann', 'letter', 'on', 'sept', '00',
                       'article', 'about', 'using', 'egg', 'bank', 'to',
                       'prolong', 'fertility', 'expresses', 'ironic', 'humor',
                       'about', 'her', 'age', 'and', 'chances', 'of',
                       'becoming', 'pregnant'
                   ]],
                   label=1,
                   label_idx=1)

    #x = torch.tensor([1.]*12, device='cuda:0')
    #outputs = torch.nn.functional.softmax(Variable(x), dim=0).data
    #outputs = outputs.view(12, 1)

    args = get_args()
    #print("args.std_rouge:", args.std_rouge)
    if args.oracle_length == -1:  # use true oracle length
        oracle_summary_sent_num = len(doc.summary)
    else:
        oracle_summary_sent_num = args.oracle_length
Example #52
def _evaluate_top_fisher(indata, prefix):
    feats = {}
    wordfeats = util.get_features(indata, prefix)

    pos_train = HMM(wordfeats['train'], indata[prefix + 'N'],
                    indata[prefix + 'M'], indata[prefix + 'pseudo'])
    pos_train.train()
    pos_train.baum_welch_viterbi_train(BW_NORMAL)
    neg_train = HMM(wordfeats['train'], indata[prefix + 'N'],
                    indata[prefix + 'M'], indata[prefix + 'pseudo'])
    neg_train.train()
    neg_train.baum_welch_viterbi_train(BW_NORMAL)
    pos_test = HMM(pos_train)
    pos_test.set_observations(wordfeats['test'])
    neg_test = HMM(neg_train)
    neg_test.set_observations(wordfeats['test'])

    if indata[prefix + 'name'] == 'TOP':
        feats['train'] = TOPFeatures(10, pos_train, neg_train, False, False)
        feats['test'] = TOPFeatures(10, pos_test, neg_test, False, False)
    else:
        feats['train'] = FKFeatures(10, pos_train, neg_train)
        feats['train'].set_opt_a(-1)  #estimate prior
        feats['test'] = FKFeatures(10, pos_test, neg_test)
        feats['test'].set_a(
            feats['train'].get_a())  #use prior from training data

    prefix = 'kernel_'
    args = util.get_args(indata, prefix)
    kernel = PolyKernel(feats['train'], feats['train'], *args)
    #	kernel=PolyKernel(*args)
    #	kernel.init(feats['train'], feats['train'])
    km_train = max(
        abs(indata[prefix + 'matrix_train'] - kernel.get_kernel_matrix()).flat)
    kernel.init(feats['train'], feats['test'])
    km_test = max(
        abs(indata[prefix + 'matrix_test'] - kernel.get_kernel_matrix()).flat)

    return util.check_accuracy(indata[prefix + 'accuracy'],
                               km_train=km_train,
                               km_test=km_test)
Example #53
def _evaluate_top_fisher (indata, prefix):
	feats={}
	wordfeats=util.get_features(indata, prefix)

	pos_train=HMM(wordfeats['train'], indata[prefix+'N'], indata[prefix+'M'],
		indata[prefix+'pseudo'])
	pos_train.train()
	pos_train.baum_welch_viterbi_train(BW_NORMAL)
	neg_train=HMM(wordfeats['train'], indata[prefix+'N'], indata[prefix+'M'],
		indata[prefix+'pseudo'])
	neg_train.train()
	neg_train.baum_welch_viterbi_train(BW_NORMAL)
	pos_test=HMM(pos_train)
	pos_test.set_observations(wordfeats['test'])
	neg_test=HMM(neg_train)
	neg_test.set_observations(wordfeats['test'])

	if indata[prefix+'name']=='TOP':
		feats['train']=TOPFeatures(10, pos_train, neg_train, False, False)
		feats['test']=TOPFeatures(10, pos_test, neg_test, False, False)
	else:
		feats['train']=FKFeatures(10, pos_train, neg_train)
		feats['train'].set_opt_a(-1) #estimate prior
		feats['test']=FKFeatures(10, pos_test, neg_test)
		feats['test'].set_a(feats['train'].get_a()) #use prior from training data

	prefix='kernel_'
	args=util.get_args(indata, prefix)
	kernel=PolyKernel(feats['train'], feats['train'], *args)
#	kernel=PolyKernel(*args)
#	kernel.init(feats['train'], feats['train'])
	km_train=max(abs(
		indata[prefix+'matrix_train']-kernel.get_kernel_matrix()).flat)
	kernel.init(feats['train'], feats['test'])
	km_test=max(abs(
		indata[prefix+'matrix_test']-kernel.get_kernel_matrix()).flat)

	return util.check_accuracy(indata[prefix+'accuracy'],
		km_train=km_train, km_test=km_test)
Example #54
	def write(self, text, **args):
		'''Prints formatted text to the box interior, returning the formatted
		text.'''
		my_args = {
			'text_align': self._text_align,
			'text_indent': self._text_indent,
			'text_attr': self._text_attr,
			'text_wrap': self._text_wrap,
			'left': 0,
			'top': 0,
			'refresh': True
		}
		my_args = get_args(my_args, args)
		center = self.get_center()
		# TODO: word-wrapping/overflow: break our text up into multiple lines
		f_text = text
		# figure out how to format the text
		metrics = self.get_metrics()
		top = metrics['inner_top'] + my_args['top']
		left = metrics['inner_left'] + my_args['left']
		if my_args['text_align'] == 'left':
			if self._text_indent != 0:
				left += self._text_indent
		elif my_args['text_align'] == 'center':
			left = int(center['left'] - (len(text) / 2))
			if self._text_indent != 0:
				left += self._text_indent
		elif my_args['text_align'] == 'right':
			left = (metrics['inner_right'] - my_args['left']) - len(text)
			if self._text_indent != 0:
				left -= self._text_indent
		else:
			raise Exception("Invalid text alignment of '%s'." \
				% my_args['text_align'])
		self.doc.win.addstr(top, left, text, my_args['text_attr'])
		if my_args['refresh']:
			self.doc.win.refresh()
		return self
Example #55
def _evaluate_top_fisher (indata, prefix):
	raise NotImplementedError('TOP/Fisher not yet supported in static interfaces.')

	sg('new_hmm', indata[prefix+'N'], indata[prefix+'M'])
	pos=HMM(wordfeats['train'], indata[prefix+'N'], indata[prefix+'M'],
		indata[prefix+'pseudo'])
	pos.train()
	pos.baum_welch_viterbi_train(BW_NORMAL)
	neg=HMM(wordfeats['train'], indata[prefix+'N'], indata[prefix+'M'],
		indata[prefix+'pseudo'])
	neg.train()
	neg.baum_welch_viterbi_train(BW_NORMAL)
	pos_clone=HMM(pos)
	neg_clone=HMM(neg)
	pos_clone.set_observations(wordfeats['test'])
	neg_clone.set_observations(wordfeats['test'])

	if indata[prefix+'type']=='TOP':
		feats['train']=TOPFeatures(10, pos, neg, False, False)
		feats['test']=TOPFeatures(10, pos_clone, neg_clone, False, False)
	else:
		feats['train']=FKFeatures(10, pos, neg)
		feats['train'].set_opt_a(-1) #estimate prior
		feats['test']=FKFeatures(10, pos_clone, neg_clone)
		feats['test'].set_a(feats['train'].get_a()) #use prior from training data

	prefix='kernel_'
	args=util.get_args(indata, prefix)
	kernel=PolyKernel(feats['train'], feats['train'], *args)
	km_train=max(abs(
		indata[prefix+'matrix_train']-kernel.get_kernel_matrix()).flat)
	kernel.init(feats['train'], feats['test'])
	km_test=max(abs(
		indata[prefix+'matrix_test']-kernel.get_kernel_matrix()).flat)

	return util.check_accuracy(indata[prefix+'accuracy'],
		km_train=km_train, km_test=km_test)
Example #56
	def border(self, **args):
		my_args = {
			'style': None,
			'border': None,
			'border_chars': self._border_chars,
			'border_attr': self._border_attr,
			'refresh': True
		}
		my_args = get_args(my_args, args)
		# return our style info by default
		if my_args['style'] is None and my_args['border'] is None:
			return {
				'border': self._border,
				'border_chars': self._border_chars,
				'border_attr': self._border_attr
			}
		elif my_args['style'] in BORDER_STYLES:
			my_args['border'] = [1, 1, 1, 1]
			my_args['border_chars'] = BORDER_STYLES[my_args['style']][0:8]
			my_args['border_attr'] = BORDER_STYLES[my_args['style']][8]
		elif not (len(my_args['border_chars']) == 8 and len(my_args['border']) == 4):
			raise Exception("Invalid border sizes (%s), chars (%s), or style (%s)."
				% (str(my_args['border']), str(my_args['border_chars']), str(my_args['style']))
			)
		# set our border info
		self._border = my_args['border']
		self._border_chars = my_args['border_chars']
		self._border_attr = my_args['border_attr']
		# ['|', '|', '-', '-', '.', '.', "'", "'"]
		#  left rght top  btm  tl   tr   bl   br
		#  0    1    2    3    4    5    6    7
		top = self._top + self._margin[0] + my_args['border'][0]
		bottom = top + self._height + 1 - my_args['border'][2]
		metrics = self.get_metrics()
		for y in range(top, bottom):
			# left edge
			for i in range(0, my_args['border'][3]):
				self.doc.win.addch(
					y,
					self._left + self._margin[3] + i,
					ord(my_args['border_chars'][0]),
					my_args['border_attr']
				)
			# right edge
			for i in range(0, my_args['border'][3]):
				self.doc.win.addch(
					y,
					self._left + self._width + 1 + self._margin[3] + i,
					ord(my_args['border_chars'][1]),
					my_args['border_attr']
				)
		# top border
		for i in range(0, my_args['border'][0]):
			self.doc.win.addstr(
				self._top + self._margin[0] + i,
				self._left + self._margin[3] + my_args['border'][3],
				my_args['border_chars'][2] * (self._width + 1 - my_args['border'][1]),
				my_args['border_attr']
			)
		# bottom border
		for i in range(0, my_args['border'][2]):
			self.doc.win.addstr(
				bottom + i,
				self._left + self._margin[3] + my_args['border'][3],
				my_args['border_chars'][3] * (self._width + 1 - my_args['border'][1]),
				my_args['border_attr']
			)
		# top left and top right corners
		for i in range(0, my_args['border'][0]):
			# top left
			self.doc.win.addstr(
				self._top + self._margin[0] + i,
				self._left + self._margin[3],
				my_args['border_chars'][4] * my_args['border'][3],
				my_args['border_attr']
			)
			# top right
			self.doc.win.addstr(
				self._top + self._margin[0] + i,
				self._left + self._margin[3] + self._width + 1,
				my_args['border_chars'][5] * my_args['border'][1],
				my_args['border_attr']
			)
		# bottom left and bottom right corners
		for i in range(0, my_args['border'][2]):
			# bottom left
			self.doc.win.addstr(
				bottom + i,
				self._left + self._margin[3],
				my_args['border_chars'][6] * my_args['border'][3],
				my_args['border_attr']
			)
			# bottom right
			self.doc.win.addstr(
				bottom + i,
				self._left + self._margin[3] + self._width,
				my_args['border_chars'][7] * my_args['border'][1],
				my_args['border_attr']
			)
		self._dirty = True
		# and refresh if needed
		if my_args['refresh']:
			self.doc.win.refresh()
		return self
Example #57
 def parse_args(self, expr, arg_slice=None):
     args = {
         'path': None,
         'verb': None,
         'api_args': {},
         'basic_auth': None,
         'cmd_args': [],
         'headers': {},
         'data': [],
         'extract': [],
         'exclude': [],
         'invert_color': False,
         'color': self.main_args['color'],
         'formatted': self.main_args['formatted'],
         'url': self.main_args['url'],
         'verbose': False,
         'stdout_redir': None,
         'redir_type': None,
         'shell': False,
         'query': [],
         'help': False,  # user just wanted some help
         'FILES': {},
         'oauth': {
             'consumer_key': None,
             'consumer_secret': None,
             'token': None,
             'token_secret': None
         }
     }
     if isinstance(expr, str):
         parts = shlex.split(expr)
     else:
         parts = expr  # already a list
     # check for any condensed parameters (e.g. -fr = -f, -r)
     old_parts = parts[:]
     for i in range(0, len(parts)):
         part = parts[i]
         if len(part) > 2 and part[0] == '-' and not (part[1] in ['-', '+', '=']):
             # expand the parameters out
             parts = parts[:i] + \
                 [''.join(['-', param]) for param in parts[i][1:]] + \
                 parts[i + 1:]
     i = 0
     # iterate through each parameter and handle it
     while i < len(parts):
         part = parts[i]
         if len(part) == 0:
             pass
         elif part == '>' or part[0] == '>' or part == '>>':
             # output redirection! woot
             if part == '>' or part == '>>':
                 i += 1
                 if part == '>':
                     args['redir_type'] = 'w'
                 else:
                     args['redir_type'] = 'a'
                 if i == len(parts):
                     raise Exception("Missing file path to output result to.")
                 args['stdout_redir'] = parts[i]
             else:
                 if len(part) > 1 and part[0:2] == '>>':
                     args['stdout_redir'] = part[2:]
                     args['redir_type'] = 'a'
                 else:
                     args['stdout_redir'] = part[1:]
                     args['redir_type'] = 'w'
         elif part == '-B' or part == '--basic':
             i += 1
             if i == len(parts):
                 raise Exception("Missing HTTP basic auth user/pass parameter.")
             if ':' not in parts[i]:
                 raise Exception("Expected HTTP basic auth in format 'user:pass'.")
             args['basic_auth'] = parts[i]
         elif part == '-F' or part == '--file':
             i += 1
             if i == len(parts):
                 raise Exception("Missing value for file to upload.")
             # collect up the name
             if parts[i].find('=') == -1 or parts[i].find('&') != -1:
                 raise Exception("Invalid file name=file_path pair.")
             (name, path) = parts[i].split('=', 1)
             # make sure the file exists
             if not os.path.isfile(path):
                 raise Exception("Unable to either read or locate file '%s." % path)
             args['FILES'][name] = path
             raise Exception("Not supported at the moment")
         elif part == '-Q' or part == '--query':
             i += 1
             if i == len(parts):
                 raise Exception("Missing query name=value pair.")
             # make sure we have a valid pair
             if parts[i].find('=') == -1 or parts[i].find('&') != -1:
                 raise Exception("Invalid query name=value pair.")
             args['query'].append(parts[i])
         elif part == '-i' or part == '--invert':
             args['invert_color'] = True
         elif part == '-c' or part == '--color':
             args['color'] = True
         elif part == '-C' or part == '--no-color':
             args['color'] = False
         elif part == '-v' or part == '--verbose':
             args['verbose'] = True
         elif part == '-f' or part == '--form':
             args['headers']['content-type'] = 'application/x-www-form-urlencoded'
         elif part == '-O' or part == '--oauth':
             # the next 4 parameters are for oauth
             if i + 4 >= len(parts):
                 raise Exception("Missing one of the following values for --oauth: consumer key, consumer secret, token, token secret.")
             next_params = [
                 'consumer_key', 'consumer_secret',
                 'token', 'token_secret'
             ]
             for ctr in range(0, 4):
                 args['oauth'][next_params[ctr]] = parts[i + ctr + 1]
             i += 4
         elif part == '-h' or part == '--help':
             self.print_help()
             args['help'] = True
         elif part == '-H' or part == '--header':
             i += 1
             if i == len(parts):
                 raise Exception("Missing value for HTTP header.")
             h_parts = parts[i].split(': ', 1)
             if len(h_parts) != 2:
                 raise Exception("Invalid HTTP header.")
             args['headers'][h_parts[0].lower()] = h_parts[1]
         elif part == '-s' or part == '--shell':
             args['shell'] = True
         elif part == '-j' or part == '--json':
             i += 1
             if i == len(parts):
                 raise Exception("Missing value for JSON API params.")
             try:
                 api_args = self.decode(parts[i])
                 if isinstance(api_args, dict):
                     args['api_args'].update(api_args)
                 else:
                     raise JSONException("JSON values must be a dictionary of arguments.")
             except JSONException as e:
                 sys.stderr.write('Invalid JSON: ' + str(e))
                 raise e
             except Exception as e:
                 sys.stderr.write('Invalid JSON: ' + str(e))
                 raise JSONException(str(e))
         elif part == '-r' or part == '--raw':
             args['formatted'] = False
         elif part == '--url' or part == '-u':
             i += 1
             if i == len(parts):
                 raise Exception("Missing value for URL.")
             args['url'] = parts[i]
         elif part == '-d' or part == '--data':
             i += 1
             if i == len(parts):
                 raise Exception("Missing value for --data.")
             part = parts[i]
             if part.find('=') == -1:
                 raise Exception("Invalid parameter for --data: expected format NAME[+]=PATH")
             args['data'].append(DataMap(*part.split('=', 1)))
         elif part == '-x' or part == '--extract':
             i += 1
             if i == len(parts):
                 raise Exception("Missing value for --extract.")
             args['extract'].append(parts[i])
         elif part == '-X' or part == '--exclude':
             i += 1
             if i == len(parts):
                 raise Exception("Missing value for --exclude.")
             args['exclude'].append(parts[i])
         else:
             # we always pick up the command/method first
             if args['verb'] is None:
                 args['verb'] = part.lower()
                 # process any aliases
                 if args['verb'] in self.method_aliases:
                     args['verb'] = self.method_aliases[args['verb']]
             elif args['verb'] in self.http_methods and args['path'] is None:
                 # collect the API -- unless this is a internal command
                 args['path'] = util.pretty_path(self.parse_path(part), False, False)
             else:
                 # anything else is a parameter
                 if args['verb'] in self.http_methods:
                     # get the name/value
                     args['api_args'] = self.parse_param(part, args['api_args'])
                 else:
                     args['cmd_args'].append(part)
         i += 1
     if arg_slice is not None:
         args = util.get_args(arg_slice, args)
     return args
Example #58
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import render
import query
import util
import get_svg
import re

args = util.get_args() #get the form data in an array

render.http_header() #load header
render.html_header() #load scripts
gt = args['GraphTerm'] if 'GraphTerm' in args else ''
#ct = args['ClusterTerm'] if 'ClusterTerm' in args else ''
mode = args['GraphMode'] if 'GraphMode' in args else ''
render.render_div_search(ft=gt, gt=gt) #display form
#render.render_datalist( 'terms');

x = args['all'] if 'all' in args else ''

match = re.search(r'cl[0-9]', mode)

if mode == 'all' or match:
    print('''
        <p> Most Connected graph </p>
        ''')
    print(get_svg.get_svg_all(args))
    print('''
            </div>
            ''')
Example #59
import os
import time
import glob

import torch
import torch.optim as O
import torch.nn as nn

from torchtext import data
from torchtext import datasets

from model import SNLIClassifier
from util import get_args


args = get_args()
torch.cuda.set_device(args.gpu)

inputs = data.Field(lower=args.lower)
answers = data.Field(sequential=False)

train, dev, test = datasets.SNLI.splits(inputs, answers)

inputs.build_vocab(train, dev, test)
if args.word_vectors:
    if os.path.isfile(args.vector_cache):
        inputs.vocab.vectors = torch.load(args.vector_cache)
    else:
        inputs.vocab.load_vectors(wv_dir=args.data_cache, wv_type=args.word_vectors, wv_dim=args.d_embed)
        os.makedirs(os.path.dirname(args.vector_cache), exist_ok=True)
        torch.save(inputs.vocab.vectors, args.vector_cache)