Code example #1
File: result.py  Project: BackupTheBerlios/tostdk-svn
	@classmethod
	def create ( cls, p_opcode, p_args ):
	#----------------------------------------------------------------------

		# Look up the pack format registered for this result opcode;
		# None means the opcode is unknown.
		l_format = opcodes.result_format(p_opcode)

		if l_format is None:
			return None

		# Pack the result arguments into a raw payload; None means packing failed.
		l_data = data.pack(l_format, p_args)

		if l_data is None:
			return None

		return cls(p_opcode, l_data)
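The factory returns None instead of raising when either the format lookup or the packing fails. For reference, a minimal sketch of what a pack helper of this shape could look like, built on Python's struct module (an assumption for illustration; the real tostdk data.pack may differ):

import struct

def pack(p_format, p_args):
    """Pack p_args according to a struct-style format; return None on failure."""
    try:
        return struct.pack(p_format, *p_args)
    except struct.error:
        return None

print(pack('>HI', (3, 42)))   # b'\x00\x03\x00\x00\x00*'
print(pack('>HI', ('bad',)))  # None: wrong number/type of arguments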
Code example #2
	@classmethod
	def create ( cls, p_opcode, p_args, p_timeout = 0.0, p_guid = None ):
	#----------------------------------------------------------------------

		# Look up the pack format registered for this command opcode.
		l_format = opcodes.command_format(p_opcode)

		if l_format is None:
			return None

		# Pack the command arguments into a raw payload; None means packing failed.
		l_data = data.pack(l_format, p_args)

		if l_data is None:
			return None

		l_command = cls(p_opcode, l_data)

		# Attach the optional timeout and GUID to the new command.
		l_command.set_timeout(p_timeout)
		l_command.set_guid(p_guid)

		return l_command
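Example #2 follows the same pattern but looks up a command format and then attaches a timeout and GUID to the freshly built object. Both factories presumably rely on an opcode-to-format lookup that returns None for unknown opcodes; a hedged sketch of that idea is shown below (the table contents and opcode names are invented for illustration):

OPCODE_PING, OPCODE_GET = 0, 1              # hypothetical opcodes
_COMMAND_FORMATS = {OPCODE_PING: '', OPCODE_GET: '>H'}

def command_format(p_opcode):
    # Unknown opcodes map to None, which makes create() bail out early.
    return _COMMAND_FORMATS.get(p_opcode)

print(command_format(OPCODE_GET))  # '>H'
print(command_format(99))          # None -> create() returns None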
Code example #3
File: train.py  Project: felixwzh/La-DTL
        sess.run(tf.global_variables_initializer())

        # tensor board
        merged = tf.summary.merge_all()
        writer = tf.summary.FileWriter(log_path + '/', sess.graph)

        logging.info('Model builded, %s used\n' %
                     time_format(time.time() - tic0))

        # Pre-build one feed_dict per test batch of dataset_u; data.pack returns
        # the inputs (x), labels (y), sequence lengths (l) and max sequence length (msl).
        test_feed_dicts = []
        for i in xrange((len(dataset_u.test_x) + args.batch_size_u - 1) //
                        args.batch_size_u):
            x, y, l, msl = data.pack(
                dataset_u.test_x[i * args.batch_size_u:(i + 1) *
                                 args.batch_size_u],
                dataset_u.test_y[i * args.batch_size_u:(i + 1) *
                                 args.batch_size_u])
            test_feed_dicts.append({
                x_u_: x,
                seq_len_u_: l,
                y_u_: y,
                msl_u_: msl,
                keep_prob_: 1      # no dropout at evaluation time
            })

        train_s_feed_dicts = []
        for i in xrange((len(dataset_s.train_x) + args.batch_size_s - 1) //
                        args.batch_size_s):
            x, y, l, msl = data.pack(
                dataset_s.train_x[i * args.batch_size_s:(i + 1) *
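Both loops in example #3 pre-build one feed_dict per batch by slicing the dataset with a ceil-division batch count. That batching idiom is easy to isolate; a small self-contained sketch:

def batch_slices(n_items, batch_size):
    # Ceil-division gives the number of batches; slicing past the end of a
    # Python list is safe, so the last batch is simply smaller.
    n_batches = (n_items + batch_size - 1) // batch_size
    return [(i * batch_size, (i + 1) * batch_size) for i in range(n_batches)]

print(batch_slices(10, 4))  # [(0, 4), (4, 8), (8, 12)]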
Code example #4
File: train.py  Project: novellll/CWS_BLSTM_CRF
    
        # let the session allocate GPU memory on demand
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        sess = tf.Session(config=config)
        init = tf.initialize_all_variables()
        sess.run(init)
        logging.info('Model builded, %s used\n' % time_format(time.time() - tic0))
        if not args.quiet:
            print('Model builded, %s used\n' % time_format(time.time() - tic0))

        # pre-store test dataset feed_dicts
        test_feed_dicts = []
        for i in xrange((len(dataset.test_x)+args.batch_size-1) // args.batch_size):
            x, y, l, msl = data.pack(
                dataset.test_x[i*args.batch_size:(i+1)*args.batch_size],
                dataset.test_y[i*args.batch_size:(i+1)*args.batch_size],
                args.window_size)
            test_feed_dicts.append({x_: x, seq_len_: l, y_: y, msl_: msl, keep_prob_: 1.})

        # training loop: one optimisation step per minibatch
        for epoch in xrange(1, args.n_epochs+1):
            tic = time.time()
            loss = 0.
            n_train = dataset.n_train
            n_trained = 0
            for idxs in dataset.minibatches:
                x, y, l, msl = dataset.next_batch()
                _, c = sess.run([train_op, loss_],
                                feed_dict={x_: x, seq_len_: l, y_: y,
                                           msl_: msl, keep_prob_: args.keep_prob})
                if np.isnan(c):
                    # abort as soon as the loss diverges
                    logging.error('Gradient Explosion!')
                    print('Gradient Explosion!')
                    exit()
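The training loop in example #4 runs one optimisation step per minibatch, accumulates the loss, and aborts as soon as the loss turns NaN. A minimal, framework-free sketch of that accounting pattern (run_batch below is a hypothetical stand-in for the sess.run call):

import math
import time

def train(batches, run_batch, n_epochs=2):
    for epoch in range(1, n_epochs + 1):
        tic = time.time()
        loss = 0.
        for batch in batches:
            c = run_batch(batch)      # one optimisation step, returns the batch loss
            if math.isnan(c):         # divergence guard, as in the snippet above
                raise RuntimeError('Gradient Explosion!')
            loss += c
        print('epoch %d: loss=%.4f, %.2fs' % (epoch, loss, time.time() - tic))

train([[1, 2], [3, 4, 5]], run_batch=lambda b: 0.1 * len(b))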