Example #1
import numpy as np
import copy



# USER LIBRARIES
import fmt
import lib
import errors
import logger
import reporter



# Define instances
Logger = logger.Logger("calculator")



# CONSTANTS
BG_LOW_LIMIT       = 4.2  # (mmol/L)
BG_HIGH_LIMIT      = 8.5  # (mmol/L)
BG_VERY_HIGH_LIMIT = 11.0 # (mmol/L)
DOSE_ENACT_TIME    = 0.5  # (h)



def computeIOB(net, IDC):

    """
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Example #2
import sqlite3
import numpy as np
import logger
import sys, os
sys.path.insert(0, os.getcwd())
from src.rule.graph import Graph
from src.rule.semQL import Sup, Sel, Order, Root, Filter, A, N, C, T, Root1, Turn, ValueS, Distance
from src.rule.sem_utils import alter_inter, alter_not_in, alter_column0, load_dataSets
from src.parse_sql_py import parse_sql
DB_DIR = os.path.join('sparc', 'database')

log_dir = 'log_tmp'
if not os.path.exists(log_dir):
    os.makedirs(log_dir)
record = logger.Logger(
    os.path.join(log_dir,
                 os.path.basename(__file__).split('.')[0] + '.log'), 'w')


def split_logical_form(lf):
    indexs = [i + 1 for i, letter in enumerate(lf) if letter == ')']
    indexs.insert(0, 0)
    components = list()
    for i in range(1, len(indexs)):
        components.append(lf[indexs[i - 1]:indexs[i]].strip())
    return components
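
# Illustration (not part of the original file): split_logical_form cuts the
# string after every closing parenthesis, e.g.
#   split_logical_form('Root1(3) Root(5) Sel(0)')
#   -> ['Root1(3)', 'Root(5)', 'Sel(0)']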


def pop_front(array):
    if len(array) == 0:
        return 'None'
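    # (continuation sketch -- the excerpt is cut off here) otherwise remove and
    # return the first element:
    return array.pop(0)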
Example #3
if len(sys.argv) < 2:
    sys.exit("Usage: test_luna_props_scan.py <configuration_name>")

config_name = sys.argv[1]
set_configuration('configs_luna_props_scan', config_name)

# predictions path
predictions_dir = utils.get_dir_path('model-predictions',
                                     pathfinder.METADATA_PATH)
outputs_path = predictions_dir + '/%s' % config_name
utils.auto_make_dir(outputs_path)

# logs
logs_dir = utils.get_dir_path('logs', pathfinder.METADATA_PATH)
sys.stdout = logger.Logger(logs_dir + '/%s.log' % config_name)
sys.stderr = sys.stdout

# builds model and sets its parameters
model = config().build_model()

x_shared = nn.utils.shared_empty(dim=len(model.l_in.shape))
givens_valid = {}
givens_valid[model.l_in.input_var] = x_shared

get_predictions_patch = theano.function([],
                                        nn.layers.get_output(
                                            model.l_out, deterministic=True),
                                        givens=givens_valid,
                                        on_unused_input='ignore')
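
# Typical driving code for the compiled Theano function above (illustrative,
# not part of the excerpt, which also assumes the project's utils, pathfinder,
# nn and theano imports): load a batch into the shared variable, then call it.
#   x_shared.set_value(x_batch)
#   predictions = get_predictions_patch()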
Example #4
tf.set_random_seed(42)

# config
configs_dir = __file__.split('/')[-2]
config = importlib.import_module('%s.%s' % (configs_dir, args.config_name))
experiment_id = '%s-%s' % (args.config_name.split('.')[-1],
                           time.strftime("%Y_%m_%d", time.localtime()))

if not os.path.isdir('metadata'):
    os.makedirs('metadata')
save_dir = 'metadata/' + experiment_id

# logs
if not os.path.isdir('logs'):
    os.makedirs('logs')
sys.stdout = logger.Logger('logs/%s.log' % experiment_id)
sys.stderr = sys.stdout

print('exp_id', experiment_id)

# create the model
model = tf.make_template('model', config.build_model)
x_init = tf.placeholder(tf.float32,
                        shape=(config.batch_size, ) + config.obs_shape)
init_pass = model(x_init, init=True)

all_params = tf.trainable_variables()
n_parameters = 0
for variable in all_params:
    shape = variable.get_shape()
    variable_parameters = 1
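    # (continuation sketch -- the excerpt is cut off mid-loop) multiply out each
    # variable's shape to count its parameters and accumulate the total:
    for dim in shape:
        variable_parameters *= dim.value
    n_parameters += variable_parameters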
Example #5
    def setUp(self):
        self._input = TestInputSingleton.input
        self._servers = self._input.servers
        self.log = logger.Logger().get_logger()
Example #6
best = args.best

# metadata
metadata_dir = utils.get_dir_path('models', pathfinder.METADATA_PATH)
metadata_path = utils.find_model_metadata(metadata_dir, config_name, best=best)

metadata = utils.load_pkl(metadata_path)
expid = metadata['experiment_id']

if best:
    expid += "-best"

print("logs")
# logs
logs_dir = utils.get_dir_path('logs', pathfinder.METADATA_PATH)
sys.stdout = logger.Logger(logs_dir + '/%s-test.log' % expid)
sys.stderr = sys.stdout
print("prediction path")
# predictions path
predictions_dir = utils.get_dir_path('model-predictions',
                                     pathfinder.METADATA_PATH)
outputs_path = predictions_dir + '/' + expid

if valid_tta_feat or test_tta_feat or all_tta_feat or train_tta_feat:
    outputs_path += '/features'

utils.auto_make_dir(outputs_path)

if dump:
    prediction_dump = os.path.join(outputs_path,
                                   expid + "_" + args.eval + "_predictions.p")
Example #7
def main(args):
	time_str = time.strftime("%Y%m%d-%H%M%S")
	logger_ins = logger.Logger(HOME + '/catkin_ws/src/Turtlebot3_Pheromone/src/log', output_formats=[logger.HumanOutputFormat(sys.stdout)])
	board_logger = tensorboard_logging.Logger(os.path.join(logger_ins.get_dir(), "tf_board", time_str))
	sess = tf.Session()
	K.set_session(sess)
	########################################################
	game_state= phero_turtlebot_exp1.Env()   # game_state has frame_step(action) function
	actor_critic = ActorCritic(game_state, sess)
	random.seed(args.random_seed)
	########################################################
	num_trials = 400
	trial_len  = 256
	log_interval = 5
	train_indicator = 1
	tfirststart = time.time()

	# Reward Logging
	with open(HOME + '/catkin_ws/src/Turtlebot3_Pheromone/src/log/csv/{}.csv'.format(actor_critic.file_name), mode='w') as csv_file:
		csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
		csv_writer.writerow(['Episode', 'Average Reward'])

	# Double ended queue with max size 100 to store episode info
	epinfobuf = deque(maxlen=100)
	num_robots = game_state.num_robots
	current_state = game_state.reset()

	# actor_critic.read_human_data()
	
	step_reward = np.array([0, 0]).reshape(1,2)
	step_Q = [0,0]
	step = 0

	if (train_indicator==2):
		for i in range(num_trials):
			print("trial:" + str(i))
			#game_state.step(0.3, 0.2, 0.0)
			#game_state.reset()

			current_state = game_state.reset()
			##############################################################################################
			total_reward = 0
			
			for j in range(100):
				step = step +1
				#print("step is %s", step)


				###########################################################################################
				#print('wanted value is %s:', game_state.observation_space.shape[0])
				current_state = current_state.reshape((1, game_state.observation_space.shape[0]))
				action, eps = actor_critic.act(current_state)
				action = action.reshape((1, game_state.action_space.shape[0]))
				print("action is speed: %s, angular: %s", action[0][1], action[0][0])
				_, new_state, reward, done, _ = game_state.step(0.1, action[0][1]*5, action[0][0]*5) # we get reward and state here, then we need to calculate if it is crashed! for 'dones' value
				total_reward = total_reward + reward
				

	

	if (train_indicator==1):

		# actor_critic.actor_model.load_weights("actormodel-90-1000.h5")
		# actor_critic.critic_model.load_weights("criticmodel-90-1000.h5")
		for i in range(num_trials):
			print("trial:" + str(i))
			
			#game_state.step(0.3, 0.2, 0.0)
			#game_state.reset()
			

			_, current_state = game_state.reset()
			##############################################################################################
			total_reward = 0
			epinfos = []
			for j in range(trial_len):
				
				###########################################################################################
				#print('wanted value is %s:', game_state.observation_space.shape[0])
				current_state = current_state.reshape((1, game_state.observation_space.shape[0]))
				action, eps = actor_critic.act(current_state)
				print("action is speed: %s, angular: %s", action[0][1], action[0][0])
				_, new_state, reward, done, info = game_state.step(0.1, linear_x = action[0][1], angular_z = action[0][0]) # we get reward and state here, then we need to calculate if it is crashed! for 'dones' value
				total_reward = total_reward + reward
				###########################################################################################

				if j == (trial_len - 1):
					done = np.array([True]).reshape(game_state.num_robots, 1)
				
				
				step = step + 1
				#plot_reward(step,reward,ax,fig)
				#step_reward = np.append(step_reward,[step,reward])
				#step_start = time.time()
				#sio.savemat('step_reward.mat',{'data':step_reward},True,'5', False, False,'row')
				#print("step is %s", step)
				#print("info: {}".format(info[0]['episode']['r']))
				#Q_values = actor_critic.read_Q_values(current_state, action)
				#step_Q = np.append(step_Q,[step,Q_values[0][0]])
				#print("step_Q is %s", Q_values[0][0])
				#sio.savemat('step_Q.mat',{'data':step_Q},True,'5', False, False,'row')

				epinfos.append(info[0]['episode'])
				
				start_time = time.time()

				if (j % 5 == 0):
					actor_critic.train(j)
					actor_critic.update_target()   

				end_time = time.time()
				print("Train time: {}".format(end_time - start_time))
				#print("new_state: {}".format(new_state))
				new_state = new_state.reshape((1, game_state.observation_space.shape[0]))

				# print shape of current_state
				#print("current_state is %s", current_state)
				##########################################################################################
				actor_critic.remember(current_state, action, reward, new_state, done)
				actor_critic.replay_buffer.add(current_state, action, reward, new_state, done)
				current_state = new_state


				
				##########################################################################################
			if (i % 10==0):
				actor_critic.save_weight(i, trial_len)
			epinfobuf.extend(epinfos)
			tnow = time.time()
			#fps = int(nbatch / (tnow - tstart))
			
			##################################################
			##      Logging and saving model & weights      ##
			##################################################

			if i % log_interval == 0 or i == 0:
				#ev = explained_variance(values, returns)
				reward_mean = safemean([epinfo['r'] for epinfo in epinfobuf])
				logger_ins.logkv("serial_timesteps", i*trial_len)
				logger_ins.logkv("nupdates", i)
				logger_ins.logkv("total_timesteps", i*trial_len)
				logger_ins.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
				logger_ins.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
				logger_ins.logkv('time_elapsed', tnow - tfirststart)
				# for (lossval, lossname) in zip(lossvals, model.loss_names):
				#     logger_ins.logkv(lossname, lossval)
				# logger_ins.dumpkvs()
				# for (lossval, lossname) in zip(lossvals, model.loss_names):
				#     board_logger.log_scalar(lossname, lossval, update)
				board_logger.log_scalar("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf]), i)
				board_logger.flush()
				with open(HOME + '/catkin_ws/src/Turtlebot3_Pheromone/src/log/csv/{}.csv'.format(actor_critic.file_name), mode='a') as csv_file:
					csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
					csv_writer.writerow(['%i'%i, '%0.2f'%reward_mean])

		

	if train_indicator==0:
		for i in range(num_trials):
			print("trial:" + str(i))
			current_state = game_state.reset()
			
			actor_critic.actor_model.load_weights("actormodel-160-500.h5")  # "self.path +" dropped: self is undefined in this module-level function
			actor_critic.critic_model.load_weights("criticmodel-160-500.h5")
			##############################################################################################
			total_reward = 0
			
			for j in range(trial_len):

				###########################################################################################
				current_state = current_state.reshape((1, game_state.observation_space.shape[0]))

				start_time = time.time()
				action = actor_critic.play(current_state)  # need to change the network input output, do I need to change the output to be [0, 2*pi]
				action = action.reshape((1, game_state.action_space.shape[0]))
				end_time = time.time()
				print(1/(end_time - start_time), "fps for calculating next step")

				_, new_state, reward, done = game_state.step(0.1, action[0][1], action[0][0]) # we get reward and state here, then we need to calculate if it is crashed! for 'dones' value
				total_reward = total_reward + reward
				###########################################################################################

				if j == (trial_len - 1):
					done = 1
					#print("this is reward:", total_reward)
					

				# if (j % 5 == 0):
				# 	actor_critic.train()
				# 	actor_critic.update_target()   
				
				new_state = new_state.reshape((1, game_state.observation_space.shape[0]))
				# actor_critic.remember(cur_state, action, reward, new_state, done)   # remember all the data using memory, memory data will be samples to samples automatically.
				# cur_state = new_state

				##########################################################################################
				#actor_critic.remember(current_state, action, reward, new_state, done)
				current_state = new_state
Example #8
    def __init__(self, bookmarks):
        """ Analyze constructor """

        self.logger = logger.Logger(name=__name__, log_level=logger.INFO)
        self.my_logger = self.logger.logger
        self.my_logger.info('INIT')

        self.bookmarks = bookmarks
        self.schemes = []
        self.hostnames = []
        self.pathnames = {}
        self.host_protocols = {}
        self.domains = {}
        self.subdomains = {}
        self.domain_types = {}
        self.duplicates = {}
        self.deleted_bookmarks = []
        self.empty_bookmarks = []
        self.mobile_bookmarks = []
        self.processed_bookmarks = []
        self.file_bookmarks = {}
        self.keyword_database = Keywords()
        self.href_database = Keywords()

        #: a local copy of 'TheConfig' for ease of debugging
        self.the_config = TheConfig

        #: populated as we discover various sites
        self.host_sites = {section: [] for section in TheConfig.sections}

        #: bookmark menubar, populated as we parse the various sections
        self.menubar_ = {
            section:
            {topic: []
             for topic in TheConfig.sections[section].keys()}
            for section in TheConfig.sections
        }
        # bookmarks that appear at the head of the bookmark menubar
        self.menubar_['head'] = []
        # bookmarks that appear at the tail (end) of the bookmark menubar
        self.menubar_['tail'] = []
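
        # Illustrative resulting shape of menubar_ (hypothetical section and
        # topic names, not from the original configuration):
        #   {'personal': {'news': [], 'shopping': []},
        #    'work':     {'tools': []},
        #    'head': [], 'tail': []}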

        self.scan_bookmarks()
        self.delete_empty_bookmarks()
        self.build_keyword_dictionary()

        # build a list of bookmark hosts for all of the sections
        self.scan_bookmark_hosts()

        # build a list of bookmarks that reference a file
        self.scan_bookmarks_files()

        # scan bookmarks - head/tail items
        for site in TheConfig.menubar['head']:
            self.scan_bookmarks_site(site, self.menubar_['head'])
        for site in TheConfig.menubar['tail']:
            self.scan_bookmarks_site(site, self.menubar_['tail'])

        # scan bookmarks in the order specified in the configuration file
        try:
            for section in TheConfig.scanning_order:
                for topic in self.menubar_[section].keys():
                    self.my_logger.debug(f'Scanning: {section}/{topic}')
                    config_list = TheConfig.sections[section][topic]
                    scan_list = self.menubar_[section][topic]
                    self.scan_bookmarks_section(config_list, scan_list)
                pass
        except Exception as e:
            print(e)

        # remove any mobile bookmarks if a desktop site exists
        mobile_bookmark_values = len(self.mobile_bookmarks)
        for mobile_index in range(mobile_bookmark_values, 0, -1):
            bm_mobile = self.mobile_bookmarks[mobile_index - 1]
            # break bookmark url into parts and remove any 'm'
            bm_mobile_parts = bm_mobile.href_urlparts.hostname.split('.')
            bm_desktop = '.'.join(
                [bm_part for bm_part in bm_mobile_parts if bm_part != 'm'])
            # scan all bookmarks and check for a 'desktop' equivalent of the 'mobile' site
            for bm_desktop_key, bm_desktop_value in self.bookmarks.items():
                if not bm_desktop_value:
                    continue
                if mobile_index > len(self.mobile_bookmarks):
                    break
                # scan all bookmarks for bookmark desktop site
                bm_desktop_values = len(bm_desktop_value)
                for desktop_index in range(bm_desktop_values, 0, -1):
                    bm = bm_desktop_value[desktop_index - 1]
                    if bm.hostname == bm_desktop:
                        if bm.href_urlparts.hostname not in self.duplicates:
                            self.duplicates[bm.href_urlparts.hostname] = [
                                (bm.href_urlparts.path, bm)
                            ]
                            del self.mobile_bookmarks[mobile_index - 1]
                            break
                        else:
                            self.duplicates[bm.href_urlparts.hostname].append(
                                (bm.href_urlparts.path, bm))
                            del self.mobile_bookmarks[mobile_index - 1]
                            break

        # add any unscanned bookmarks to the miscellaneous section
        self.menubar_['misc']['misc'] = []
        for bm_key, bm_value in self.bookmarks.items():
            if not bm_value:
                continue
            for bm in bm_value:
                if not bm.scanned:
                    self.menubar_['misc']['misc'].append(bm)
                    bm.scanned = True

        self.delete_scanned_bookmarks()
        pass
Example #9
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import yaml
import ast

import logger

logger = logger.Logger()

CONFIG_SECS = [
    'train',
    'valid',
    'test',
    'infer',
]


class AttrDict(dict):
    """
    AttrDict
    """
    def __getattr__(self, key):
        return self[key]
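

# Illustrative use of AttrDict (not part of the excerpt):
#   cfg = AttrDict({'batch_size': 32})
#   cfg.batch_size  # -> 32, resolved through __getattr__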
Example #10
    def __init__(self, debug, silent, vformat):
        self.log = logger.Logger("FFmpeg", debug, silent)
        self.vformat = vformat
Example #11
device_cpu = torch.device('cpu')
device = torch.device('cuda') if args.cuda else device_cpu

# Create directory for checkpoint to be saved
if args.save:
    os.makedirs(os.path.split(args.save)[0], exist_ok=True)

# Set seeds
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed_all(args.seed)

# Visdom setup
log = logger.Logger(server=args.visdom_server,
                    port=args.visdom_port,
                    env_name=args.visdom_env)

# Create data loaders (return data in batches)
trainset_loader, valset_loader = \
    data.get_train_val_loaders(train_dir=args.train_dir,
                               max_trainset_size=args.max_trainset_size,
                               collate_fn=csv_collator,
                               height=args.height,
                               width=args.width,
                               seed=args.seed,
                               batch_size=args.batch_size,
                               drop_last_batch=args.drop_last_batch,
                               num_workers=args.nThreads,
                               val_dir=args.val_dir,
                               max_valset_size=args.max_valset_size)
Example #12
# Imports required by this snippet (not shown in the excerpt)
import os
import ConfigParser

import logger


class Config(object):
    def __init__(self, configFilePath):
        self.configFilePath = configFilePath
        self.configParser = ConfigParser.RawConfigParser()

    def _open(self):
        if os.path.isfile(self.configFilePath) == True:
            self.configParser.read(self.configFilePath)
            return True
        else:
            return False

    def get_value(self, section, key):
        return self.configParser.get(section, key)


class Mongo_config(Config):
    def __init__(self, configFilePath):
        super(Mongo_config, self).__init__(configFilePath)


# Write test code for reading the MONGO config file.
if __name__ == '__main__':
    config = Mongo_config('./plugin.conf')
    log = logger.Logger()
    if config._open() == True:
        log.info(config.get_value('MONGO', 'HOSTNAME'))
        log.info(config.get_value('MONGO', 'PORT'))
        log.info(config.get_value('MONGO', 'COLLECTION'))
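
# A minimal './plugin.conf' that would satisfy the test block above
# (placeholder values, not from the original project):
#   [MONGO]
#   HOSTNAME = localhost
#   PORT = 27017
#   COLLECTION = plugins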
Example #13
    def __init__(self):
        self.success = False
        self.target = None
        self.options = None
        self.project = None
        self.logger = logger.Logger()
Example #14
def main():
    time_str = time.strftime("%Y%m%d-%H%M%S")
    logger_ins = logger.Logger(
        '/home/sub/catkin_ws/src/Turtlebot3_Pheromone/src/log',
        output_formats=[logger.HumanOutputFormat(sys.stdout)])
    board_logger = tensorboard_logging.Logger(
        os.path.join(logger_ins.get_dir(), "tf_board", time_str))
    sess = tf.Session()
    K.set_session(sess)
    ########################################################
    game_state = phero_turtlebot_exp2.Env(
    )  # game_state has frame_step(action) function
    actor_critic = ActorCritic(game_state, sess)
    ########################################################
    num_trials = 5000
    trial_len = 256
    log_interval = 5
    train_indicator = 1
    tfirststart = time.time()
    # Double ended queue with max size 100 to store episode info
    epinfobuf = deque(maxlen=100)

    # Experiment related
    num_robots = game_state.num_robots

    current_state = game_state.reset()

    # actor_critic.read_human_data()

    step_reward = np.array([0, 0]).reshape(1, 2)
    #step_Q = [0,0]
    step = 0

    if (train_indicator == 2):
        for i in range(num_trials):
            print("trial:" + str(i))
            #game_state.step(0.3, 0.2, 0.0)
            #game_state.reset()

            current_state = game_state.reset()
            ##############################################################################################
            total_reward = 0

            for j in range(100):
                step = step + 1
                #print("step is %s", step)

                ###########################################################################################
                #print('wanted value is %s:', game_state.observation_space.shape[0])
                current_state = current_state.reshape(
                    (1, game_state.observation_space.shape[0]))
                action, eps = actor_critic.act(current_state)
                action = action.reshape((1, game_state.action_space.shape[0]))
                print("action is speed: %s, angular: %s", action[0][1],
                      action[0][0])
                _, new_state, reward, done, _ = game_state.step(
                    0.1, action[0][1] * 5, action[0][0] * 5
                )  # we get reward and state here, then we need to calculate if it is crashed! for 'dones' value
                total_reward = total_reward + reward

    if (train_indicator == 1):

        # actor_critic.actor_model.load_weights("actormodel-90-1000.h5")
        # actor_critic.critic_model.load_weights("criticmodel-90-1000.h5")
        for i in range(num_trials):
            print("trial:" + str(i))

            #game_state.step(0.3, 0.2, 0.0)
            #game_state.reset()
            ''' Get states of multiple robots (num_robots x num_states) '''
            _, current_states = game_state.reset()
            ##############################################################################################
            #total_reward = 0
            epinfos = []
            for j in range(trial_len):

                ###########################################################################################
                #print('wanted value is %s:', game_state.observation_space.shape[0])
                current_states = current_states.reshape(
                    (num_robots, game_state.observation_space.shape[0]))
                ''' how can I solve this? using for loop iterating over num_robots?'''
                # num_robots
                actions = []
                for k in range(num_robots):
                    action, eps = actor_critic.act(current_states[k])
                    action = action.reshape(
                        (1, game_state.action_space.shape[0]))
                    actions.append(action)
                actions = np.squeeze(np.asarray(actions))
                #print("Actions: {}".format(actions))
                #print("action is speed: %s, angular: %s", action[0][1], action[0][0])
                _, new_states, rewards, dones, infos = game_state.step(
                    actions, 0.1
                )  # we get reward and state here, then we need to calculate if it is crashed! for 'dones' value
                #print("Rewards: {}".format(rewards))
                #total_reward = total_reward + reward
                ###########################################################################################

                if j == (trial_len - 1):
                    done = 1
                    #print("this is reward:", total_reward)
                    #print('eps is', eps)

                step = step + 1
                #plot_reward(step,reward,ax,fig)
                #step_reward = np.append(step_reward,[step,reward])
                #step_start = time.time()
                #sio.savemat('step_reward.mat',{'data':step_reward},True,'5', False, False,'row')
                #print("step is %s", step)
                #print("info: {}".format(info[0]['episode']['r']))
                #Q_values = actor_critic.read_Q_values(current_state, action)
                #step_Q = np.append(step_Q,[step,Q_values[0][0]])
                #print("step_Q is %s", Q_values[0][0])
                #sio.savemat('step_Q.mat',{'data':step_Q},True,'5', False, False,'row')
                #print("Train_step time: {}".format(time.time() - step_start))

                epinfos.append(infos[0]['episode'])

                start_time = time.time()

                if (j % 5 == 0):
                    actor_critic.train()
                    actor_critic.update_target()

                end_time = time.time()
                print("Train time: {}".format(end_time - start_time))
                #print("new_state: {}".format(new_state))
                new_states = new_states.reshape(
                    (num_robots, game_state.observation_space.shape[0]))

                # print shape of current_state
                #print("current_state is %s", current_state)
                ##########################################################################################
                actor_critic.remember(current_states, actions, rewards,
                                      new_states, dones)
                #actor_critic.replay_buffer.add(current_states, actions, rewards, new_states, dones)
                current_states = new_states

                ##########################################################################################
            if (i % 10 == 0):
                actor_critic.save_weight(i, trial_len)
            epinfobuf.extend(epinfos)
            tnow = time.time()
            #fps = int(nbatch / (tnow - tstart))

            ##################################################
            ##      Logging and saving model & weights      ##
            ##################################################

            if i % log_interval == 0 or i == 0:
                #ev = explained_variance(values, returns)
                reward_mean = safemean([epinfo['r'] for epinfo in epinfobuf])
                logger_ins.logkv("serial_timesteps", i * trial_len)
                logger_ins.logkv("nupdates", i)
                logger_ins.logkv("total_timesteps", i * trial_len)
                logger_ins.logkv(
                    'eprewmean',
                    safemean([epinfo['r'] for epinfo in epinfobuf]))
                logger_ins.logkv(
                    'eplenmean',
                    safemean([epinfo['l'] for epinfo in epinfobuf]))
                logger_ins.logkv('time_elapsed', tnow - tfirststart)
                # for (lossval, lossname) in zip(lossvals, model.loss_names):
                #     logger_ins.logkv(lossname, lossval)
                # logger_ins.dumpkvs()
                # for (lossval, lossname) in zip(lossvals, model.loss_names):
                #     board_logger.log_scalar(lossname, lossval, update)
                board_logger.log_scalar(
                    "eprewmean",
                    safemean([epinfo['r'] for epinfo in epinfobuf]), i)
                board_logger.flush()
                print("num_trial")
                step_reward = np.append(step_reward,
                                        [[num_trials, reward_mean]],
                                        axis=0)
        sio.savemat(
            '/home/sub/catkin_ws/src/Turtlebot3_Pheromone/src/log/MATLAB/step_reward_{}.mat'
            .format(time_str), {'data': step_reward}, True, '5', False,
            False, 'row')  # uses the local time_str; the original "self.time_str" is undefined in this function

    if train_indicator == 0:
        for i in range(num_trials):
            print("trial:" + str(i))
            current_state = game_state.reset()

            actor_critic.actor_model.load_weights("actormodel-160-500.h5")
            actor_critic.critic_model.load_weights("criticmodel-160-500.h5")
            ##############################################################################################
            total_reward = 0

            for j in range(trial_len):

                ###########################################################################################
                current_state = current_state.reshape(
                    (1, game_state.observation_space.shape[0]))

                start_time = time.time()
                action = actor_critic.play(
                    current_state
                )  # need to change the network input output, do I need to change the output to be [0, 2*pi]
                action = action.reshape((1, game_state.action_space.shape[0]))
                end_time = time.time()
                print(1 / (end_time - start_time),
                      "fps for calculating next step")

                _, new_state, reward, done = game_state.step(
                    0.1, action[0][1], action[0][0]
                )  # we get reward and state here, then we need to calculate if it is crashed! for 'dones' value
                total_reward = total_reward + reward
                ###########################################################################################

                if j == (trial_len - 1):
                    done = 1
                    print("this is reward:", total_reward)

                # if (j % 5 == 0):
                # 	actor_critic.train()
                # 	actor_critic.update_target()

                new_state = new_state.reshape(
                    (1, game_state.observation_space.shape[0]))
                # actor_critic.remember(cur_state, action, reward, new_state, done)   # remember all the data using memory, memory data will be samples to samples automatically.
                # cur_state = new_state

                ##########################################################################################
                #actor_critic.remember(current_state, action, reward, new_state, done)
                current_state = new_state
Example #15
        args = sys.argv[:]
        log_to_stdout = True
        log_file_name = 'build'
        for arg in args:
            if arg.lower() == '--no_stdout':
                log_to_stdout = False
                sys.argv.remove(arg)
            elif arg.lower().startswith('--log_name'):
                log_file_name = arg.rsplit('=', 1)[1]
                sys.argv.remove(arg)

        #================================
        # Initialize logging
        #================================

        lg = logger.Logger(log_file_name, log_to_stdout=log_to_stdout)

        lg.log(
            "#-------------------------------------------------------------------------------"
        )
        lg.log("# BUILD START")
        lg.log(
            "#-------------------------------------------------------------------------------"
        )
        start_time = time.time()
        lg.log("Start Time: " + time.ctime(start_time))
        lg.log(
            "Running $Header: //source/qcom/qct/images/scons/qc/rel/1.0/tools/build/scons/build/build.py#8 $"
        )

        # Call the build function
Example #16
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with kmotion.  If not, see <http://www.gnu.org/licenses/>.
"""
Exports various methods used to initialize motion configuration. These methods
have been moved to this separate module to reduce issues when the motion API
changes. All changes should be in just this module.
"""

import os, ConfigParser
import logger, mutex

log_level = 'WARNING'
logger = logger.Logger('init_motion', log_level)


def gen_motion_configs(kmotion_dir):
    """
    Generates the motion.conf and thread??.conf files from www_rc and virtual
    motion conf files
            
    args    : kmotion_dir ... the 'root' directory of kmotion
    excepts : 
    return  : none
    """

    # delete all files in motion_conf skipping .svn directories
    for del_file in [
            del_file
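            # (continuation sketch -- the excerpt is cut off mid-comprehension;
            # the motion_conf location below is an assumption) list everything
            # under motion_conf and skip '.svn':
            for del_file in os.listdir(os.path.join(kmotion_dir, 'core', 'motion_conf'))
            if del_file != '.svn'
    ]:
        os.unlink(os.path.join(kmotion_dir, 'core', 'motion_conf', del_file))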
Example #17
def main(argv):
    global client

    def err():
        print("Usage: %s [TCP/UDP]" % argv[0])
        sys.exit()

    if len(sys.argv) == 2:
        if sys.argv[1].lower() == "tcp":
            # remote_list returns a array of tuples (hostname, port) from servers.txt
            remote_list, _ = remotes.create_remote_list()
            client = ClientTCP(remote_list)
        elif sys.argv[1].lower() == "udp":
            client = ClientUDP()
        else:
            err()
    else:
        err()

    # Create Loggers
    global g_ClientLog
    # serverLogFile = open("Client.log", "w")
    # g_ClientLog = logger.Logger(serverLogFile)
    g_ClientLog = logger.Logger(sys.stdout)
    g_ClientLog.header("Client")

    while True:
        # read the arithmetic expression typed by the user.
        expression = input(
            "Type an arithmetic expression. Example: 1+1, (13+1)*2, 5^3\n")

        client.send_exp(expression)

        received_result = False
        try:
            result = client.receive_result()
            received_result = True
            # print expression result received from server
            if result == "exception":
                print(
                    "[Client   ] An exception was detected. Try a valid mathematical expression"
                )
            elif result == "zero division":
                print(
                    "[Client   ] A division by zero was detected. Try a valid mathematical expression"
                )
            else:
                print("result = " + result)
            sys.stdout.flush()
        except socket.timeout:
            g_ClientLog.print("[Client   ] Timeout")
            print("Server timeout, try again")
            sys.stdout.flush()
            pass
        except Exception as e:  # Other exception
            g_ClientLog.print("[Client   ] Exception: " + str(e))

        if not received_result:
            g_ClientLog.print("[Client   ] No server could respond")
            print("No response")
            sys.stdout.flush()

        client.close()
Example #18
# Importing packages
import logger
import sys
import os
import os.path
import psutil
import urllib
import query

if sys.version[0] == '2':
    reload(sys)
    sys.setdefaultencoding('utf8')

pid = psutil.Process().pid
logger_obj = logger.Logger(filename='rdf-query-interface.log', instance_id=pid)

logger_obj.write_log('RDF query interface program has started with process id: '+str(pid), to_print=False)

# validate the argument count before indexing into sys.argv
if len(sys.argv) != 6:
    logger_obj.write_log('Program needs query_or_number, query and config', to_print=False)
    exit()

file_name = sys.argv[5]
query_obj = query.Query(file_name)

query_or_number  = int(sys.argv[1])
query = sys.argv[2]
config = sys.argv[3]
show_result = sys.argv[4]
respFile = ''
Example #19
""" integration step """
dt_log = 0.1
""" logging step """
dt_vis = 1 / 60
""" visualization frame step """
t = 0
""" time variable """

# Initialize the logger & plotter
##########################################################
ts = time.time()
name = name1 + "_" + name2 + datetime.datetime.fromtimestamp(ts).strftime(
    "_%Y%m%d%H%M%S")
""" Name of run to save to log files and plots """
fullname = "logs/" + name
logger = logger.Logger(fullname, name)
plotter = plotter.Plotter()

# Initialize the visualization
#########################################################
panda3D_app = pandaapp.Panda3DApp(plus, qrb, ref, 3)
readkeys = pandaapp.ReadKeys(ref, 3, panda3D_app)

# The main simulation loop
#########################################################
while readkeys.exitpressed is False:

    #------------------------------------begin controller --------------------------------------------
    if abs(t / pos_controller.dt_ctrl_pos_p -
           round(t / pos_controller.dt_ctrl_pos_p)) < 0.000001:
Example #20
import threading
import socket
import time
import datetime
import sys
from louie import dispatcher

import crc
import logger
import event
import error
from utils import *

log = logger.Logger('Tello')

START_OF_PACKET = 0xcc
WIFI_MSG = 0x1a
VIDEO_RATE_QUERY = 40
LIGHT_MSG = 53
FLIGHT_MSG = 0x56
LOG_MSG = 0x1050

VIDEO_ENCODER_RATE_CMD = 0x20
VIDEO_START_CMD = 0x25
EXPOSURE_CMD = 0x34
TIME_CMD = 70
STICK_CMD = 80
TAKEOFF_CMD = 0x0054
LAND_CMD = 0x0055
FLIP_CMD = 0x005c
Example #21
import os
import re
import signal
import sys
import time

sys.path.append("..")

import requests
from retrying import retry

import defaults, logger, utils
from BloomFilterRedis_ex.BloomfilterOnRedis import BloomFilterRedis
from BloomFilterRedis_ex.connection import bloom_filter_from_defaults

spider_name = os.path.split(__file__)[1].split('.')[0]
logger = logger.Logger(spider_name)

full_PID_file_name = os.path.join(defaults.PIDS, defaults.PID_FILE_NAME) \
                     % {'spider_name': spider_name, 'tm': defaults.TM}
full_data_file_name = os.path.join(defaults.DATA_PATH, defaults.DATA_FILE_NAME) \
                      % {'spider_name': spider_name, 'tm': defaults.TM}

exit_signal = False
RETRY_TIMES = 5  # number of retries after a network request timeout
ItemId = '71'  # Baidu
API_URL = 'http://www.gx-gj.com:9180/service.asmx/'


class GXguojiCrawl(object):  # Gongxiang Guoji ("Sharing International")
    name = 'gongxiangguoji'
    redis_server = bloom_filter_from_defaults(defaults.BLOOM_REDIS_URL)
Example #22
    def __init__(self, name):
        super(foo, self).__init__(name=name)
        self.logger = logger.Logger(name)
        self.logger.cls_set_glo_file("global.log", clear=True)
        self.logger.set_loc_file("%s.log" % name, clear=True)
Example #23
import actions
import logger

import testsamples
import test_engine
from test_engine import Table, Column

log = logger.Logger(__name__, logger.INFO)

class TestDocModel(test_engine.EngineTestCase):

  def test_meta_tables(self):
    """
    Test changes to records accessed via lookup.
    """
    self.load_sample(testsamples.sample_students)
    self.assertPartialData("_grist_Tables", ["id", "columns"], [
      [1,   [1,2,4,5,6]],
      [2,   [10,12]],
      [3,   [21]],
    ])

    # Test that adding a column produces a change to 'columns' without emitting an action.
    out_actions = self.add_column('Students', 'test', type='Text', isFormula=False)
    self.assertPartialData("_grist_Tables", ["id", "columns"], [
      [1,   [1,2,4,5,6,22]],
      [2,   [10,12]],
      [3,   [21]],
    ])
    self.assertPartialOutActions(out_actions, {
      "calc": [],
Example #24
    def learn(self):
        # For logging
        time_str = time.strftime("%Y%m%d-%H%M%S")
        logger_ins = logger.Logger(
            '/home/swn/catkin_ws/src/turtlebot3_waypoint_navigation/src/log',
            output_formats=[logger.HumanOutputFormat(sys.stdout)])
        board_logger = tensorboard_logging.Logger(
            os.path.join(logger_ins.get_dir(), "tf_board", time_str))

        # reassigning the members of class into this function for simplicity
        total_timesteps = int(self.total_timesteps)
        nenvs = 1
        #nenvs = env.num_envs # for multiple instance training
        ob_space = self.env.observation_space
        ac_space = self.env.action_space
        nbatch = nenvs * self.nsteps
        nminibatches = self.nminibatches
        nbatch_train = nbatch // nminibatches
        noptepochs = self.noptepochs
        nsteps = self.nsteps
        save_interval = self.save_interval
        log_interval = self.log_interval
        restore_path = self.restore_path
        gamma = self.gamma
        lam = self.lam
        lr = self.lr
        cliprange = self.cliprange
        deterministic = self.deterministic

        # Define a function to make Actor-Critic Model
        make_model = lambda: Model(policy=self.policy,
                                   ob_space=ob_space,
                                   ac_space=ac_space,
                                   nbatch_act=nenvs,
                                   nbatch_train=nbatch_train,
                                   nsteps=self.nsteps,
                                   ent_coef=self.ent_coef,
                                   vf_coef=self.vf_coef,
                                   max_grad_norm=self.max_grad_norm,
                                   deterministic=self.deterministic)

        # Save function
        # if save_interval and logger_ins.get_dir():
        #     import cloudpickle
        #     with open(osp.join(logger_ins.get_dir(), 'make_model.pkl'), 'wb') as fh:
        #         fh.write(cloudpickle.dumps(make_model))

        # Make a model
        model = make_model()

        # Restore when the path is provided
        if restore_path is not None:
            model.restore(restore_path)

        # Create a runner instance (generating samples with nsteps)
        runner = Runner(env=self.env,
                        model=model,
                        nsteps=nsteps,
                        gamma=gamma,
                        lam=lam)

        # Double ended queue with max size 100 to store episode info
        epinfobuf = deque(maxlen=100)

        # Get the start time
        tfirststart = time.time()

        # Calculate # for update (iteration)
        nupdates = total_timesteps // nbatch
        assert (nupdates > 0)
        '''
        PPO (iterating)
        1. Run policy in the environment for T timesteps
        2. Compute advantage estimates (in Model class)
        3. Optimise Loss w.r.t weights of policy, with K epochs and minibatch size M < N (# of actors) * T (timesteps)
        4. Update weights (in Model class)
        '''
        step_reward = np.array([0, 0]).reshape(1, 2)
        # In every update, one loop of PPO algorithm is executed
        for update in range(1, nupdates + 1):

            # INITIALISE PARAMETERS
            assert nbatch % nminibatches == 0
            nbatch_train = nbatch // nminibatches
            tstart = time.time()
            frac = 1.0 - (update - 1.0) / nupdates
            lrnow = lr(frac)
            cliprangenow = cliprange(frac)

            # 1. Run policy and get samples for nsteps
            ids, obs, returns, masks, actions, values, neglogpacs, states, epinfos, rewards = runner.run(
            )
            epinfobuf.extend(epinfos)
            mblossvals = []

            # Do not train or log if in deterministic mode:
            if deterministic:
                continue

            # 3. Optimise Loss w.r.t weights of policy, with K epochs and minibatch size M < N (# of actors) * T (timesteps)
            if states is None:  # nonrecurrent version
                inds = np.arange(nbatch)
                # Update weights using the optimiser for noptepochs epochs
                for _ in range(noptepochs):
                    # In each epoch, update weights using samples every minibatch in the total batch
                    # epoch = m(32)*minibatch(4)

                    for start in range(0, nbatch, nbatch_train):
                        end = start + nbatch_train
                        mbinds = inds[start:end]
                        # 4. Update weights
                        mblossvals.append(
                            model.train(lrnow, cliprangenow, ids[mbinds],
                                        [obs[i]
                                         for i in mbinds], returns[mbinds],
                                        masks[mbinds], actions[mbinds],
                                        values[mbinds], neglogpacs[mbinds]))

            else:  # recurrent version
                assert nenvs % nminibatches == 0
                envsperbatch = nenvs // nminibatches
                envinds = np.arange(nenvs)
                flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
                envsperbatch = nbatch_train // nsteps
                for _ in range(noptepochs):
                    #np.random.shuffle(envinds)
                    for start in range(0, nenvs, envsperbatch):
                        end = start + envsperbatch
                        mbenvinds = envinds[start:end]
                        mbflatinds = flatinds[mbenvinds].ravel()
                        slices = (arr[mbflatinds]
                                  for arr in (obs, returns, masks, actions,
                                              values, neglogpacs))
                        mbstates = states[mbenvinds]
                        mblossvals.append(
                            model.train(lrnow, cliprangenow,
                                        [obs[i]
                                         # use the flat minibatch indices; "mbinds" only exists in the nonrecurrent branch
                                         for i in mbflatinds], returns[mbflatinds],
                                        masks[mbflatinds], actions[mbflatinds],
                                        values[mbflatinds],
                                        neglogpacs[mbflatinds], mbstates))

            # Calculate mean loss
            lossvals = np.mean(mblossvals, axis=0)

            tnow = time.time()
            fps = int(nbatch / (tnow - tstart))
            '''
            Logging and saving model & weights
            '''
            if update % log_interval == 0 or update == 1:
                #ev = explained_variance(values, returns)
                logger_ins.logkv("serial_timesteps", update * nsteps)
                logger_ins.logkv("nupdates", update)
                logger_ins.logkv("total_timesteps", update * nbatch)
                logger_ins.logkv("fps", fps)
                #logger.logkv("explained_variance", float(ev))
                logger_ins.logkv(
                    'eprewmean',
                    self.safemean([epinfo['r'] for epinfo in epinfobuf]))
                logger_ins.logkv(
                    'eplenmean',
                    self.safemean([epinfo['l'] for epinfo in epinfobuf]))
                logger_ins.logkv('time_elapsed', tnow - tfirststart)
                for (lossval, lossname) in zip(lossvals, model.loss_names):
                    logger_ins.logkv(lossname, lossval)
                logger_ins.dumpkvs()

                for (lossval, lossname) in zip(lossvals, model.loss_names):
                    board_logger.log_scalar(lossname, lossval, update)
                board_logger.log_scalar(
                    "eprewmean",
                    self.safemean([epinfo['r'] for epinfo in epinfobuf]),
                    update)
                board_logger.flush()

                reward_arr = np.asarray([epinfo['r'] for epinfo in epinfobuf])
                reward_new = np.delete(reward_arr, np.where(reward_arr == 0.0))
                step_reward = np.append(step_reward, [[
                    update,
                    self.safemean([reward for reward in reward_new])
                ]],
                                        axis=0)
                sio.savemat(
                    '/home/swn/catkin_ws/src/turtlebot3_waypoint_navigation/src/log/MATLAB/step_reward_{}.mat'
                    .format(time_str), {'data': step_reward}, True, '5', False,
                    False, 'row')

            if save_interval and (update % save_interval == 0
                                  or update == 1) and logger_ins.get_dir():
                checkdir = osp.join(logger_ins.get_dir(), 'checkpoints')
                if not os.path.isdir(checkdir):
                    os.makedirs(checkdir)
                savepath = osp.join(
                    checkdir, '%.5i' % update + "r" + "{:.2f}".format(
                        self.safemean([epinfo['r'] for epinfo in epinfobuf])))
                print('Saving to', savepath)
                model.save(savepath)
        print("Done with training. Exiting.")
        self.env.close()
        return model
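
The docstring inside learn() above lists the PPO steps. For reference, here is a
minimal NumPy sketch of the clipped surrogate objective that the model.train
call is assumed to optimise (illustrative only; the actual loss also carries
value-function and entropy terms, and the names below are not from the original
code):

import numpy as np

def ppo_clip_objective(new_logp, old_logp, advantages, cliprange):
    # probability ratio between the updated policy and the sampling policy
    ratio = np.exp(new_logp - old_logp)
    # unclipped and clipped surrogate terms
    unclipped = ratio * advantages
    clipped = np.clip(ratio, 1.0 - cliprange, 1.0 + cliprange) * advantages
    # PPO maximises the elementwise minimum of the two, averaged over samples
    return np.mean(np.minimum(unclipped, clipped))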
Example #25
 def test_rebalance_in(self):
     log = logger.Logger().get_logger()
     master = self._servers[0]
     num_of_docs = TestInputSingleton.input.param("num_of_docs", 100000)
     replica = TestInputSingleton.input.param("replica", 100000)
     add_items_count = TestInputSingleton.input.param(
         "num_of_creates", 30000)
     rebalance_in = TestInputSingleton.input.param("rebalance_in", 1)
     size = TestInputSingleton.input.param("item_size", 256)
     params = {
         "sizes": [size],
         "count": num_of_docs,
         "seed": str(uuid.uuid4())[:7]
     }
     RebalanceBaseTest.common_setup(self._input, self, replica=1)
     rest = RestConnection(master)
     buckets = rest.get_buckets()
     bucket_data = {}
     generators = {}
     for bucket in buckets:
         bucket_data[bucket.name] = {"kv_store": ClientKeyValueStore()}
     while len(rest.node_statuses()) < len(self._servers):
         for bucket in buckets:
             kv_store = bucket_data[bucket.name]["kv_store"]
             add_items_seed = str(uuid.uuid4())[:7]
             self._add_items(add_items_seed, bucket, add_items_count,
                             kv_store)
             errors = RebalanceDataGenerator.do_verification(
                 kv_store, rest, bucket.name)
             if errors:
                 log.error("verification returned {0} errors".format(
                     len(errors)))
             load_set_ops = {"ops": "set", "bucket": bucket.name}
             load_set_ops.update(params)
             load_delete_ops = {
                 "ops": "delete",
                 "bucket": bucket.name,
                 "sizes": [size],
                 "count": add_items_count / 5,
                 "seed": add_items_seed
             }
             thread = RebalanceDataGenerator.start_load(
                 rest, bucket.name,
                 RebalanceDataGenerator.create_loading_tasks(load_set_ops),
                 kv_store)
             generators["set"] = {"thread": thread}
             #restart three times
             generators["set"]["thread"].start()
             thread = RebalanceDataGenerator.start_load(
                 rest, bucket.name,
                 RebalanceDataGenerator.create_loading_tasks(
                     load_delete_ops), kv_store)
             generators["delete"] = {"thread": thread}
             generators["delete"]["thread"].start()
         self.log.info("current nodes : {0}".format(
             [node.id for node in rest.node_statuses()]))
         rebalanced_in, which_servers = RebalanceBaseTest.rebalance_in(
             self._servers, rebalance_in)
         self.assertTrue(rebalanced_in,
                         msg="unable to add and rebalance more nodes")
         for bucket in buckets:
             kv_store = bucket_data[bucket.name]["kv_store"]
             errors = RebalanceDataGenerator.do_verification(
                 kv_store, rest, bucket.name)
             if errors:
                 log.error("verification returned {0} errors".format(
                     len(errors)))
         generators["set"]["thread"].join()
         generators["delete"]["thread"].join()
         for bucket in buckets:
             kv_store = bucket_data[bucket.name]["kv_store"]
             bucket_data[bucket.name]["items_inserted_count"] = len(
                 kv_store.valid_items())
             RebalanceBaseTest.replication_verification(
                 master, bucket_data, replica, self)
Example #26
def main(argc, argv):

    argc = len(argv)
    if (argc < 2):

    #command-line argument codes.
    #-i:config file.
    #-c:command codes
    #-m:mosaic dataset name
    #-s:Source data paths. (as inputs to command (AR/AR)
    #-l:Full path to log file (including file name)

        user_args = \
        [
        "-m: Mosaic dataset path including GDB and MD name [e.g. c:\WorldElevation.gdb\Portland]",
        "-s: Source data paths. (As inputs to command (AR)",
        "-l: Log file output path [path+file name]",
        "-artdem: Update DEM path in ART file"
        ]

        print "\nMDCS.py v5.6 [20130801]\nUsage: MDCS.py -c:<Optional:command> -i:<config_file>" \
        "\n\nFlags to override configuration values," \

        for arg in user_args:
            print arg

        print \
        "\nNote: Commands can be combined with '+' to do multiple operations." \
        "\nAvailable commands:"

        user_cmds = solutionsLib.Solutions().getAvailableCommands()
        for key in user_cmds:
            print "\t" + key + ' = ' + user_cmds[key]['desc']

        sys.exit(1)


    base = Base.Base()
    global log
    log = logger.Logger();
    base.setLog(log)


    argIndx = 1
    md_path_ = artdem = config = com = log_folder = ''

    while(argIndx < argc):
        (values) = argv[argIndx].split(':')
        if (len(values[0]) < 2 or
            values[0][:1] != '-' and
            values[0][:1] != '#'):
            argIndx += 1
            continue

        exSubCode = values[0][1:len(values[0])].lower()
        subCode = values.pop(0)[1].lower()

        value = ':'.join(values).strip()

        if (subCode == 'c'):
            com = value.replace(' ', '')        #remove spaces in between.
        elif(subCode == 'i'):
            config = value
        elif(subCode == 'm'):
            md_path_ = value
        elif(subCode == 's'):
            base.m_sources = value
        elif(subCode == 'l'):
            log_folder =  value
        elif(exSubCode == 'artdem'):
            artdem =  value
        elif(subCode == 'p'):
            pMax = value.rfind('$')
            if (pMax == -1):
                argIndx += 1
                continue

            dynamic_var = value[pMax + 1:].upper()
            v =  value[0: pMax]
            if (dynamic_var.strip() != ''):
                if (base.m_dynamic_params.has_key(dynamic_var) == False):
                    base.m_dynamic_params[dynamic_var] = v

        argIndx += 1


    if (md_path_ != ''):
        (p, f) = os.path.split(md_path_)
        f = f.strip()
        const_gdb_ext_len_ = len(base.const_geodatabase_ext)
        ext = p[-const_gdb_ext_len_:].lower()
        if ((ext == base.const_geodatabase_ext.lower() or
            ext == base.const_geodatabase_SDE_ext.lower()) and
            f != ''):
            p = p.replace('\\', '/')
            w = p.split('/')
            workspace_ = ''
            for i in range(0, len(w) - 1):
                workspace_ += w[i] + '/'

            gdb_ = w[len(w) -1]
            base.m_workspace = workspace_
            base.m_geodatabase = w[len(w) - 1]
            base.m_mdName = f


    if (os.path.isfile(config) == False):
        errMessage = u"Error: Input config file is not specified/not found! " + config
        arcpy.AddMessage(errMessage)
        return False


    if (artdem != ''):
        (base.m_art_ws, base.m_art_ds) = os.path.split(artdem)
        base.m_art_apply_changes = True


    comInfo = {
    'AR' : { 'cb' : postAddData }       #assign a callback function to run custom user code when adding rasters.
    }


    configName, ext = os.path.splitext(config)
    configName = os.path.basename(configName)



    if (com == ''):
        com = base.const_cmd_default_text

    if (argv[1].lower() == '#gprun'):
        log.isGPRun = True
    log.Project ('MDCS')
    log.LogNamePrefix(configName)
    log.StartLog()

    log_output_folder  = os.path.join(os.path.dirname(solutionLib_path), 'logs')

    if (log_folder != ''):
        (path, fileName) = os.path.split(log_folder)
        if (path != ''):
            log_output_folder = path
        if (fileName != ''):
            log.LogFileName(fileName)

    log.SetLogFolder(log_output_folder)
    solutions = solutionsLib.Solutions(base)
    success = solutions.run(config, com, comInfo)

    log.WriteLog('#all')   #persist information/errors collected.

    print "Done..."
Example #27
import argparse
import collections
from flask import Flask
from flask import request
import json
import logger as doctor_log
import threading
import time

from keystoneauth1 import session
import novaclient.client as novaclient

import identity_auth

LOG = doctor_log.Logger('doctor_inspector').getLogger()


class ThreadedResetState(threading.Thread):
    def __init__(self, nova, state, server):
        threading.Thread.__init__(self)
        self.nova = nova
        self.state = state
        self.server = server

    def run(self):
        self.nova.servers.reset_state(self.server, self.state)
        LOG.info('doctor mark vm(%s) error at %s' % (self.server, time.time()))


class DoctorInspectorSample(object):
Example #28
    def __init__(self, logFilePath=None):
        """Init the custom logger"""
        self.logger = logger.Logger(logFilePath)
Example #29
    def _construct(self):
        self.logger = logger.Logger(FILEPATH, LOGGER)
Example #30
import os
import time

import write2dashboard
from optparse import OptionParser
from datetime import datetime
# note: this excerpt also relies on a logging module bound to the name "log"
# (used below as log.Logger); its import is not shown here.
#log.disable(log.logger.info) #disable log

BASEDIR = os.path.dirname(os.path.abspath(__file__))
#BASEDIR = "C:\\Simon\\CrickTest"
SWITCHCONFIG = BASEDIR + "\\ConfigResolutionData.xlsx"
INPCOUNT = 0  #count input protocal pass number
OUTPCOUNT = 0  #count output protocal pass number
INFCOUNT = 0  #count input protocal fail number
OUTFCOUNT = 0  #count output protocal fail number
PATPCOUNT = 0  #count output pattern pass number
PATFCOUNT = 0  #count output pattern fail number
logname = BASEDIR + "\\log\\" + time.strftime("%Y%m%d_%H%M%S") + ".log"
log = log.Logger(logname)


def executeTest(cmdoptions, cmdargs):
    """
    Execute the test;
    :param cmdoptions:
    :param cmdargs:
    :return:
    """
    global INPCOUNT, OUTPCOUNT, INFCOUNT, OUTFCOUNT, PATPCOUNT, PATFCOUNT, TESTPARAS, outport, outporttype
    print("Your Test will be start after 3 seconds, please wait...")
    loadProcess()
    starttime = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
    time1 = datetime.now()
    #Get config vars from the data sheet: