Example 1
	def __call__(cls, *args, **kwargs):
		if cls._instances_cache is None and cls._instances_session is None:
			super(SingletonDogpile, cls).__call__(*args, **kwargs)
			conf = config.Config()

			file_path = conf.get("dogpile", "cache.file.arguments.filename")
			parent_path = os.path.dirname(file_path)
			if not os.path.exists(parent_path):
				os.makedirs(parent_path)

			dogpile_conf = {
				"cache.memory.backend": conf.get("dogpile", "cache.memory.backend"),
				"cache.memory.expiration_time": conf.getint("dogpile", "cache.memory.expiration_time"),
				"cache.file.backend": conf.get("dogpile", "cache.file.backend"),
				"cache.file.expiration_time": conf.getint("dogpile", "cache.file.expiration_time"),
				"cache.file.arguments.filename": conf.get("dogpile", "cache.file.arguments.filename"),

				"session.memory.backend": conf.get("dogpile", "session.memory.backend"),
				"session.memory.expiration_time": conf.getint("dogpile", "session.memory.expiration_time"),
				"session.file.backend": conf.get("dogpile", "session.file.backend"),
				"session.file.expiration_time": conf.get("dogpile", "session.file.expiration_time"),
				"session.file.arguments.filename": conf.get("dogpile", "session.file.arguments.filename")
			}
			cls._instances_cache = make_region().configure_from_config(dogpile_conf, "cache.file.")
			cls._instances_session = make_region().configure_from_config(dogpile_conf, "session.file.")
		return cls._instances_cache, cls._instances_session
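
In this example the metaclass call returns the two configured dogpile.cache regions rather than a class instance, so every call after the first reuses the same pair. Below is a minimal usage sketch under the assumption that SingletonDogpile derives from type and initialises _instances_cache and _instances_session to None; the DogpileRegions class name is illustrative.

# Hypothetical usage sketch (Python 3 metaclass syntax).
class DogpileRegions(metaclass=SingletonDogpile):
    pass

cache_region, session_region = DogpileRegions()   # first call reads the config and builds both regions
cache_region.set("answer", 42)                    # the regions behave like normal dogpile.cache regions
assert DogpileRegions()[0] is cache_region        # later calls return the same cached pair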
Example 2
    def __init__(self, bot):
        self.bot = bot
        self.conf = config.Config()
        self.logging_settings = {
            "log_user_join": True,
            "log_user_remove": True,
            "log_user_ban": True,
            "log_message_delete": False,
            "log_message_edit": False
        }
Example 3
    def initialize_for_batch_load(self):
        mname = "initiliaze_for_batch_load"

        self.log(mname,
                 "Loading Dataframe from [{}]".format(
                     self.train_label_data_file),
                 level=3)
        self.df = pd.read_csv(self.train_label_data_file)
        self.log(mname,
                 "Loaded [{}] recs".format(self.df['level'].count()),
                 level=3)

        #create & set all myImg Config
        self.myImg_config = cutil.Config(configid="myConfId", cdir=self.cdir)
        self.myImg_config.setDdir(self.train_data_dir)
        self.myImg_config.setOdir(self.img_croped_dir_path)
        self.myImg_config.setIdir(self.img_dir_path)
        '''
        self.df['h'] = 0
        self.df['w'] = 0
        self.df['imgpath'] = ""
        self.df['imgexists'] = False
        '''

        #generate dataset for handling train : test
        np.random.seed(self.random_seed)
        self.train_df = self.df.sample(frac=.7,
                                       replace=False,
                                       random_state=self.random_seed)
        self.test_df = self.df[~self.df.index.isin(self.train_df.index)]

        self.tot_cnt = self.img_processing_capacity
        if self.tot_cnt == 0:
            self.tot_cnt = self.train_df['level'].count()
        cnt = 0
        file_missing = 0

        #self.train_df = self.train_df.reset_index()
        #self.test_df = self.test_df.reset_index()
        print(self.train_df.head())

        self.train_df.to_csv(self.train_data_dir + "train_df.csv",
                             index=False,
                             header=False)
        self.test_df.to_csv(self.train_data_dir + "test_df.csv",
                            index=False,
                            header=False)

        self.no_classes = self.train_df.level.nunique()

        return (self.train_df.level.count(), self.test_df.level.count())
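
The train/test split above is the standard pandas sample-then-exclude pattern; a self-contained sketch of the same idea on a toy frame follows (the column name and sizes are illustrative).

import numpy as np
import pandas as pd

df = pd.DataFrame({"level": np.arange(10)})
seed = 42
train_df = df.sample(frac=0.7, replace=False, random_state=seed)  # 70% of rows for training
test_df = df[~df.index.isin(train_df.index)]                      # the remaining 30% for test
assert len(train_df) + len(test_df) == len(df)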
Example 4
	def __call__(cls, *args, **kwargs):
		if cls._instances is None:
			super(SingletonLog, cls).__call__(*args, **kwargs)
			conf = config.Config()
			# Rotate the log file daily (Linux)
			log_file_path = conf.get("log", "path")
			if not os.path.exists(log_file_path):
				os.makedirs(log_file_path)
			log_handler = handlers.TimedRotatingFileHandler(log_file_path + "/blog", conf.get("log", "when"),
															conf.getint("log", "interval"))
			# Format the log record contents
			format_ = "%(asctime)s %(pathname)-5s %(funcName)-5s %(lineno)-5s %(levelname)-5s %(message)s"
			log_formatter = logging.Formatter(format_)
			log_handler.setFormatter(log_formatter)
			# Set the logger name
			log = logging.getLogger('blog')
			log.addHandler(log_handler)
			# Set the log level
			log.setLevel(conf.get("log", "level"))
			cls._instances = log
		return cls._instances
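
As in Example 1, the metaclass builds the object once and hands the cached logging.Logger back on every call. A minimal usage sketch, assuming SingletonLog derives from type and defines _instances = None; the BlogLog class name is illustrative.

# Hypothetical usage sketch (Python 3 metaclass syntax).
class BlogLog(metaclass=SingletonLog):
    pass

log = BlogLog()              # first call creates the handler and configures the logger
log.info("service started")  # writes to the rotating file configured above
assert BlogLog() is log      # later calls return the same Logger object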
Example 5
    def atualizarStatusColeta(self, id_lixeiras_list, status_coleta):
        lixeiraRepository = _l.LixeiraRepository()
        lixeiras = lixeiraRepository.getLixeiras().lixeira
        flag = False
        for id in id_lixeiras_list:
            flag = False
            id = int(id.rstrip())
            for lixeira in lixeiras:
                if lixeira.id == id:
                    flag = True
                    lixeira.status_coleta = int(status_coleta)
                    config = _c.Config()
                    lixeiraRepository = _l.LixeiraRepository()
                    lixeiraRepository.updateStatusColeta(lixeira)
                    lixeiras.remove(lixeira)
                    break
            if not flag:
                break

        return flag
Example 6
#!/usr/bin/env python3

import gp.gp_driver as gp_driver_class
import util.args as args_class
import util.config as config_class

if __name__ == '__main__':

    # Process command line arguments
    args = args_class.Arguments(1, ['config/default.cfg'])
    config_file = args.get_args()[0]

    # Setup configuration
    config = config_class.Config(config_file)

    # Initialize the GP driver and its run variables
    gp_driver = gp_driver_class.GPDriver(config)

    # Run the GP
    while gp_driver.run_count <= int(config.settings['num experiment runs']):
        gp_driver.begin_run()

        while gp_driver.decide_termination():
            gp_driver.begin_eval()

            while gp_driver.check_game_over():
                gp_driver.execute_turn()

            gp_driver.end_eval()

        gp_driver.end_run()
Example 7
#coding:utf-8
__author__ = "chenghao"

import pyorient
import util
from util import config

conf = config.Config()
conn = pyorient.OrientDB(conf.get("orientdb", "host"),
                         conf.getint("orientdb", "port"))
conn.connect(conf.get("orientdb", "user"), conf.get("orientdb", "pwd"))

if conn.db_exists(util.DB_NAME):
    conn.db_open(util.DB_NAME, conf.get("orientdb", "user"),
                 conf.get("orientdb", "pwd"))
Example 8
    def load_img_data(self):
        mname = "load_greyscale_data"

        self.log(mname,
                 "Loading Dataframe from [{}]".format(
                     self.train_label_data_file),
                 level=3)
        self.df = pd.read_csv(self.train_label_data_file)
        self.log(mname,
                 "Loaded [{}] recs".format(self.df['level'].count()),
                 level=3)

        #create & set all myImg Config
        self.myImg_config = cutil.Config(configid="myConfId", cdir=self.cdir)
        self.myImg_config.setDdir(self.train_data_dir)
        self.myImg_config.setOdir(self.img_croped_dir_path)
        self.myImg_config.setIdir(self.img_dir_path)

        self.df['h'] = 0
        self.df['w'] = 0
        self.df['imgpath'] = ""
        self.df['imgexists'] = False

        #initialize all variables...
        n_img_w = self.img_width
        n_img_h = self.img_heigth

        tot_cnt = self.img_processing_capacity
        if tot_cnt == 0:
            tot_cnt = self.df['level'].count()
        cnt = 0
        file_missing = 0

        #generate dataset for handling train : test
        np.random.seed(self.random_seed)
        train_dataset_sample = np.random.choice(
            range(0, tot_cnt),
            int(tot_cnt * self.training_dataset_ratio),
            replace=False)
        train_dataset_indicies = dict(
            zip(train_dataset_sample, train_dataset_sample))

        #x_train = np.zeros(( tot_cnt, n_img_w, n_img_h, 3), dtype='uint8')
        x_img_buf = np.empty((1, n_img_w, n_img_h), dtype='uint8')
        x_train = None
        y_train = None
        x_test = None
        y_test = None
        y_train_buf = []
        y_test_buf = []

        if self.channels == 1:
            x_train = np.zeros((len(train_dataset_sample), n_img_w, n_img_h),
                               dtype='uint8')
            x_test = np.zeros(
                ((tot_cnt - len(train_dataset_sample)), n_img_w, n_img_h),
                dtype='uint8')
        else:
            x_train = np.zeros(
                (len(train_dataset_sample), n_img_w, n_img_h, self.channels),
                dtype='uint8')
            x_test = np.zeros(((tot_cnt - len(train_dataset_sample)), n_img_w,
                               n_img_h, self.channels),
                              dtype='uint8')

        y_train = np.zeros((0, 1), dtype='uint8')
        y_test = np.zeros((0, 1), dtype='uint8')

        #loop in through dataframe.
        train_cnt = 0
        test_cnt = 0
        train_samples_cnt = len(train_dataset_sample)
        test_samples_cnt = tot_cnt - len(train_dataset_sample)
        self.log(mname,
                 "[{}] recs for training.".format(train_samples_cnt),
                 level=3)
        self.log(mname,
                 "[{}] recs for test.".format(test_samples_cnt),
                 level=3)

        for i, rec in self.df.iterrows():
            if cnt >= tot_cnt:
                break

            progress_sts = "%6d out of %6d" % (cnt, tot_cnt)
            sys.stdout.write(progress_sts)
            sys.stdout.write(
                "\b" * len(progress_sts))  # backspace to the start of the line
            sys.stdout.flush()

            imgpath = self.img_dir_path + rec.image + self.img_filename_ext
            self.df.loc[i, 'imgpath'] = imgpath

            if os.path.exists(imgpath):
                myimg1 = myimg.myImg(imageid=str(i),
                                     config=self.myImg_config,
                                     path=imgpath)

                #x_img_buf[ 0, :, :] = myimg1.getImage()
                # membership test: dict.get(cnt, False) would treat index 0 as missing
                if cnt in train_dataset_indicies:
                    #x_train = np.vstack( (x_train, x_img_buf))
                    if train_cnt < train_samples_cnt:
                        if self.channels == 1:
                            x_train[train_cnt, :, :] = myimg1.getImage()
                        else:
                            x_train[train_cnt, :, :, :] = myimg1.getImage()
                    y_train_buf.append(rec.level)
                    train_cnt += 1
                else:
                    #x_test = np.vstack( (x_test, x_img_buf))
                    #self.log( mname, "[{}] [{}] x_test[{}] x_img_buf[{}]".format(cnt,test_cnt,x_test.shape,x_img_buf.shape), level=2)
                    if test_cnt < test_samples_cnt:
                        if self.channels == 1:
                            x_test[test_cnt, :, :] = myimg1.getImage()
                        else:
                            x_test[test_cnt, :, :, :] = myimg1.getImage()
                    y_test_buf.append(rec.level)
                    test_cnt += 1

                #self.log( mname, "Image file [{}] doesn't exists!!!".format(imgpath), level=2)
            else:
                #self.log( mname, "Image file [{}] doesn't exists!!!".format(imgpath), level=2)
                file_missing += 1

            cnt += 1

        #create y array as required
        y_train = np.array(y_train_buf, dtype='uint8')
        y_train = np.reshape(y_train, (y_train.size, 1))
        y_test = np.array(y_test_buf, dtype='uint8')
        y_test = np.reshape(y_test, (y_test.size, 1))
        #print final dimensions for x_train and y_train
        self.log(mname,
                 "x_train [{}] y_train [{}]".format(x_train.shape,
                                                    y_train.shape),
                 level=3)
        self.log(mname,
                 "x_test [{}] y_test [{}]".format(x_test.shape, y_test.shape),
                 level=3)

        self.log(mname, "Process dataset [{}]".format(cnt), level=3)
        self.log(mname, "File missing [{}]".format(file_missing), level=3)
        self.log(mname,
                 "Max image width[{}] heigth[{}]".format(
                     self.df['w'].max(), self.df['h'].max()),
                 level=3)
        #print(self.df.head(10))
        #self.df.to_csv( self.train_data_dir + 'u_img_set.csv')

        return (x_train, y_train), (x_test, y_test)
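
The train/test assignment above reduces to sampling row positions without replacement and testing membership while iterating; a compact standalone sketch of that pattern follows (the total count and the 70/30 ratio are illustrative).

import numpy as np

tot_cnt, ratio, seed = 100, 0.7, 0
np.random.seed(seed)
train_idx = set(np.random.choice(range(tot_cnt), int(tot_cnt * ratio), replace=False))

train_rows, test_rows = [], []
for i in range(tot_cnt):
    (train_rows if i in train_idx else test_rows).append(i)

assert len(train_rows) == int(tot_cnt * ratio)
assert len(test_rows) == tot_cnt - len(train_rows)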
Example 9
    def load_data_as_greyscale(self):
        mname = "load_data_as_greyscale"

        self.log(mname,
                 "Loading Dataframe from [{}]".format(
                     self.train_label_data_file),
                 level=3)
        self.df = pd.read_csv(self.train_label_data_file)
        self.log(mname,
                 "Loaded [{}] recs".format(self.df['level'].count()),
                 level=3)

        #create & set all myImg Config
        self.myImg_config = cutil.Config(configid="myConfId", cdir=self.cdir)
        self.myImg_config.setDdir(self.train_data_dir)
        self.myImg_config.setOdir(self.img_croped_dir_path)
        self.myImg_config.setIdir(self.img_dir_path)

        self.df['h'] = 0
        self.df['w'] = 0
        self.df['imgpath'] = ""
        self.df['imgexists'] = False

        #initialize all variables...
        n_img_w = self.img_width
        n_img_h = self.img_heigth

        tot_cnt = self.df['level'].count()
        cnt = 0
        file_missing = 0

        #x_train = np.zeros(( tot_cnt, n_img_w, n_img_h, 3), dtype='uint8')
        x_train = np.zeros((0, n_img_w, n_img_h), dtype='uint8')
        x_img_buf = np.empty((1, n_img_w, n_img_h), dtype='uint8')
        y_buf = []
        y_train = np.empty((0, 1), dtype='uint8')

        #loop in through dataframe.
        for i, rec in self.df.iterrows():
            #if cnt >= 50:
            #  break

            progress_sts = "%6d out of %6d" % (cnt, tot_cnt)
            sys.stdout.write("%6d out of %6d" % (cnt, tot_cnt))
            sys.stdout.write(
                "\b" * len(progress_sts))  # return to start of line, after '['
            sys.stdout.flush()

            imgpath = self.img_dir_path + rec.image + self.img_filename_ext
            self.df.loc[i, 'imgpath'] = imgpath

            #skip already processed data
            if os.path.exists(self.img_croped_dir_path + rec.image +
                              self.img_filename_ext):
                cnt += 1
                continue

            if os.path.exists(imgpath):
                myimg1 = myimg.myImg(imageid=str(i),
                                     config=self.myImg_config,
                                     path=imgpath)

                myimg1.getGreyScaleImage(convertFlag=True)
                myimg1.padImage(n_img_w, n_img_h)

                #x_img_buf[ 0, :, :] = myimg1.getImage()

                myimg1.saveImage(img_type_ext='.jpeg', gen_new_filename=True)

                #self.log( mname, "Croped Image [{}] [{}] [{}] [{}]".format(myimg1.getImage().shape,croped_img_arr.shape,x_train.shape,x_img_buf.shape), level=4)

                #x_train = np.vstack( (x_train, x_img_buf))
                #x_train[cnt,:,:,:] = croped_img_arr
                y_buf.append(rec.level)

                self.df.loc[i, 'imgexists'] = True
                self.df.loc[i, 'w'], self.df.loc[i, 'h'] = myimg1.getImageDim()
                #self.df.loc[i,'_w'], self.df.loc[i,'_h'] = croped_img_arr.shape[0],croped_img_arr.shape[1]
                #self.log( mname, "Image file [{}] doesn't exists!!!".format(imgpath), level=2)
            else:
                #self.log( mname, "Image file [{}] doesn't exists!!!".format(imgpath), level=2)
                file_missing += 1

            cnt += 1

        #create y array as required
        y_train = np.array(y_buf, dtype='uint8')
        y_train = np.reshape(y_train, (y_train.size, 1))
        #print final dimensions for x_train and y_train
        self.log(mname,
                 "x_train [{}] y_train [{}]".format(x_train.shape,
                                                    y_train.shape),
                 level=3)

        self.log(mname, "Process dataset [{}]".format(cnt), level=3)
        self.log(mname, "File missing [{}]".format(file_missing), level=3)
        self.log(mname,
                 "Max image width[{}] heigth[{}]".format(
                     self.df['w'].max(), self.df['h'].max()),
                 level=3)
        #print(self.df.head(10))
        self.df.to_csv(self.train_data_dir + 'u_img_set.csv')
Example 10
    def load_train_data(self):
        mname = "load_train_data"

        self.log(mname,
                 "Loading Dataframe from [{}]".format(
                     self.train_label_data_file),
                 level=3)
        self.df = pd.read_csv(self.train_label_data_file)

        #create & set all myImg Config
        self.myImg_config = cutil.Config(configid="myConfId", cdir=self.cdir)
        self.myImg_config.setDdir(self.train_data_dir)
        self.myImg_config.setOdir(self.img_croped_dir_path)
        self.myImg_config.setIdir(self.img_dir_path)

        self.df['h'] = 0
        self.df['w'] = 0
        self.df['imgpath'] = ""
        self.df['imgexists'] = False

        #initialize all variables...
        n_img_w = self.img_width
        n_img_h = self.img_heigth

        tot_cnt = self.df['level'].count()
        cnt = 0
        file_missing = 0

        #x_train = np.zeros(( tot_cnt, n_img_w, n_img_h, 3), dtype='uint8')
        x_train = np.zeros((0, n_img_w, n_img_h, 3), dtype='uint8')
        x_img_buf = np.empty((1, n_img_w, n_img_h, 3), dtype='uint8')
        y_buf = []
        y_train = np.empty((0, 1), dtype='uint8')

        #loop in through dataframe.
        for i, rec in self.df.iterrows():
            #if cnt > 50:
            #  break

            progress_sts = "%6d out of %6d" % (cnt, tot_cnt)
            sys.stdout.write("%6d out of %6d" % (cnt, tot_cnt))
            sys.stdout.write(
                "\b" * len(progress_sts))  # return to start of line, after '['
            sys.stdout.flush()

            imgpath = self.img_dir_path + rec.image + self.img_filename_ext
            self.df.loc[i, 'imgpath'] = imgpath

            #skip already processed data
            if os.path.exists(self.img_croped_dir_path + rec.image +
                              self.img_filename_ext):
                cnt += 1
                continue

            if os.path.exists(imgpath):
                myimg1 = myimg.myImg(imageid=str(i),
                                     config=self.myImg_config,
                                     path=imgpath)

                i_w, i_h = myimg1.getImageDim()
                croped_img_arr = np.zeros((n_img_w, n_img_h, 3), dtype='uint8')
                calc_img_w_offset = int((n_img_w - i_w) / 2)
                calc_img_h_offset = int((n_img_h - i_h) / 2)
                croped_img_arr[calc_img_w_offset:(calc_img_w_offset + i_w),
                               calc_img_h_offset:(calc_img_h_offset +
                                                  i_h), :] = myimg1.getImage()
                '''
                # Disabled TensorFlow 1.x alternative for the centre-crop above.
                croped_img = tf.image.resize_image_with_crop_or_pad(myimg1.getImage(), n_img_w, n_img_h)
                init = tf.global_variables_initializer()
                croped_img_arr = 0
                with tf.Session() as sess:
                    sess.run(init)
                    croped_img_arr = sess.run(croped_img)
                    print(croped_img_arr.shape, type(croped_img_arr))
                    croped_img_arr = np.reshape(croped_img_arr, (n_img_w, n_img_h, 3))
                    print(croped_img_arr.shape, type(croped_img_arr))
                '''

                x_img_buf[0, :, :, :] = croped_img_arr

                #'''#use below block of code to debug croped image with original.
                #myimg1.showImage()
                #myimg1.saveImage(img_type_ext='.jpeg',gen_new_filename=False)
                myimg2 = myimg.myImg(imageid=str(i),
                                     config=self.myImg_config,
                                     path=rec.image + self.img_filename_ext,
                                     img=croped_img_arr)
                myimg2.saveImage(img_type_ext='.jpeg', gen_new_filename=True)
                #myimg2.saveImage()
                #'''

                #self.log( mname, "Croped Image [{}] [{}] [{}] [{}]".format(myimg1.getImage().shape,croped_img_arr.shape,x_train.shape,x_img_buf.shape), level=4)

                #x_train = np.vstack( (x_train, x_img_buf))
                #x_train[cnt,:,:,:] = croped_img_arr
                y_buf.append(rec.level)

                self.df.loc[i, 'imgexists'] = True
                self.df.loc[i, 'w'], self.df.loc[i, 'h'] = myimg1.getImageDim()
                self.df.loc[i, '_w'], self.df.loc[
                    i, '_h'] = croped_img_arr.shape[0], croped_img_arr.shape[1]
                #self.log( mname, "Image file [{}] doesn't exists!!!".format(imgpath), level=2)
            else:
                #self.log( mname, "Image file [{}] doesn't exists!!!".format(imgpath), level=2)
                file_missing += 1

            cnt += 1

        #create y array as required
        y_train = np.array(y_buf, dtype='uint8')
        y_train = np.reshape(y_train, (y_train.size, 1))
        #print final dimensions for x_train and y_train
        self.log(mname,
                 "x_train [{}] y_train [{}]".format(x_train.shape,
                                                    y_train.shape),
                 level=3)

        self.log(mname, "Process dataset [{}]".format(cnt), level=3)
        self.log(mname, "File missing [{}]".format(file_missing), level=3)
        self.log(mname,
                 "Max image width[{}] heigth[{}]".format(
                     self.df['w'].max(), self.df['h'].max()),
                 level=3)
        #print(self.df.head(10))
        self.df.to_csv(self.train_data_dir + 'u_img_set.csv')
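
The cropping block in this example centres the source pixels inside a fixed-size zero canvas; a standalone sketch of that offset arithmetic follows (array sizes are illustrative, and the source is assumed to be no larger than the canvas, as in the original code).

import numpy as np

n_img_w, n_img_h = 8, 8                      # target canvas size
img = np.ones((4, 6, 3), dtype='uint8')      # toy image, smaller than the canvas
i_w, i_h = img.shape[0], img.shape[1]

canvas = np.zeros((n_img_w, n_img_h, 3), dtype='uint8')
w_off = (n_img_w - i_w) // 2                 # centre the image along each axis
h_off = (n_img_h - i_h) // 2
canvas[w_off:w_off + i_w, h_off:h_off + i_h, :] = img
assert canvas.sum() == img.sum()             # only the centred region is non-zero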
Example 11
    def __init__(self):
        self.config = config.Config()
Example 12
    def __init__(self):
        config = _c.Config()
        self.gmaps = googlemaps.Client(key=config.googlemaps_key)
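
Once the client is built it can be queried directly; a short hedged sketch of a geocoding call follows (the key and address are placeholders, and googlemaps.Client.geocode is the standard entry point of the googlemaps package).

import googlemaps

gmaps = googlemaps.Client(key="YOUR_API_KEY")  # placeholder; the example reads the key from Config
results = gmaps.geocode("1600 Amphitheatre Parkway, Mountain View, CA")
if results:
    location = results[0]["geometry"]["location"]  # {'lat': ..., 'lng': ...}
    print(location["lat"], location["lng"])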