Example #1
class TrainerInterface(Client):
    def __init__(self, host, port, db=0, batch_size=5):
        super().__init__()
        # redis data
        self.host = host
        self.port = port
        self.db = db
        self.client = Client(host=self.host, port=self.port,
                             db=self.db)  # decode_responses=True

        # Environment data
        self.env_name = str(self.client.get('env_name'), encoding='utf-8')
        self.input_dims = self.client.tensorget(
            'input_dims')  # need to improve

        # print('self.input_dims=', self.input_dims)

        self.n_actions = int(self.client.get('n_actions'))
        self.action_continous = int(self.client.get('action_continous'))
        self.max_action = float(
            self.client.get('max_action'))  # maximum value for action output
        self.min_action = float(
            self.client.get('min_action'))  # minimum value for action output

        # Process control data
        self.batch_size = batch_size
        self.stop_gathering = int(
            self.client.get('stop_gathering')
        )  # information for regulators if 1 stop data collecting
        self.mem_cntr = int(self.client.get('mem_cntr'))
        self.mem_size = int(self.client.get('mem_size'))

        # self.show_info()

    def show_info(self):
        print('\n ---------------- INFO ---------------------------------')
        print(f'The environment: {self.env_name} has been created\n')
        print(' -------------- observations --------------------------')
        print(f'Input observation dimension: {self.input_dims}\n')
        print(' -------------- actions --------------------------')
        print(f'Number of actions: {self.n_actions}')
        print(f'Action continuous: {self.action_continous}')
        print(f'Maximum action value: {self.max_action}')
        print(f'Minimum action value: {self.min_action}')
        try:
            print(
                f'Action meanings: {self.env.unwrapped.get_action_meanings()}')
        except Exception:
            print('Action meanings not described in env')

    def get_batch(self):

        self.mem_cntr = int(self.client.get('mem_cntr'))
        idx_max = np.min([self.mem_cntr, self.mem_size])
        batch = np.random.choice(
            idx_max, self.batch_size,
            replace=False)  # rand indexes to get from redis database

        # Get tensors with randomized indexes and stack them into batches
        # get observations from database
        self.observations = np.stack([
            self.client.tensorget(f'obs{batch[i]}')
            for i in range(self.batch_size)
        ])
        self.observations = tf.convert_to_tensor(self.observations,
                                                 dtype=tf.float32)

        # get next observations from database
        self.observations_ = np.stack([
            self.client.tensorget(f'obs_{batch[i]}')
            for i in range(self.batch_size)
        ])
        self.observations_ = tf.convert_to_tensor(self.observations_,
                                                  dtype=tf.float32)

        # get actions from database
        self.actions = np.stack([
            self.client.tensorget(f'action{batch[i]}')
            for i in range(self.batch_size)
        ])
        self.actions = tf.convert_to_tensor(self.actions, dtype=tf.float32)

        # get rewards from database
        reward = self.client.mget(
            [f'reward{batch[i]}' for i in range(self.batch_size)])
        self.rewards = np.array(reward, dtype=np.float32)
        self.rewards = tf.convert_to_tensor(self.rewards, dtype=tf.float32)

        # get dones from database
        done = self.client.mget(
            [f'done{batch[i]}' for i in range(self.batch_size)])
        self.dones = np.array(done, dtype=np.int32)
        self.dones = tf.convert_to_tensor(self.dones, dtype=tf.int32)

        return self.observations, self.actions, self.rewards, self.observations_, self.dones
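A minimal usage sketch for the class above, assuming a RedisAI server on localhost:6379 whose keys have already been populated by an initializer such as the one in Example #4; the connection details and batch size are placeholders, not values from the original code:

trainer = TrainerInterface(host='localhost', port=6379, batch_size=32)
trainer.show_info()

# Only sample once the replay buffer holds at least one full batch,
# otherwise np.random.choice(..., replace=False) in get_batch would fail.
if trainer.mem_cntr >= trainer.batch_size:
    obs, actions, rewards, obs_, dones = trainer.get_batch()
    print(obs.shape, actions.shape, rewards.shape)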
Example #2
import sys
import numpy as np

from redisai import BlobTensor, Client, Backend, Device
from ml2rt import load_model

from cli import arguments

tensor = BlobTensor.from_numpy(np.ones((1, 13), dtype=np.float32))
model = load_model(
    '../models/sklearn/boston_house_price_prediction/boston.onnx')

if arguments.gpu:
    device = Device.gpu
else:
    device = Device.cpu

con = Client(host=arguments.host, port=arguments.port)
con.tensorset('tensor', tensor)
con.modelset('model', Backend.onnx, device, model)
con.modelrun('model', inputs=['tensor'], outputs=['out'])
out = con.tensorget('out', as_type=BlobTensor)
print(out.to_numpy())
Example #3
import numpy as np

from redisai import BlobTensor, Client, Backend, Device
from ml2rt import load_model

tensor = BlobTensor.from_numpy(np.ones((1, 13), dtype=np.float32))
model = load_model(
    '../models/sklearn/boston_house_price_prediction/boston.onnx')

con = Client()
con.tensorset('tensor', tensor)
con.modelset('model', Backend.onnx, Device.cpu, model)
con.modelrun('model', inputs=['tensor'], outputs=['out'])
out = con.tensorget('out', as_type=BlobTensor)
print(out.to_numpy())
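The two Boston-house-price snippets above target the pre-1.0 redisai-py API (BlobTensor, Backend, Device, modelset, modelrun). A rough equivalent for the current redisai-py API is sketched below; the tensorset/modelstore/modelexecute calls with plain-string backend and device arguments are an assumption about redisai-py 1.x, not part of the original examples:

import numpy as np
from redisai import Client
from ml2rt import load_model

con = Client(host='localhost', port=6379)
model = load_model(
    '../models/sklearn/boston_house_price_prediction/boston.onnx')

con.tensorset('tensor', np.ones((1, 13), dtype=np.float32))
con.modelstore('model', 'onnx', 'cpu', model)  # backend and device passed as strings
con.modelexecute('model', inputs=['tensor'], outputs=['out'])
print(con.tensorget('out'))  # tensorget returns a numpy array by default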
Example #4
class RedisInitializer(InfoInRedis):
    '''
    RedisInitializer connects to the RedisAI database at the given host and port and initialises the gym environment.
    '''
    def __init__(self,
                 host,
                 port,
                 environment,
                 db=0,
                 mem_size=1000000,
                 clean_all_keys=True,
                 batch_size=10,
                 stop_collecting=0,
                 stop_training=1):

        #redis data
        self.host = host
        self.port = port
        self.db = db

        self.mem_size = mem_size
        self.clean_all_keys = clean_all_keys
        self.client = Client(host=self.host, port=self.port, db=self.db)

        #Environment data
        self.env_name = environment
        self.env = gym.make(self.env_name)
        self.input_dims = np.asarray(self.env.reset().shape, dtype=np.int32)

        self.n_actions = 4  #self.env.action_space.n
        self.action_discrete = 0  # 1 for discrete, 0 for continuous
        self.max_action = 1  #maximum value for action output
        self.min_action = -1  #minimum value for action output

        # Process control data
        self.stop_collecting = stop_collecting  # if set to 1, regulators stop collecting data
        self.stop_training = stop_training  # if set to 1, trainers are stopped; prevents training from starting before the buffer holds at least batch_size samples
        self.mem_cntr = 0
        self.batch_size = batch_size

        #Write data to redis base
        if self.clean_all_keys:
            self.client.flushall()  # delete all keys at the beginning of the process

        # environment data
        self.client.set('env_name', self.env_name)
        self.client.tensorset('input_dims', self.input_dims)
        self.client.set('n_actions', self.n_actions)
        self.client.set('max_action', self.max_action)
        self.client.set('min_action', self.min_action)
        self.client.set('action_discrete', self.action_discrete)
        #Process control data
        self.client.set('stop_collecting', self.stop_collecting)
        self.client.set('stop_training', self.stop_training)
        self.client.set('mem_size', self.mem_size)
        self.client.set('mem_cntr', self.mem_cntr)
        self.client.set('batch_size', self.batch_size)
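A short bootstrap sketch for the initializer above, assuming a RedisAI server on localhost:6379 and that gym is installed; the environment name and buffer sizes are placeholders, not values taken from the original code:

initializer = RedisInitializer(host='localhost',
                               port=6379,
                               environment='Pendulum-v1',
                               mem_size=100000,
                               batch_size=64)
initializer.show_info()  # inherited from InfoInRedis (Example #5)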
Example #5
class InfoInRedis:
    """
    This class reads information about the environment from the redis database and shows it
    """
    def __init__(self, host, port, db=0):
        '''
        Reads the parameters of the gym environment from the redis database.
        :param host: IP of the redis database host, e.g. '192.168.1.16' or 'localhost'
        :param port: redis service port; check port forwarding when using docker images
        :param db: redis database index, default 0; use other values if the redis server holds more databases
        '''
        self.host = host
        self.port = port
        self.db = db
        self.client = Client(host=self.host, port=self.port, db=self.db)

        # Environment data
        self.env_name = str(self.client.get('env_name'), encoding='utf-8')
        self.input_dims = self.client.tensorget(
            'input_dims')  # need to improve

        self.n_actions = int(self.client.get('n_actions'))
        self.action_discrete = int(self.client.get('action_discrete'))
        self.max_action = float(
            self.client.get('max_action'))  # maximum value for action output
        self.min_action = float(
            self.client.get('min_action'))  # minimum value for action output

        # Process control data
        self.stop_collecting = int(
            self.client.get('stop_collecting')
        )  # information for regulators: 1 stops the regulator, 2 suspends it
        self.stop_training = int(
            self.client.get('stop_training')
        )  # information for trainers: 1 stops the trainer, 2 suspends it
        self.mem_cntr = int(self.client.get('mem_cntr'))
        self.mem_size = int(self.client.get('mem_size'))
        self.batch_size = int(self.client.get('batch_size'))

    def show_info(self):
        print('\n ---------------- INFO ---------------------------------')
        print(f'The environment: {self.env_name} has been created\n')
        print(' -------------- observations --------------------------')
        print(f'Input observation dimension: {self.input_dims}\n')
        print(' -------------- actions --------------------------')
        print(f'Number of actions: {self.n_actions}')
        print(f'Action discrete: {self.action_discrete}')
        print(f'Maximum action value: {self.max_action}')
        print(f'Minimum action value: {self.min_action}\n')
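A minimal consumer sketch for InfoInRedis, assuming the metadata keys have already been written by an initializer; the host and port are placeholders:

info = InfoInRedis(host='192.168.1.16', port=6379)
info.show_info()
print('buffer size:', info.mem_size, 'samples written so far:', info.mem_cntr)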
Example #6
    def __init__(self, host, port, environment, db=0, mem_size=1000000):
        super().__init__()
        #redis data
        self.host = host
        self.port = port
        self.db = db
        self.mem_size = mem_size
        self.client = Client(host=self.host, port=self.port, db=self.db)

        #Environment data
        self.env_name = environment
        self.env = gym.make(self.env_name)
        self.input_dims = np.asarray(self.env.reset().shape, dtype=np.int32)

        self.n_actions = 4  #self.env.action_space.n
        self.action_continous = 1  # 1 for continuous, 0 for discrete
        self.max_action = 1  #maximum value for action output
        self.min_action = -1  #minimum value for action output

        # Process control data
        self.stop_gathering = 0  # if set to 1, regulators stop collecting data
        self.mem_cntr = 0
        self.batch_size = 3

        #Write data to redis base
        #self.client.flushall()  # delete all keys at the beginning of the process
        #environmental data
        self.client.set('env_name', self.env_name)
        self.client.tensorset('input_dims', self.input_dims)
        self.client.set('n_actions', self.n_actions)
        self.client.set('max_action', self.max_action)
        self.client.set('min_action', self.min_action)
        self.client.set('action_continous', self.action_continous)
        #Process control data
        self.client.set('stop_gathering', self.stop_gathering)
        self.client.set('mem_size', self.mem_size)
        self.client.set('mem_cntr', self.mem_cntr)
        """
        #make initial trajectories
        for i in range(self.mem_size):
            cnt = str(self.client.get('mem_cntr'))
            self.client.incr('mem_cntr')

            state = np.zeros((1,*self.input_dims))#np.random.random_sample((3, 2))
            state_ = np.zeros((1, *self.input_dims))
            action=np.zeros((1, self.n_actions))
            reward=0
            done=0

            self.client.tensorset(f'state{cnt}', state)
            self.client.tensorset(f'state_{cnt}', state_)
            self.client.tensorset(f'action{cnt}', action)
            self.client.set(f'reward{cnt}',reward)
            self.client.set(f'done{cnt}', done)


            #add sarsd to one trajectory
            self.client.sadd(f'trajectory{cnt}', f'state{cnt}')
            self.client.sadd(f'trajectory{cnt}', f'state_{cnt}')
            self.client.sadd(f'trajectory{cnt}', f'action{cnt}')
            self.client.sadd(f'trajectory{cnt}', f'reward{cnt}')
            self.client.sadd(f'trajectory{cnt}', f'done{cnt}')

            #add trajectory to the set of trajectories
            self.client.sadd(f'sarsd', f'trajectory{cnt}')

        """

        print('keys=', self.client.keys())
        #print(self.client.get('input_dims'))
        #print(self.client.smembers('sarsd'))
        #print(self.client.srandmember('sarsd', self.batch_size))
        #print(self.client.info())

        self.show_info()
Example #7
class RegulatorInterface(Client):
    def __init__(self, host, port, db=0):
        super().__init__()
        #redis data
        self.host = host
        self.port = port
        self.db = db
        self.client = Client(host=self.host, port=self.port,
                             db=self.db)  #decode_responses=True

        # Environment data
        self.env_name = str(self.client.get('env_name'), encoding='utf-8')
        self.input_dims = self.client.tensorget('input_dims')
        self.n_actions = int(self.client.get('n_actions'))
        self.action_continous = int(self.client.get('action_continous'))
        self.max_action = float(
            self.client.get('max_action'))  #maximum value for action output
        self.min_action = float(
            self.client.get('min_action'))  #minimum value for action output

        # Process control data
        self.stop_gathering = int(
            self.client.get('stop_gathering')
        )  #information for regulators if 1 stop data collecting
        self.mem_cntr = self.client.get('mem_cntr')
        self.mem_size = float(self.client.get('mem_size'))
        self.show_info()

    def storage_data(self, obs, action, reward, obs_, done):
        obs = np.array(obs, dtype=np.float32)
        obs_ = np.array(obs_, dtype=np.float32)
        action = np.array(action, dtype=np.float32)
        done = int(done)

        self.mem_cntr = int(
            self.client.get('mem_cntr'))  #get free database index
        self.client.incr('mem_cntr')  #increment index to lock value in use
        index = int(
            self.mem_cntr % self.mem_size
        )  #if the counter is bigger than allocated buffer refill from oldest samples

        self.client.tensorset(f'obs{index}', obs)
        self.client.tensorset(f'obs_{index}', obs_)
        self.client.tensorset(f'action{index}', action)
        #self.client.set(f'reward{index}', reward)
        #self.client.set(f'done{index}', done)

        self.client.mset({f'reward{index}': reward, f'done{index}': done})
        #combine sarsd into one trajectory
        self.client.sadd(f'trajectory{index}', f'obs{index}', f'obs_{index}',
                         f'action{index}', f'reward{index}', f'done{index}')
        #and add this n-th trajectory to all trajectories
        self.client.sadd('trajectories', f'trajectory{index}')

        #check if data should be collected
        self.stop_gathering = int(self.client.get('stop_gathering'))
        if self.stop_gathering:
            print('Stop_gathering flag has been set. Finishing episode')

        #self.client.sadd(f'trajectory{index}', f'obs{index}')

        #print('obs=',obs)
        #print('obs_=', obs_)
        #print('action=', action)
        #print('reward', self.client.get(f'reward{index}'))
        #print('done', done)
        #print('index=',index,'mem_cnt=',self.mem_cntr)
        #print(f'trajectory{index}',self.smembers(f'trajectory{index}'))
        pass

    def show_info(self):
        print('\n ---------------- INFO ---------------------------------')
        print(f'The environment: {self.env_name} has been created\n')
        print(' -------------- observations --------------------------')
        print(f'Input observation dimension: {self.input_dims}\n')
        print(' -------------- actions --------------------------')
        print(f'Number of actions: {self.n_actions}')
        print(f'Action continuous: {self.action_continous}')
        print(f'Maximum action value: {self.max_action}')
        print(f'Minimum action value: {self.min_action}')
        try:
            print(
                f'Action meanings: {self.env.unwrapped.get_action_meanings()}')
        except Exception:
            print('Action meanings not described in env')
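A sketch of a data-gathering episode driven through the interface above, assuming the classic gym reset/step API used elsewhere in these examples and a random policy purely for illustration; the environment name and connection details are placeholders:

import gym

regulator = RegulatorInterface(host='localhost', port=6379)
env = gym.make('Pendulum-v1')

obs = env.reset()
done = False
while not done and not regulator.stop_gathering:
    action = env.action_space.sample()
    obs_, reward, done, _ = env.step(action)
    # storage_data writes the transition to redis and refreshes stop_gathering
    regulator.storage_data(obs, action, reward, obs_, done)
    obs = obs_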
Example #8
    def get_client(self):
        return Client()