Example #1
def train(model, criterion, optimizer, n_epochs, train_loader, test_loader=None, scheduler=None, noise_rate=0.0):
    train_noise_generator = Noise(train_loader, noise_rate=noise_rate)
    test_noise_generator = Noise(test_loader, noise_rate=noise_rate) if test_loader is not None else None

    train_loss_per_epoch = []
    test_loss_per_epoch = []
    correct_per_epoch = []
    incorrect_per_epoch = []
    memorized_per_epoch = []

    for _ in tqdm(range(n_epochs)):
        # activate train mode
        model.train()
        train_loss = 0
        for batch_idx, (inputs, targets) in enumerate(train_loader):
            targets_with_noise = train_noise_generator.symmetric_noise(targets, batch_idx)
            # to(device) copies data from CPU to GPU
            inputs, targets = inputs.to(device), targets_with_noise.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item() * targets.size(0)
        train_loss_per_epoch.append(train_loss / len(train_loader.dataset))

        if test_loader is not None:
            model.eval()
            test_loss = 0
            correct, incorrect, memorized, total = 0, 0, 0, 0
            with torch.no_grad():
                for batch_idx, (inputs, targets) in enumerate(test_loader):
                    original_targets = targets.to(device)
                    targets_with_noise = test_noise_generator.symmetric_noise(targets, batch_idx)
                    inputs, targets = inputs.to(device), targets_with_noise.to(device)
                    outputs = model(inputs)
                    loss = criterion(outputs, targets)

                    _, predicted = outputs.max(1)
                    total += targets.size(0)
                    correct_idx = predicted.eq(original_targets)
                    memorized_idx = ((predicted != original_targets) & (predicted == targets))
                    incorrect_idx = ((predicted != original_targets) & (predicted != targets))
                    correct += correct_idx.sum().item()
                    memorized += memorized_idx.sum().item()
                    incorrect += incorrect_idx.sum().item()
                    test_loss += loss.item() * targets.size(0)

                test_loss_per_epoch.append(test_loss / total)
                correct_per_epoch.append(correct / total)
                memorized_per_epoch.append(memorized / total)
                incorrect_per_epoch.append(incorrect / total)

        # anneal the learning rate if a scheduler was provided
        if scheduler is not None:
            scheduler.step()

    return (train_loss_per_epoch, test_loss_per_epoch,
            correct_per_epoch, memorized_per_epoch, incorrect_per_epoch,)
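The Noise object used by train() is assumed to corrupt a fixed fraction of labels uniformly at random ("symmetric" label noise) and to return the same corruption for a given batch index on every epoch. A minimal sketch under those assumptions; the constructor arguments and symmetric_noise signature mirror the call sites above, but the internals are hypothetical, not the original implementation:

import torch

class Noise:
    # Sketch of a symmetric label-noise generator (hypothetical internals).
    def __init__(self, loader, noise_rate=0.0, n_classes=10, seed=0):
        self.noise_rate = noise_rate          # fraction of labels to flip
        self.n_classes = n_classes
        self.seed = seed                      # loader is unused in this sketch

    def symmetric_noise(self, targets, batch_idx):
        # Re-seed per batch index so the corrupted labels are identical on every epoch.
        g = torch.Generator().manual_seed(self.seed + batch_idx)
        mask = torch.rand(targets.shape, generator=g) < self.noise_rate
        random_labels = torch.randint(0, self.n_classes, targets.shape, generator=g)
        noisy = targets.clone()
        noisy[mask] = random_labels[mask]
        return noisy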
Example #2
def main(_):
    with tf.Session() as sess:
        env = gym.make(ENV_NAME)
        np.random.seed(RANDOM_SEED)
        tf.set_random_seed(RANDOM_SEED)
        env.seed(RANDOM_SEED)

        print(env.observation_space)
        print(env.action_space)

        state_dim = env.observation_space.shape[0]

        try:
            action_dim = env.action_space.shape[0]
            action_bound = env.action_space.high
            # Ensure action bound is symmetric
            assert np.all(env.action_space.high == -env.action_space.low)
            discrete = False
            print('Continuous Action Space')
        except AttributeError:
            action_dim = env.action_space.n
            action_bound = 1
            discrete = True
            print('Discrete Action Space')

        actor = ActorNetwork(sess, state_dim, action_dim, action_bound,
                             ACTOR_LEARNING_RATE, TAU)

        critic = CriticNetwork(sess, state_dim, action_dim,
                               CRITIC_LEARNING_RATE, TAU, actor.get_num_trainable_vars())

        noise = Noise(DELTA, SIGMA, OU_A, OU_MU)
        reward = Reward(REWARD_FACTOR, GAMMA)
Example #3
    def __init__(
        self,
        a_dim,
        s_dim,
        a_bound,
    ):
        self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,
        #self.sess = tf.Session()
        self.P_online = Actor(s_dim, a_dim)
        self.P_target = Actor(s_dim, a_dim)
        self.P_target.load_state_dict(self.P_online.state_dict())
        self.Q_online = Critic(s_dim, a_dim)
        self.Q_target = Critic(s_dim, a_dim)
        self.Q_target.load_state_dict(self.Q_online.state_dict())
        self.q_optimizer = torch.optim.Adam(self.Q_online.parameters(),
                                            lr=LR_C)
        self.p_optimizer = torch.optim.Adam(self.P_online.parameters(),
                                            lr=LR_A)
        self.loss_td = nn.MSELoss()
        self.replay_buffer = ReplayBuffer()
        self.batch_size = 32

        self.discrete = False
        self.ep_step = 0
        # noise
        self.noise = Noise(DELTA, SIGMA, OU_A, OU_MU)
        # Initialize noise
        self.ou_level = 0.
        self.action_low = -2
        self.action_high = 2
Example #4
def main(_):

    with tf.compat.v1.Session() as sess:
        env = StageWorld(LASER_BEAM, map_type)
        np.random.seed(RANDOM_SEED)
        tf.compat.v1.set_random_seed(RANDOM_SEED)

        state_dim = LASER_BEAM * LASER_HIST + SPEED + TARGET

        action_dim = ACTION
        #action_bound = [0.25, np.pi/6] #bounded acceleration
        action_bound = [0.5, np.pi / 3]  #bounded velocity
        switch_dim = SWITCH

        discrete = False
        print('Continuous Action Space')

        actor = ActorNetwork(sess, state_dim, action_dim, action_bound,
                             ACTOR_LEARNING_RATE, TAU)

        critic = CriticNetwork(sess, state_dim, action_dim, switch_dim,
                               CRITIC_LEARNING_RATE, TAU,
                               actor.get_num_trainable_vars())

        noise = Noise(DELTA, SIGMA, OU_A, OU_MU)
        reward = Reward(REWARD_FACTOR, GAMMA)

        try:
            train(sess, env, actor, critic, noise, reward, discrete,
                  action_bound)
        except KeyboardInterrupt:
            pass
Example #5
    def __init__(self, task):
        self.task=task
        self.state_size=task.state_size
        self.action_size=task.action_size
        self.action_low=task.action_low
        self.action_high=task.action_high

        self.actor_local=Actor(self.state_size, self.action_size, self.action_low, self.action_high)
        self.actor_target=Actor(self.state_size, self.action_size, self.action_low, self.action_high)
        self.critic_local=Critic(self.state_size, self.action_size)
        self.critic_target=Critic(self.state_size, self.action_size)

        self.critic_target.model.set_weights(self.critic_local.model.get_weights())
        self.actor_target.model.set_weights(self.actor_local.model.get_weights())

        self.mu=0
        self.theta=0.2 
        self.sigma=0.005 # random noise
        self.noise=Noise(self.action_size, self.mu, self.theta, self.sigma)
        self.gamma=0.9 
        self.tau=0.1 
        self.best_score=-np.inf
        self.score=0
        
        self.buffer_size=100000
        self.batch_size=64
        self.memory=ReplayBuffer(self.buffer_size, self.batch_size)
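Several of these agents build Noise from (size, mu, theta, sigma), which is the usual parameterization of an Ornstein-Uhlenbeck exploration process. A minimal sketch of such a process, offered only as an assumption of what the imported class might provide; the sample/reset names are not taken from the snippets:

import numpy as np

class OUNoise:
    # Ornstein-Uhlenbeck process: mean-reverting noise for continuous-action exploration.
    def __init__(self, size, mu=0.0, theta=0.15, sigma=0.2):
        self.mu = mu * np.ones(size)
        self.theta = theta
        self.sigma = sigma
        self.reset()

    def reset(self):
        # Restart the process at its long-run mean.
        self.state = np.copy(self.mu)

    def sample(self):
        # dx = theta * (mu - x) + sigma * N(0, 1); drift back toward mu plus Gaussian shock.
        dx = self.theta * (self.mu - self.state) + self.sigma * np.random.randn(len(self.state))
        self.state = self.state + dx
        return self.state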
Example #6
 def __init__(self, sess, scale_u, params):
     self.sess = sess
     self.scale_u = scale_u
     self.__dict__.update(params)
     # CREATE INPUT PLACEHOLDERS
     self.create_input_placeholders()
     # INITIALIZE ACTOR & CRITIC MODELS
     self.agents = [
         Actor(self.sess, self.inputs, i, **self.actor_params)
         for i in [1, 2, 3]
     ]
     self.critic = Critic(self.sess, self.inputs, **self.critic_params)
     # INITIALIZE EXPLORATION MODEL
     self.noise_params = {
         k: np.fromstring(v, sep=",", dtype="f")
         for k, v in self.noise_params.items()
     }
     self.noise = [Noise(**self.noise_params) for _ in range(3)]
     # INITIALIZE REPLAY BUFFER
     self.memory = Memory(self.memory_size)
     # AVERAGE AGENT POLICIES
     avg_pi = [
         tf.reduce_mean(i, axis=0)
         for i in zip(*[x.pi.net_params for x in self.agents])
     ]
     self.avg_op = [
         tf.assign(i, j) for x in self.agents
         for i, j in zip(x.pi.net_params, avg_pi)
     ]
Example #7
    def __init__(self, labels, pubkey, curveID=409, fingerprint="fingerprint"):
        #    global _C
        #    self._C = _C
        self.num = len(labels)
        self.curveID = curveID
        self.pubkey = pubkey
        #    print self.pubkey.export().encode("hex")
        self.lab = {}

        # Store the group we work in
        # precompute tables and the generator
        self.ecgroup = EcGroup(curveID)
        self.gen = self.ecgroup.generator()
        self.order = self.ecgroup.order()

        #    self.ecgroup = _C.EC_GROUP_new_by_curve_name(curveID)
        #    if not _C.EC_GROUP_have_precompute_mult(self.ecgroup):
        #        _C.EC_GROUP_precompute_mult(self.ecgroup, _FFI.NULL);
        #    self.gen = _C.EC_GROUP_get0_generator(self.ecgroup)

        # This is where we store the ECEG ciphertexts
        self.buf = []

        # This DC's weight for noise calculation
        twbw, p_exit, num_of_dc, sum_of_sq = prob_exit(consensus, fingerprint)

        for label in labels:
            #Make session key
            s_priv = self.order.random()
            s_pub = s_priv * self.gen

            alpha = s_pub

            beta = self.pubkey
            beta.pt_mul_inplace(s_priv)

            # Adding noise and setting the resolution
            res_noise = int(Noise(sigma, sum_of_sq, p_exit) * resolution)
            n = Bn(res_noise)

            kappa = self.gen
            kappa = kappa.pt_mul(n)
            beta.pt_add_inplace(kappa)

            del (kappa)
            del (n)

            # Save the ECEG ciphertext
            c = (alpha, beta)
            self.lab[label] = c
            self.buf += [c]

            # Save the resolution
            resolute = Bn(resolution)
            self.resolution = self.gen
            self.resolution = self.resolution.pt_mul(resolute)
            del (resolute)
Example #8
    def __init__(self, width, height):

        self.noise = Noise(256)
        self.cache = dict()
        self.width = width
        self.height = height
        self.screen = pygame.display.set_mode((width, height))
        pygame.display.set_caption('Perlin Noise Demo')
        pygame.font.init()
        self.font = pygame.font.SysFont('Arial', 30)
        self.background = (2, 2, 2)
Example #9
def main(_):
    with tf.Session() as sess:
        env = gym.make(ENV_NAME)
        np.random.seed(RANDOM_SEED)
        tf.set_random_seed(RANDOM_SEED)
        env.seed(RANDOM_SEED)

        print(env.observation_space)
        print(env.action_space)

        state_dim = env.observation_space.shape[0]

        try:
            action_dim = env.action_space.shape[0]
            action_bound = env.action_space.high
            # Ensure action bound is symmetric
            assert np.all(env.action_space.high == -env.action_space.low)
            discrete = False
            print('Continuous Action Space')
        except:  # the original AttributeError clause cannot catch the exception raised here, so catch everything
            action_dim = env.action_space.n
            action_bound = 1
            discrete = True
            print('Discrete Action Space')

        actor = ActorNetwork(sess, state_dim, action_dim, action_bound,
                             ACTOR_LEARNING_RATE, TAU)

        critic = CriticNetwork(sess, state_dim, action_dim,
                               CRITIC_LEARNING_RATE, TAU,
                               actor.get_num_trainable_vars())

        noise = Noise(DELTA, SIGMA, OU_A, OU_MU)
        reward = Reward(REWARD_FACTOR, GAMMA)

        if GYM_MONITOR_EN:
            if not RENDER_ENV:
                env = gym.wrappers.Monitor(env,
                                           MONITOR_DIR,
                                           video_callable=False,
                                           force=True)  # switched to the new gym.wrappers API
                # env.monitor.start(MONITOR_DIR, video_callable=False, force=True)
            else:
                env = gym.wrappers.Monitor(env, MONITOR_DIR, force=True)  # switched to the new gym.wrappers API
                # env.monitor.start(MONITOR_DIR, force=True)

        try:
            train(sess, env, actor, critic, noise, reward, discrete)
        except KeyboardInterrupt:
            pass

        if GYM_MONITOR_EN:
            env.close()
Example #10
    def __init__(self, width, height, prob_deviation=0):
        """

        :param width: int
        :param height: int
        :param prob_deviation: float, optional, probability of some deviation
        """
        self.width = width
        self.height = height
        self._noise = Noise(width, height, low=0.2, high=0.7)
        self.data = self._get_background()
        self._prob_deviation = prob_deviation
        self._prob_deviation_internal = 0.3
Example #11
def select_model():

    net_options = list('to ' + net_name + ' press ' + str(i) for i, net_name in enumerate(MY_NETS))
    message = 'Please select the requested net:\n'
    net_name = get_input(message, net_options, MY_NETS)

    if net_name == DEBLUR:
        deblur = Deblur()
        blur = Blur()
        return deblur, blur, net_name
    else:
        denoise = Denoise()
        noise = Noise()
        return denoise, noise, net_name
Example #12
 def __init__(self, sess, params):
     self.sess = sess
     self.__dict__.update(params)
     # create placeholders
     self.create_input_placeholders()
     # create actor/critic models
     self.actor = Actor(self.sess, self.inputs, **self.actor_params)
     self.critic = Critic(self.sess, self.inputs, **self.critic_params)
     self.noise_params = {k: np.array(list(map(float, v.split(","))))
                          for k, v in self.noise_params.items()}
     self.noise = Noise(**self.noise_params)
     self.ou_level = np.zeros(self.dimensions["u"])
     self.memory = Memory(self.n_mem_objects,
                          self.memory_size)
Example #13
def main(_):
    with tf.Session() as sess:
        env = gym.make(ENV_NAME)
        np.random.seed(RANDOM_SEED)
        tf.set_random_seed(RANDOM_SEED)
        env.seed(RANDOM_SEED)

        print env.observation_space
        print env.action_space

        state_dim = env.observation_space.shape[0]

        try:
            action_dim = env.action_space.shape[0]
            action_bound = env.action_space.high
            # Ensure action bound is symmetric
            assert np.all(env.action_space.high == -env.action_space.low)
            discrete = False
            print "Continuous Action Space"
        except AttributeError:
            action_dim = env.action_space.n
            action_bound = 1
            discrete = True
            print "Discrete Action Space"

        actor = ActorNetwork(sess, state_dim, action_dim, action_bound,
                             ACTOR_LEARNING_RATE, TAU)
        critic = CriticNetwork(sess, state_dim, action_dim,
                               CRITIC_LEARNING_RATE, TAU,
                               actor.get_num_trainable_vars())

        noise = Noise(DELTA, SIGMA, OU_A, OU_MU)
        reward = Reward(REWARD_FACTOR, GAMMA)

        if GYM_MONITOR_EN:
            if not RENDER_ENV:
                env = wrappers.Monitor(env, MONITOR_DIR, force=True)
            else:
                env = wrappers.Monitor(env, MONITOR_DIR, force=True)

        try:
            train(sess, env, actor, critic, noise, reward, discrete)
        except KeyboardInterrupt:
            pass

        #if GYM_MONITOR_EN:
        #env.monitor.close()
        env.close()

    gym.upload(MONITOR_DIR, api_key="sk_JObiOSHpRjw48FpWvI1GA")
Example #14
    def __init__(self, q, labels, authorities, fingerprint):
        self.data = {}
        self.q = q
        for l in labels:
            self.data[l] = 0

        self.keys = [os.urandom(20) for _ in authorities]
        self.keys = dict([(PRF(K, "KEYID"), K) for K in self.keys])
       
        twbw, p_exit, number_exits, sum_of_sq = prob_exit(consensus, fingerprint)
        for _, K in self.keys.iteritems():
            shares = keys_from_labels(labels, K, True, q)
            for (l, s0) in shares:
                noise = Noise(sigma, fingerprint, sum_of_sq, p_exit)
                self.data[l] = (self.data[l] + int((s0 + noise) / resolution)) % q
Example #15
    def transmit(self, data):
        noise_values = Noise(self.noise_level).get_noises(len(data))
        noisy_data = data[:]

        for i in range(len(noisy_data)):
            n = noisy_data[i] + noise_values[i]
            # Keep voltage levels within [0.0, 1.0]
            if n >= 1.0:
                n = .999999999
            elif n < 0.0:
                n = 0.0
            noisy_data[i] = n
            assert (noisy_data[i] >= 0.0
                    and noisy_data[i] <= 1.0)  # double-check

        return noisy_data
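A small usage sketch for the channel above, assuming Noise(noise_level).get_noises(n) returns n additive offsets and that transmit() lives on a class here called Channel; both the class name and the noise level are made up for illustration:

channel = Channel(noise_level=0.05)           # hypothetical owner of transmit()
received = channel.transmit([0.0, 0.25, 0.5, 0.75, 1.0])
assert all(0.0 <= v < 1.0 for v in received)  # every value is perturbed, then clamped into [0.0, 1.0)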
Example #16
    def __init__(self, task):
        # Task (environment) information
        self.task = task
        self.state_size = task.state_size
        self.action_size = task.action_size
        self.action_low = task.action_low
        self.action_high = task.action_high
        self.action_range = self.action_high - self.action_low

        self.w = np.random.normal(
            size=(
                self.state_size, self.action_size
            ),  # weights for simple linear policy: state_space x action_space
            scale=(self.action_range / (2 * self.state_size)
                   ))  # start producing actions in a decent range

        self.actor = Actor(self.state_size, self.action_size, self.action_low,
                           self.action_high)
        self.critic = Critic(self.state_size, self.action_size)

        self.actor_target = Actor(self.state_size, self.action_size,
                                  self.action_low, self.action_high)
        self.critic_target = Critic(self.state_size, self.action_size)

        self.gamma = 0.95
        self.tau = 0.001

        self.best_w = None
        self.best_score = -np.inf

        self.exploration_mu = 0.5
        self.exploration_theta = 0.2
        self.exploration_sigma = 0.4
        self.noise = Noise(self.action_size, self.exploration_mu,
                           self.exploration_theta, self.exploration_sigma)

        self.buffer_size = 100000
        self.batch_size = 32
        self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

        self.best_score = -np.inf
        self.num_steps = 0

        # Episode variables
        self.reset_episode()
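The tau defined above is typically consumed by a soft (Polyak) update from the local networks to their targets after each learning step. A standalone sketch in the same Keras get_weights/set_weights style as this agent; the function is not part of the snippet and the call in the comment is only an assumption:

def soft_update(local_model, target_model, tau):
    # target <- tau * local + (1 - tau) * target, applied weight tensor by weight tensor
    local_weights = local_model.get_weights()
    target_weights = target_model.get_weights()
    new_weights = [tau * lw + (1.0 - tau) * tw
                   for lw, tw in zip(local_weights, target_weights)]
    target_model.set_weights(new_weights)

# e.g. soft_update(self.critic_local.model, self.critic_target.model, self.tau)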
Example #17
    def __init__(self, test=False):
        # device
        if torch.cuda.is_available():
            self.device = torch.device('cuda')
        else:
            self.device = torch.device('cpu')
        #########################################
        """
        Some hand tune config(for developing)
        """
        self.discrete = False
        self.action_dim = 1
        self.state_dim = 3
        self.batch_size = 100
        self.action_low = -2
        self.action_high = 2
        ##########################################
        self.P_online = Actor(state_dim=self.state_dim,
                              action_size=self.action_dim).to(self.device)
        self.P_target = Actor(state_dim=self.state_dim,
                              action_size=self.action_dim).to(self.device)
        self.P_target.load_state_dict(self.P_online.state_dict())
        self.Q_online = Critic(state_size=self.state_dim,
                               action_size=self.action_dim).to(self.device)
        self.Q_target = Critic(state_size=self.state_dim,
                               action_size=self.action_dim).to(self.device)
        self.Q_target.load_state_dict(self.Q_online.state_dict())
        # discounted reward
        self.gamma = 0.99
        self.eps = 0.25
        # optimizer
        self.q_optimizer = torch.optim.Adam(self.Q_online.parameters(),
                                            lr=1e-3)
        self.p_optimizer = torch.optim.Adam(self.P_online.parameters(),
                                            lr=1e-3)
        # saved rewards and actions
        self.replay_buffer = ReplayBuffer()

        # noise
        self.noise = Noise(DELTA, SIGMA, OU_A, OU_MU)
        # Initialize noise
        self.ou_level = 0.

        self.ep_step = 0
Example #18
    def __init__(self, detail_return):
        # Filters parameters
        self.whitening = 0.2
        self.size_filter_gaussian = (9, 9)
        self.type_gaussian = 0
        self.size_median = 3
        self.thresh_threshold = 25
        self.maxvalue_threshold = 255
        self.kernel_size_morphology = ((5, 5), (7, 7), (10, 10), (12, 12),
                                       (15, 15), (17, 17))
        self.color_circle = (255, 255, 0)
        self.thickness_circle = 3
        self.position_text = (120, 30)
        self.font_text = cv2.FONT_HERSHEY_DUPLEX
        self.font_scale = 0.2
        self.min_area = 50000

        self.ellipse = Ellipse()
        self.noise = Noise(self.min_area)
Example #19
def main(_):
    t1 = time.time()
    # Training the model
    with tf.Session() as sess:

        env = PowerSystem()
        # System Info
        state_dim = 20  # We only consider the current on all lines as the state at this moment
        action_dim = 4  # The number of generators
        action_bound = np.array([[-1, 1], [-0.675, 0.675]])

        actor = ActorNetwork(sess, state_dim, action_dim, action_bound,
                             ACTOR_LEARNING_RATE, TAU)

        critic = CriticNetwork(sess, state_dim, action_dim,
                               CRITIC_LEARNING_RATE, TAU,
                               actor.get_num_trainable_vars())

        saver = tf.train.Saver()

        noise = Noise(DELTA, SIGMA, OU_A, OU_MU)

        # Training the model
        train(sess, env, actor, critic, noise, action_bound)

        # # save the variables
        save_path = saver.save(sess, model_path)
        # print("[+] Model saved in file: %s" % save_path)

    # # Testing the model
    # with tf.Session() as sess:
    #
    #     env = PowerSystem()
    #     # System Info
    #     state_dim = 11  # We only consider the Current of all line as state at this moment
    #     action_dim = 2  # The number of generators
    #     action_bound = np.array([[-1, 1], [-0.675, 0.675]])
    #
    #     actor = ActorNetwork(sess, state_dim, action_dim, action_bound, ACTOR_LEARNING_RATE, TAU)
    #     saver = tf.train.Saver()
    #     load_path = saver.restore(sess, model_path)
    #     test(env, actor)
    print('Running time: {} minutes.'.format((time.time() - t1) / 60))
Example #20
    def __init__(self, q, labels, authorities, fingerprint, consensus):
        self.data = {}
        self.q = q
        for l in labels:
            self.data[l] = 0

        twbw, p_exit, num_exits, sum_of_sq = prob_exit(consensus, fingerprint)
        self.keys = [os.urandom(20) for _ in authorities]
        self.keys = dict([(PRF(K, "KEYID"), K) for K in self.keys])
        for _, K in self.keys.iteritems():
            shares = keys_from_labels(labels, K, True, q)
            for (l, s0) in shares:
                #        	noise = Noise(sigma, fingerprint, sum_of_sq, p_exit)
                #                self.data[l] = (self.data[l] + int((s0+noise)/resolution)) % self.q
                self.data[l] = (self.data[l] + int(s0 / resolution)) % self.q
        # Add noise for each website independently
        for label in self.data:
            noise = Noise(sigma, fingerprint, sum_of_sq, p_exit)
            self.data[label] = (self.data[label] +
                                int(noise / resolution)) % self.q
Example #21
    def __init__(self, state_size, action_size, seed, add_noise=True):
        """ Initialize an Agent instance.
        
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            add_noise (bool): Toggle for using the stochastic process
        """

        # Set the parameters.
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)

        # Setting the Actor network (with the Target Network).
        self.actor_local = Actor(state_size, action_size, seed).to(device)
        self.actor_target = Actor(state_size, action_size, seed).to(device)

        # Optimize the Actor using Adam.
        self.actor_optimizer = optim.Adam(self.actor_local.parameters(),
                                          lr=LR_ACTOR)

        # Setting the Critic network (with the Target Network).
        self.critic_local = Critic(state_size, action_size, seed).to(device)
        self.critic_target = Critic(state_size, action_size, seed).to(device)

        # Optimize the Critic using Adam.
        self.critic_optimizer = optim.Adam(self.critic_local.parameters(),
                                           lr=LR_CRITIC,
                                           weight_decay=WEIGHT_DECAY)

        # Set up noise processing.
        if add_noise:
            self.noise = Noise((20, action_size), seed)

        # Use the Replay memory buffer (once per class).
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed,
                                   device)
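The noise object is normally consumed in the agent's act() step, where a sample is added to the deterministic policy output and the result is clipped to the valid action range. A sketch under the assumption that the noise class exposes sample() and that actions live in [-1, 1]; neither detail is shown in the snippet:

import numpy as np
import torch

def act(actor_local, states, noise, device, add_noise=True):
    # Deterministic policy output plus a noise sample, clipped to the assumed action range.
    states_t = torch.from_numpy(states).float().to(device)
    actor_local.eval()
    with torch.no_grad():
        actions = actor_local(states_t).cpu().numpy()
    actor_local.train()
    if add_noise:
        actions += noise.sample()
    return np.clip(actions, -1.0, 1.0)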
Example #22
def main(_):
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
    # with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    with tf.Session() as sess:
        env = StageWorld(LASER_BEAM)
        np.random.seed(RANDOM_SEED)
        tf.set_random_seed(RANDOM_SEED)

        state_dim = LASER_BEAM * LASER_HIST + SPEED + TARGET

        action_dim = ACTION
        action_bound = [0.5, np.pi / 3]
        switch_dim = SWITCH

        discrete = False
        print('Continuous Action Space')
        with tf.name_scope("Actor"):
            actor = ActorNetwork(sess, state_dim, action_dim, action_bound,
                                 ACTOR_LEARNING_RATE, TAU)
        with tf.name_scope("Critic"):
            critic = CriticNetwork(sess,
                                   state_dim,
                                   action_dim,
                                   switch_dim,
                                   CRITIC_LEARNING_RATE,
                                   TAU,
                                   actor.get_num_trainable_vars(),
                                   baseline_rate=10.,
                                   control_variance_flag=CONTROL_VARIANCE)

        noise = Noise(DELTA, SIGMA, OU_A, OU_MU)
        reward = Reward(REWARD_FACTOR, GAMMA)

        try:
            train(sess, env, actor, critic, noise, reward, discrete,
                  action_bound)
        except KeyboardInterrupt:
            pass
Example #23
 def __init__(self,
              width,
              height,
              seed,
              changes=None,
              min_height=128,
              max_height=-128):
     self.width = width
     self.height = height
     self.ground_height = [0 for g in range(self.width)]  # a small optimization
     self.tree_map = [[0 for g in range(self.width)]
                      for i in range(self.height)]
     self.sub_tree_map = [0 for g in range(self.width)]
     self.max_ground = 25
     self.chunk = [[0 for g in range(self.width)]
                   for i in range(self.height)]
     self.seed = seed
     self.cave_generator = TwoDisNoise(self.seed)
     self.generator = Noise(self.seed)
     self.min_height = min_height
     self.max_height = max_height
     self.changes = changes if changes is not None else {}
Example #24
    def _draw_circle(self,
                     radius,
                     thickness,
                     center_x=0,
                     center_y=0,
                     a=1,
                     b=1,
                     turb_size=0,
                     turb_power=0):
        """

        :param radius:
        :param thickness:
        :param center_x:
        :param center_y:
        :param a: float, optional, ellipse: radius on x axis
        :param b: float, optional, ellipse: radius on y axis
        :return:
        """
        noise = Noise(self.width, self.height)
        a = 1 / (a * a)
        b = 1 / (b * b)
        for x in xrange(self.width):
            for y in xrange(self.height):
                xv = (x - self.width / 2.0) / self.width
                yv = (y - self.height / 2.0) / self.height
                dist = math.sqrt((xv - center_x) * (xv - center_x) * a +
                                 (yv - center_y) * (yv - center_y) * b)
                dist += turb_power * noise.turbulence(x, y, turb_size)
                if ((radius - thickness) <= dist) and \
                        (dist <= (radius + thickness)):
                    theta = np.interp(dist,
                                      [radius - thickness, radius + thickness],
                                      [0, math.pi])
                    multiplier = random.uniform(0.2, 0.25)
                    value = math.sin(theta) * multiplier
                    self.data[x, y] -= value
Example #25
import time
import logging
from cnu import app
from noise import Noise
from db import Database

logger = app.logger

max_time_update = 60 * 30 * 1000
max_time_feedback = 60 * 60 * 1000

client = Database.get_client()
db = client.cnu
updates = db.updates
feedback = db.feedback
noise = Noise(db.updates, logger)

cursor = updates.find()
for record in cursor:
    updated = record.get('time')
    uuid = record.get('id')
    current = int(round(time.time() * 1000))
    if (current - updated) > max_time_update or uuid.startswith('SERVER'):
        updates.remove({"id": uuid})

for x in set(['Regattas', 'Commons', 'Einsteins']):
    noise.createNoise(x)

cursor = feedback.find()
for record in cursor:
    updated = record.get('time')
Example #26
logging.basicConfig(
    format='%(asctime)s: %(message)s',
    level='INFO',
    datefmt='%m/%d/%Y %I:%M:%S %p')

parser = argparse.ArgumentParser()
parser.add_argument(
    '--data', type=str, choices=['mnist', 'cifar10'], default='mnist')
parser.add_argument('--test_eval', type=bool, default=False)
args = parser.parse_args()

noises = [
    'vert_shrink25', 'horiz_shrink25', 'both_shrink25', 'light_tint',
    'gradient', 'checkerboard', 'pos_noise', 'mid_noise', 'neg_noise'
]

# Load data
X_train, Y_train, X_test, Y_test = load_data(args.data)

# Flatten image matrices
num_train, num_rows, num_cols, num_channels = X_train.shape
num_test, _, _, _ = X_test.shape
_, num_classes = Y_train.shape

direct_noise = Noise()
path = f'../data/{args.data}/noise'
os.makedirs(path, exist_ok=True)
for noise in noises:
    X_train_noise = direct_noise.apply_noise(X_train, noise)
    np.save(f'{path}/{noise}.npy', X_train_noise)
    logger.info(f"Saved noise {noise}")
Example #27
def create_field(out_id, seed, scale_noise, base_dir):

    #int randseed=1;
    #int seedoffset=321;
    #if (argc>1)
    #randseed=atoi(argv[1]);
    #srand(randseed+seedoffset);

    seed = randomseed

    N = 512  #powf(2,ceil(log2(ny)))
    noise = Noise(N, 0.025, seed)

    n_array = noise.get_address()
    sigma = 50
    amp = 1.50

    omega = 0.01
    speed = 1
    if DEBUG:
        tmax = 2
    else:
        tmax = timeSteps
    width = 1.0 / (2.0 * sigma)

    xrange = xmax - xmin
    yrange = ymax - ymin

    switch_time = 0

    offset = 20
    x = (xmin + offset
         ) + (xrange - offset) * np.random.random()  #(rand()/(RAND_MAX+1.0)))
    y = (ymin + offset
         ) + (yrange - offset) * np.random.random()  #(rand()/(RAND_MAX+1.0)))

    nmax = 0.0
    for i in range(N):
        if nmax < n_array[0, i]:
            nmax = n_array[0, i]

    level = '{0:.0e}'.format(scale_noise).replace("+", "").replace("-", "n")
    out_dir = base_dir + out_id + '-' + level + '/'
    try:
        os.makedirs(out_dir)
    except OSError:
        print('Warning: ' + out_dir + ' directory exists')
        pass

    for t in range(tmax):
        if t == switch_time:
            nextx = (xmin + offset) + (xrange - offset) * np.random.random(
            )  # (rand()/(RAND_MAX+1.0)))
            nexty = (ymin + offset) + (yrange - offset) * np.random.random(
            )  #(rand()/(RAND_MAX+1.0)))
            switch_time = t + 1 + int(
                np.sqrt((x - nextx)**2 + (y - nexty)**2) / speed)
            heading = np.arctan2(nexty - y, nextx - x)

        f = open(out_dir + 't' + str(t) + '.csv', 'w')

        x = x + speed * np.cos(heading)
        y = y + speed * np.sin(heading)
        nav = 0.0
        nc = 0
        for i in range(xmin, xmax):
            for j in range(ymin, ymax):
                nav = nav + n_array[j, i]
                nc = nc + 1

        nav = nav / float(nmax * nc)

        for i in range(xmin, xmax):

            out = []
            for j in range(ymin, ymax):
                x_d = i - x
                y_d = j - y

                grey = 1.0 - amp * np.exp(
                    -np.sqrt(x_d**2 + y_d**2) *
                    width) + scale_noise * (n_array[j, i] / float(nmax) - nav)
                if grey > 1:
                    grey = 1.0
                if grey < 0:
                    grey = 0.0
                #if ((mask.read(i,j)/256.0>0.5)
                #   grey=0.0

                out += ["{0:0.2f}".format(grey)]

            f.write(','.join(out) + '\n')

        f.close()

        noise.advance_timestep()
Example #28
def main(args):

    # Set path to save result
    gym_dir = './' + args['env'] + '_' + args['variation'] + '/gym'

    # Set random seed for reproducibility
    np.random.seed(int(args['seed']))
    tf.set_random_seed(int(args['seed']))

    with tf.Session() as sess:

        # Load environment
        env = gym.make(args['env'])
        env.seed(int(args['seed']))

        # get size of action and state (i.e. output and input for the agent)
        obs = env.reset()
        observation_dim = obs['observation'].shape[0]
        achieved_goal_dim = obs['achieved_goal'].shape[0]
        desired_goal_dim =  obs['desired_goal'].shape[0]
        assert achieved_goal_dim == desired_goal_dim

        # state size = observation size + goal size
        state_dim = observation_dim + desired_goal_dim
        action_dim = env.action_space.shape[0]
        action_highbound = env.action_space.high

        # print out parameters
        print('Parameters:')
        print('Observation Size=', observation_dim)
        print('Goal Size=', desired_goal_dim)
        print('State Size =', state_dim)
        print('Action Size =', action_dim)
        print('Action Upper Boundary =', action_highbound)

        # save to monitor if render
        if args['render']:
            env = gym.wrappers.Monitor(env, gym_dir, force=True)
        else:
            env = gym.wrappers.Monitor(env, gym_dir, video_callable=False, force=True)

        # create actor
        actor = Actor(sess, state_dim, action_dim, action_highbound,
                      float(args['actor_lr']), float(args['tau']),
                      int(args['batch_size']), int(args['hidden_size']))

        # create critic
        critic = Critic(sess, state_dim, action_dim,
                        float(args['critic_lr']), float(args['tau']),
                        float(args['gamma']),
                        actor.n_actor_vars,
                        int(args['hidden_size']))

        # noise
        actor_noise = Noise(mu=np.zeros(action_dim))

        # train the network
        if not args['test']:
            train(sess, env, args, actor, critic, actor_noise, desired_goal_dim, achieved_goal_dim, observation_dim)
        else:
            test(sess, env, args, actor, critic, desired_goal_dim, achieved_goal_dim, observation_dim)

        # close gym
        env.close()

        # close session
        sess.close()
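Inside train(), the actor_noise built above is typically called once per step and added to the actor's action before env.step(). A sketch of that pattern, assuming the actor exposes predict() and the noise object is callable; both are assumptions, not taken from this snippet:

import numpy as np

def rollout_step(env, actor, actor_noise, state, action_highbound):
    # Hypothetical single exploration step: actor output plus a noise sample, clipped to bounds.
    action = actor.predict(state.reshape(1, -1))[0] + actor_noise()
    action = np.clip(action, -action_highbound, action_highbound)
    return env.step(action)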
Example #29
    def __init__(self, labels, pubkey, curveID=409, fingerprint="fingerprint"):
        global _C
        self._C = _C
        self.num = len(labels)
        self.curveID = curveID
        self.pubkey = pubkey
        self.lab = {}

        # Store the group we work in
        # precompute tables and the generator
        self.ecgroup = _C.EC_GROUP_new_by_curve_name(curveID)
        if not _C.EC_GROUP_have_precompute_mult(self.ecgroup):
            _C.EC_GROUP_precompute_mult(self.ecgroup, _FFI.NULL)
        self.gen = _C.EC_GROUP_get0_generator(self.ecgroup)

        # This is where we store the ECEG ciphertexts
        self.buf = []

        # This DC's weight for noise calculation
        twbw, p_exit, num_of_dc, sum_of_sq = prob_exit(consensus, fingerprint)

        for label in labels:
            # Make session key
            session = _C.EC_KEY_new_by_curve_name(curveID)
            _C.EC_KEY_set_group(session, self.ecgroup)
            _C.EC_KEY_generate_key(session)

            s_pub = _C.EC_KEY_get0_public_key(session)
            s_priv = _C.EC_KEY_get0_private_key(session)

            alpha = _C.EC_POINT_new(self.ecgroup)
            _C.EC_POINT_copy(alpha, s_pub)

            beta = _C.EC_POINT_new(self.ecgroup)
            _C.EC_POINT_copy(beta, self.pubkey)
            _C.EC_POINT_mul(self.ecgroup, beta, _FFI.NULL, beta, s_priv,
                            _FFI.NULL)

            # Adding noise and setting the resolution
            n = _C.BN_new()
            res_noise = int(Noise(sigma, sum_of_sq, p_exit) * resolution)

            if res_noise < 0:
                _C.BN_set_word(n, -res_noise)
                _C.BN_set_negative(n, 1)
            else:
                _C.BN_set_word(n, res_noise)

            kappa = _C.EC_POINT_new(self.ecgroup)
            _C.EC_POINT_mul(self.ecgroup, kappa, _FFI.NULL, self.gen, n,
                            _FFI.NULL)
            _C.EC_POINT_add(self.ecgroup, beta, beta, kappa, _FFI.NULL)
            _C.EC_POINT_free(kappa)

            _C.EC_KEY_free(session)
            _C.BN_clear_free(n)

            # Save the ECEG ciphertext
            c = (alpha, beta)
            self.lab[label] = c
            self.buf += [c]

            # Save the resolution
            resolute = _C.BN_new()
            _C.BN_set_word(resolute, resolution)
            self.resolution = _C.EC_POINT_new(self.ecgroup)
            _C.EC_POINT_mul(self.ecgroup, self.resolution, _FFI.NULL, self.gen,
                            resolute, _FFI.NULL)
            _C.BN_clear_free(resolute)
Example #30
    buttons = []
    game_running = True
    tempbool = False

    playerGroup = pg.sprite.Group()
    projectileGroup = pg.sprite.Group()
    enemyGroup = pg.sprite.Group()

    normal_enemy = EnemyStats("N", 150, 2, 0, 100, 100, 30, 10, 1)
    elite_enemy = EnemyStats("E", 1000, 10, 20, 30, 100, 20, 100, 1.5)
    boss_enemy = EnemyStats("B", 10000, 50, 60, 30, 100, 10, 1000000, .4)
    enemy_projectile_normal = ProjStats("NP", 10, 5, 50, 60)
    enemy_projectile_strong = ProjStats("EP", 50, 20, 30, 40)
    player_projectile = ProjStats("PLR_P", 20, 0, 100, 60)

    N = Noise()
    world = World()
    player = Player([CHUNKSIZE / 2, CHUNKSIZE / 2], [0, 0], -1,
                    sprites["player"], 0, 10000,
                    EnemyStats("PLR", 100, 2, 20, 120, 100, 50, 20, 1))
    playerGroup.add(player)
    player_stats = PlayerStats()

    for i in range(10):
        t1 = time.time()
        c1 = Chunk((0, i))
        c1.generate(CHUNKSIZE, N)
        print(time.time() - t1)
    """
    world.load_all()