Example #1
def ad_connect(username, password, hosts):
    valid, message = validate_connect(username, password, hosts)
    if not valid:
        return None, message

    hosts = get_valid_hosts(hosts)
    host = first(hosts)

    address = 'ldap://{}'.format(host)
    conn = ldap.initialize(address)
    conn.protocol_version = 3
    conn.set_option(ldap.OPT_REFERRALS, 0)

    try:
        conn.simple_bind_s(username, password)
        return conn, 'Successfully authenticated to {}'.format(address)
    except ldap.INVALID_CREDENTIALS:
        return None, 'Invalid credentials for {} on {}'.format(
            username, address)
    except ldap.SERVER_DOWN:
        next_hosts = hosts[1:]
        if next_hosts:
            get_logger().warning('Server is down at %s.  Trying next host...',
                                 address)
            return ad_connect(username, password, next_hosts)
        return None, 'Server is down at {}'.format(address)
    except ldap.LDAPError as e:
        # python-ldap reports error details as a dict in e.args[0]
        detail = e.args[0] if e.args else None
        if isinstance(detail, dict) and 'desc' in detail:
            return None, 'LDAP Error: ' + detail['desc']
        return None, 'LDAP Error: ' + str(e)
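
A minimal usage sketch for ad_connect; the username, password, and host names below are placeholders, and the host list is tried in order with fallback when a server is down:

conn, message = ad_connect(
    'jdoe@example.com',                      # placeholder username
    'secret',                                # placeholder password
    ['dc1.example.com', 'dc2.example.com'],  # placeholder hosts, tried in order
)
if conn is None:
    print('Authentication failed:', message)
else:
    print(message)
    conn.unbind_s()                          # release the connection when done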
Example #2
    def __init__(self, conn, base, filter=None, scope=ldap.SCOPE_SUBTREE):
        self.logger = get_logger(self)
        self.conn = conn            # bound LDAP connection
        self.base = base            # search base DN
        self.filter = filter        # LDAP filter string, e.g. '(objectClass=*)'
        self.scope = scope          # search scope; defaults to the whole subtree
        self.entry_mapping = None
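
A construction sketch for the class above, reusing the connection returned by ad_connect; the class name LdapSearch, the base DN, and the filter string are hypothetical placeholders:

import ldap

conn, _ = ad_connect('jdoe@example.com', 'secret', ['dc1.example.com'])
search = LdapSearch(                    # hypothetical class name
    conn,
    base='dc=example,dc=com',           # placeholder search base DN
    filter='(objectClass=person)',      # standard LDAP filter syntax
    scope=ldap.SCOPE_SUBTREE,           # default: search the whole subtree
)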
Example #3
    def __init__(self, env, config, logger=None):
        # directory for training outputs
        if not os.path.exists(config.output_path):
            os.makedirs(config.output_path)

        # store hyper-params
        self.config = config
        self.logger = logger
        if logger is None:
            self.logger = get_logger(config.log_path)

        self.env = env
        # reorder the observation dims; the first dim is multiplied by the history multiplier
        obs_dim = self.env.observation_dim
        self.observation_dim = [
            obs_dim[1], obs_dim[2], obs_dim[0] * self.config.history_mul
        ]
        # the remaining action_dim - 1 actions are split evenly between move and attack
        self.move_action_dim = (self.env.action_dim - 1) // 2
        self.attack_action_dim = (self.env.action_dim - 1) // 2

        self.lr = self.config.learning_rate
        self.scheduler = LinearSchedule(self.config.rand_begin,
                                        self.config.rand_end,
                                        self.config.rand_steps)

        self._path_net = ("/".join(_path.split('/')[:-1])
                          + "/policy_gradient_net_"
                          + str(self.config.history_mul)
                          + "/policy_gradient.ckpt")

        # build model
        self.build()
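
For concreteness, a sketch of the observation reshaping done above, assuming env.observation_dim is a (channels, height, width) triple; the numbers are illustrative only:

channels, height, width = 3, 84, 84    # illustrative values for env.observation_dim
history_mul = 4                        # illustrative value for config.history_mul
observation_dim = [height, width, channels * history_mul]   # -> [84, 84, 12]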
Example #4
    def __init__(self, env, config, r_seed, logger=None):
        """
        Initialize Policy Gradient Class

        Args:
                env: an OpenAI Gym environment
                config: class with hyperparameters
                r_seed: random seed for the environment
                logger: logger instance from the logging module

        You do not need to implement anything in this function. However,
        you will need to use self.discrete, self.observation_dim,
        self.action_dim, and self.lr in other methods.

        """
        # directory for training outputs
        if not os.path.exists(config.output_path):
            os.makedirs(config.output_path)

        # store hyperparameters
        self.config = config
        self.r_seed = r_seed

        self.logger = logger
        if logger is None:
            self.logger = get_logger(config.log_path)
        self.env = env
        self.env.seed(self.r_seed)


        # discrete vs continuous action space
        self.discrete = isinstance(env.action_space, gym.spaces.Discrete)
        self.observation_dim = self.env.observation_space.shape[0]
        self.action_dim = (self.env.action_space.n if self.discrete
                           else self.env.action_space.shape[0])

        self.lr = self.config.learning_rate

        # build model
        self.build()
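
A minimal usage sketch, assuming the surrounding class is named PG and that the config object carries the attributes read directly in __init__ (both names are hypothetical; self.build() may require further config attributes not shown here):

import gym
from types import SimpleNamespace

# Hypothetical config with only the attributes the constructor reads.
my_config = SimpleNamespace(output_path='results/',
                            log_path='results/log.txt',
                            learning_rate=3e-2)

env = gym.make('CartPole-v0')             # discrete action space
agent = PG(env, my_config, r_seed=42)     # 'PG' is a hypothetical class name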