Example #1
    def post(self):
        fields = ["username", "email"]
        q_args = ["username", "password"]
        args = utils.gen_fields(reqparse.RequestParser(), q_args)
        
        for arg in q_args:
            if arg not in args:
                return en_us.BAD_REQUEST
            if args[arg] is None or args[arg] == "":
                return en_us.BAD_REQUEST

        m_length = 0
        users = None
        for field in fields:
            # Run the query once per field instead of repeating it.
            matches = utils.encoder(utils.col.find({field: args['username']}))
            if len(matches) > m_length:
                m_length = len(matches)
                users = matches

        if m_length == 0:
            return en_us.NOT_FOUND

        user = utils.encoder(users)[0]

        if not auth.check_password(args['password'], user['password']):
            return en_us.AUTH_FAILED

        user.update({"token": auth.generate(user)})
        return auth.user(user), 200
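
Several of the examples on this page rely on a utils.gen_fields helper that is not shown here. A minimal sketch of what such a helper might look like, assuming it simply registers each requested field on the parser and returns the parsed arguments (an illustration, not the project's actual code):

from flask_restful import reqparse

def gen_fields(parser, fields):
    # Hypothetical reconstruction of utils.gen_fields -- the real helper is
    # not shown in these examples, so treat this as a sketch only.
    for field in fields:
        parser.add_argument(field, type=str)
    return parser.parse_args()
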
Example #2
    def post(self):
        args = utils.gen_fields(reqparse.RequestParser(),
                                ['username', 'password'])

        fields = ["username", "email"]
        m_length = 0
        users = None
        for field in fields:
            # Run the query once per field instead of repeating it.
            matches = utils.encoder(utils.col.find({field: args['username']}))
            if len(matches) > m_length:
                m_length = len(matches)
                users = matches

        if m_length == 0:
            return en_us.NOT_FOUND

        user = utils.encoder(users)[0]

        if not auth.check_password(args['password'], user['password']):
            return en_us.AUTH_FAILED

        user.update({"token": auth.generate(user)})
        return auth.user(user), 200
Example #3
    def get(self, client_id):
        request_token = request.headers.get('authorization')
        auth_status = auth.verify(client_id, request_token)
        if auth_status != 200:
            return auth_status

        user = utils.encoder(utils.col.find({"id": client_id}))[0]
        return auth.user(utils.encoder(user))
Example #4
    def post(self, clientid, key):
        cur_time = int(time.time())
        user = utils.col.find({"id": clientid})
        if user.count() == 0:
            return en_us.AUTH_FAILED
        user = utils.encoder(user)[0]
        secret = user["salt"]

        try:
            enterance_payload = jwt.decode(key, secret, algorithms=["HS256"])
        except jwt.exceptions.DecodeError:
            return en_us.AUTH_FAILED

        if enterance_payload["expiration"] < cur_time:
            return en_us.AUTH_FAILED

        if enterance_payload['user'] != clientid:
            return en_us.AUTH_FAILED

        # Token is valid, so the password can be reset; read the new password from the request.
        args = utils.gen_fields(reqparse.RequestParser(), ['password'])
        if args["password"] == "" or args["password"] == None:
            return en_us.BAD_REQUEST

        utils.col.update(
            {"id": clientid},
            {"$set": {
                "password": auth.make_password(args["password"])
            }})
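
Example #4 checks its custom "expiration" claim by hand. For reference only: if the token carried the standard "exp" claim instead, PyJWT could enforce expiry on its own. A small sketch of that alternative (not what the example above actually does):

import jwt

def decode_reset_token(key, secret):
    # Variant sketch: with a standard "exp" claim, jwt.decode() raises
    # ExpiredSignatureError by itself once the token is past its expiry.
    try:
        return jwt.decode(key, secret, algorithms=["HS256"])
    except jwt.ExpiredSignatureError:
        return None
    except jwt.exceptions.DecodeError:
        return None
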
Example #5
    def __init__(self, hidden_size, batch_size, learning_rate):
        self.input_tensor = tf.placeholder(tf.float32, [None, 28 * 28])
        with arg_scope([layers.conv2d, layers.conv2d_transpose],
                       activation_fn=tf.nn.elu,
                       normalizer_fn=layers.batch_norm,
                       normalizer_params={'scale': True}):
            with tf.variable_scope('model') as scope:
                encoded = encoder(self.input_tensor, hidden_size * 2)

                mean = encoded[:, :hidden_size]
                stddev = tf.sqrt(tf.exp(encoded[:, hidden_size:]))

                epsilon = tf.random_normal([tf.shape(mean)[0], hidden_size])
                input_sample = mean + epsilon * stddev

                output_tensor = decoder(input_sample)
            with tf.variable_scope('model', reuse=True) as scope:
                self.sampled_tensor = decoder(
                    tf.random_normal([batch_size, hidden_size]))
        vae_loss = self.__get_vae_cost(mean, stddev)
        rec_loss = self.__get_reconstruction_cost(output_tensor,
                                                  self.input_tensor)

        loss = vae_loss + rec_loss
        self.train = layers.optimize_loss(loss,
                                          get_or_create_global_step(),
                                          learning_rate=learning_rate,
                                          optimizer='Adam',
                                          update_ops=[])

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
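
The VAE constructor above delegates its loss terms to private helpers that are not shown. As a reference point only, __get_vae_cost in this kind of model is typically the KL divergence between the encoder's Gaussian and a standard normal prior; a sketch under that assumption (the real method may differ in scaling):

def kl_cost(mean, stddev, epsilon=1e-8):
    # Standard VAE prior term: KL(N(mean, stddev^2) || N(0, 1)), summed over the batch.
    return tf.reduce_sum(0.5 * (tf.square(mean) + tf.square(stddev) -
                                2.0 * tf.log(stddev + epsilon) - 1.0))
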
Example #6
    def post(self):
        q_args = ["username", "email", "password"]
        args = utils.gen_fields(reqparse.RequestParser(), q_args)

        for arg in q_args:
            if arg not in args:
                return en_us.BAD_REQUEST
            if args[arg] is None or args[arg] == "":
                return en_us.BAD_REQUEST

        same_email = utils.col.find({"email": args['email']})
        same_username = utils.col.find({"username": args['username']})

        if same_email.count() > 0:
            return en_us.EMAIL_EXISTS
        if same_username.count() > 0:
            return en_us.UNAME_EXISTS

        user = {
            "username": args['username'],
            "email": args['email'],
            "password": auth.make_password(args['password']),
            "id": account_utils.get_user_id(),
            "time_created": int(time.time())
        }

        user.update({"salt": auth.salt()})

        utils.col.insert(user)

        user.update({"token": auth.generate(user)})

        return auth.user(utils.encoder(user)), 201
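
Registration and login both go through auth.make_password / auth.check_password, whose implementations are not included on this page. Purely as an illustration, one common way to pair them using only the standard library (the real auth module may use bcrypt or another scheme entirely):

import hashlib
import hmac
import os

def make_password(plain, salt=None):
    # Illustrative sketch: salted PBKDF2, stored as "salt$digest".
    salt = salt or os.urandom(16).hex()
    digest = hashlib.pbkdf2_hmac("sha256", plain.encode(), salt.encode(), 100000)
    return salt + "$" + digest.hex()

def check_password(plain, stored):
    # Recompute the digest with the stored salt and compare in constant time.
    salt, digest = stored.split("$", 1)
    candidate = hashlib.pbkdf2_hmac("sha256", plain.encode(), salt.encode(), 100000)
    return hmac.compare_digest(candidate.hex(), digest)
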
Example #7
    def get(self, client_id):
        request_token = request.headers.get('authorization')
        auth_status = auth.verify(client_id, request_token)
        if auth_status != 200:
            return auth_status

        return utils.encoder(utils.get_all_services(client_id))
Example #8
def password_reset(identification):
    x = utils.col.find({"username": identification})
    y = utils.col.find({"email": identification})
    z = utils.col.find({"id": identification})
    user = None
    if x.count() > 0:
        user = x
    elif y.count() > 0:
        user = y
    elif z.count() > 0:
        user = z

    if user is None:
        return 404  # not found

    # get the single user out of the array it's returned in
    user = utils.encoder(user)[0]
    email = user["email"]
    userid = user["id"]
    encoded_jwt = jwt.encode({'expiration': int(
        time.time()) + 600, 'user': userid}, user["salt"], algorithm='HS256')
    encoded_jwt = encoded_jwt.decode('utf-8')
    e_tem = template(
        email, f"https://driplet.cf/reset?={encoded_jwt}&user={userid}")
    s = r.post(
        "https://api.sendgrid.com/v3/mail/send",
        json=e_tem,
        headers={
            "Authorization": "Bearer " + os.getenv("API_KEY"),
            "Content-Type": "application/json"
        }
    )
    if s.status_code != 202:
        return 400
Example #9
def generate(user):
    mem_user = copy(user)
    mem_user.pop('salt')
    mem_user.pop('_id')
    mem_user.pop('password')
    token = jwt.encode(utils.encoder(mem_user),
                       user['salt'] + user['password'], algorithm='HS256')
    return token.decode('utf-8')
Example #10
    def get(self, client_id, service_id):
        request_token = request.headers.get('authorization')
        auth_status = auth.verify(client_id, request_token)
        if auth_status != 200:
            return auth_status

        service = utils.get_service(client_id, service_id)
        if service.count() == 0:
            return en_us.SERVICE_NOT_FOUND

        return utils.encoder(service[0])
Example #11
def verify(client_id, token):
    users = utils.encoder(utils.col.find({"id": client_id}))
    if not users:
        return en_us.NOT_FOUND
    user = users[0]
    try:
        payload = jwt.decode(token,
                             user['salt'] + user['password'],
                             algorithms=['HS256'])
    except Exception:
        return en_us.AUTH_FAILED
    if 'id' in payload:
        if payload['id'] == user['id']:
            return 200
    return en_us.AUTH_FAILED
Example #12
def _process_wav(file_list, outfile, winlen, winstep, n_mcep, mcep_alpha,
                 minf0, maxf0, q_channels, type):
    data_dict = {}
    enc = encoder(q_channels)
    for f in tqdm(file_list):
        wav, sr = load(f, sr=None)

        x = wav.astype(float)
        _f0, t = world.harvest(x,
                               sr,
                               f0_floor=minf0,
                               f0_ceil=maxf0,
                               frame_period=winstep *
                               1000)  # can't adjust window size
        f0 = world.stonemask(x, _f0, t, sr)

        window_size = int(sr * winlen)
        hop_size = int(sr * winstep)
        # get mel
        if type == 'mcc':
            nfft = 2**(window_size - 1).bit_length()
            spec = np.abs(
                stft(x,
                     n_fft=nfft,
                     hop_length=hop_size,
                     win_length=window_size,
                     window='blackman'))**2
            h = sptk.mcep(spec,
                          n_mcep - 1,
                          mcep_alpha,
                          eps=-60,
                          etype=2,
                          itype=4).T
        else:
            h = mfcc(x,
                     sr,
                     n_mfcc=n_mcep,
                     n_fft=int(sr * winlen),
                     hop_length=int(sr * winstep))
        h = np.vstack((h, f0))
        # mulaw encode
        wav = enc(x).astype(np.uint8)

        id = os.path.basename(f).replace(".wav", "")
        data_dict[id] = wav
        data_dict[id + "_h"] = h
    np.savez(outfile, **data_dict)
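
enc = encoder(q_channels) above returns a callable that quantizes the waveform before it is stored as uint8. The factory itself is not shown; a mu-law quantizer is the usual choice for this pattern, so here is a sketch under that assumption (the project's real encoder() may differ):

import numpy as np

def encoder(q_channels):
    # Assumed mu-law quantizer: maps a waveform in [-1, 1] to integer bins
    # in [0, q_channels - 1].
    mu = q_channels - 1

    def encode(x):
        y = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
        return np.floor((y + 1) / 2 * mu + 0.5)

    return encode
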
Example #13
    def post(self, client_id, service_id):
        request_token = request.headers.get('authorization')
        auth_status = auth.verify(client_id, request_token)
        if auth_status != 200:
            return auth_status

        service = utils.get_service(client_id, service_id)
        if service.count() == 0:
            return en_us.NOT_FOUND

        command = {
            "serviceid": service_id,
            "content": utils.encoder(service)[0]['restart_command']
        }
        command = json.dumps(command)
        pub(command.encode('utf-8'))
        return "", 204
def preprocess_cmu(wav_dir, output, *, q_channels, winlen, winstep, n_mcep,
                   mcep_alpha, minf0, maxf0, type):
    in_dir = os.path.join(wav_dir)
    out_dir = os.path.join(output)
    train_data = os.path.join(out_dir, 'train.npz')
    test_data = os.path.join(out_dir, 'test.npz')
    os.makedirs(out_dir, exist_ok=True)

    files = [os.path.join(in_dir, f) for f in os.listdir(in_dir)]
    files.sort()
    train_files = files[:1032]
    test_files = files[1032:]

    feature_fn = partial(get_features,
                         winlen=winlen,
                         winstep=winstep,
                         n_mcep=n_mcep,
                         mcep_alpha=mcep_alpha,
                         minf0=minf0,
                         maxf0=maxf0,
                         type=type)
    n_workers = cpu_count() // 2
    print("Running", n_workers, "processes.")

    data_dict = {}
    enc = encoder(q_channels)
    print("Processing training data ...")
    with ProcessPoolExecutor(n_workers) as executor:
        futures = [executor.submit(feature_fn, f) for f in train_files]
        for future in tqdm(futures):
            name, data, feature = future.result()
            data_dict[name] = enc(data).astype(np.uint8)
            data_dict[name + '_h'] = feature
    np.savez(train_data, **data_dict)

    data_dict = {}
    print("Processing test data ...")
    with ProcessPoolExecutor(n_workers) as executor:
        futures = [executor.submit(feature_fn, f) for f in test_files]
        for future in tqdm(futures):
            name, data, feature = future.result()
            data_dict[name] = enc(data).astype(np.uint8)
            data_dict[name + '_h'] = feature
    np.savez(test_data, **data_dict)

    calc_stats(train_data, out_dir)
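
For orientation, a call to preprocess_cmu might look like the following; the paths and parameter values are placeholders chosen for illustration, not taken from the project:

preprocess_cmu("cmu_arctic/wav", "data",
               q_channels=256, winlen=0.025, winstep=0.01,
               n_mcep=25, mcep_alpha=0.42,
               minf0=71.0, maxf0=800.0, type="mcc")
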
Example #15
    def patch(self, client_id):
        request_token = request.headers.get('authorization')
        auth_status = auth.verify(client_id, request_token)
        if auth_status != 200:
            return auth_status

        args = utils.gen_fields(reqparse.RequestParser(),
                                ['username', 'email', 'password'])

        updates = {}
        for key in args:
            if args[key] is None:
                continue  # skip fields that were not supplied in the request
            if key == "password":
                args[key] = auth.make_password(args[key])
            updates.update({key: args[key]})

        utils.col.update({'id': client_id}, {"$set": updates}, upsert=False)
        user = utils.encoder(utils.col.find({"id": client_id}))[0]

        return auth.user(user), 200
Example #16
    def __init__(self, hidden_size, batch_size, learning_rate, size):
        img_size = size[0] * size[1] * size[2]
        self.input_tensor = tf.placeholder(tf.float32, [batch_size, img_size])
        with arg_scope([layers.conv2d, layers.conv2d_transpose],
                       activation_fn=tf.nn.elu,
                       normalizer_fn=layers.batch_norm,
                       normalizer_params={'scale': True}):
            with tf.variable_scope("model") as scope:
                encoded = encoder(self.input_tensor, hidden_size * 2, size)

                mean = encoded[:, :hidden_size]
                stddev = tf.sqrt(tf.exp(encoded[:, hidden_size:]))

                epsilon = tf.random_normal([tf.shape(mean)[0], hidden_size])
                input_sample = mean + epsilon * stddev

                output_tensor = decoder(input_sample, img_size)

            with tf.variable_scope("model", reuse=True) as scope:
                self.sampled_tensor = decoder(
                    tf.random_normal([batch_size, hidden_size]), img_size)
                self.recons_tensor = output_tensor

        vae_loss = self.__get_vae_cost(mean, stddev)
        rec_loss = self.__get_reconstruction_cost(
            output_tensor,
            self.input_tensor)  # output_tensor: y  input_tensor: x

        loss = vae_loss + rec_loss
        # loss = vae_loss + rec_loss
        self.train = layers.optimize_loss(
            loss,
            tf.contrib.framework.get_or_create_global_step(),
            learning_rate=learning_rate,
            optimizer='Adam',
            update_ops=[])
        # opt = tf.train.AdamOptimizer(2e-4, beta1=0.5)
        # self.train = opt.minimize(loss)

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
Example #17
    def post(self, client_id):
        request_token = request.headers.get('authorization')
        auth_status = auth.verify(client_id, request_token)
        if auth_status != 200:
            return auth_status

        args = utils.gen_fields(reqparse.RequestParser(),
                                ['name', 'description', 'start_command', 'stop_command',
                                 'restart_command', 'status_command', 'log_command'])
        service = args

        service.update(
            {
                "id": services_util.new_id(),
                "associated_to": client_id,
                "logs": []
            }
        )

        utils.services.insert(service)
        return utils.encoder(service), 201
Example #18
                                                 'logs', name),
                                    histogram_freq=1)
checkpoint = callbacks.ModelCheckpoint(os.path.join(pathlib.Path().absolute(),
                                                    'checkpoints', name,
                                                    'model'),
                                       save_best_only=True,
                                       save_weights_only=True)

for i in trange(n_splits, total=n_splits, desc='Splits', unit='split'):
    train_images, train_labels, test_images, test_labels, result = [None] * 5
    ds_data = []
    ds_labels = []

    files = dataset_files[i::n_splits]
    for file in tqdm(files, unit='examples', total=dataset_length):
        ds_labels.append(encoder(pattern.search(file.name).group(1)))
        ds_data.append(
            np.array(Image.open(file.path), dtype=np.uint8).reshape(
                (128, 64, 1)) / 255)

    files = None
    (train_images,
     train_labels), (test_images,
                     test_labels) = ((np.array(ds_data[:split]),
                                      np.array(ds_labels[:split])),
                                     (np.array(ds_data[split:]),
                                      np.array(ds_labels[split:])))
    ds_data, ds_labels = None, None

    model.fit(train_images,
              train_labels,
Example #19
 t = time.time()
 if opt.generate:
     data_all = [
         pool.apply_async(generate_data,
                          args=(opt.data_type, opt.encoding))
         for i in range(num_samples)
     ]
     # data = generate_data(data_type=opt.data_type,
     # encode=opt.encoding)
     if opt.encoding:
         for data in data_all:
             raw_data.append(data.get())
             encoded_data.extend(
                 encoder(data.get(),
                         imageSize,
                         200 * pq.ms,
                         20,
                         fill=0.0))
     else:
         for i, data in enumerate(data_all):
             dat = data[0].to_array().ravel()
             raw_data.append(data[1])
             # Reshape to required format
             dat = dat.reshape((1, imageSize, imageSize))
             binned_data[i] = dat
             # Normalize data
             dat = np.divide(dat, np.max(dat))
             # data = (data - data.mean()) / data.std()
             norm_data[i] = dat
 else:
     # TODO load data
Example #20
    def __init__(self, input_size, hidden_size, batch_size, learning_rate,
                 log_dir):
        self.input_tensor = tf.placeholder(tf.float32, [None, 3 * input_size])
        self.s_t_p_placeholder = tf.placeholder(tf.float32,
                                                [None, hidden_size])
        '''
        ##################################
        with open('params.txt') as f:
            first = f.readline()
            first = first.strip('\n')
            temp = first.split(' ')
            o_p_dim = int(temp[3]);
            s_p_dim = int(temp[4]);
            ln = f.readline()
            for i in range(s_p_dim):
                temp = f.readline()
            self.sig_2_init = np.zeros((s_p_dim, s_p_dim), np.float32)
            for i in range(s_p_dim):
                temp = f.readline().strip('\n').split(' ')
                for j in range(s_p_dim):
                    self.sig_2_init[i,j] = float(temp[j])
            
            eig_val , eig_vec = np.linalg.eig(self.sig_2_init)
            cf = np.sqrt(np.repeat(eig_val,s_p_dim).reshape(s_p_dim,s_p_dim).transpose())
            self.r_2_init = np.multiply(cf,eig_vec)
            
            self.sig_3_init = np.zeros((o_p_dim, o_p_dim), np.float32)
            for i in range(o_p_dim):
                temp = f.readline().strip('\n').split(' ')
                for j in range(o_p_dim):
                    self.sig_3_init[i,j] = float(temp[j])
            
            eig_val , eig_vec = np.linalg.eig(self.sig_3_init)
            cf = np.sqrt(np.repeat(eig_val,o_p_dim).reshape(o_p_dim,o_p_dim).transpose())
            self.r_3_init = np.multiply(cf,eig_vec)
            
            self.a_2_init = np.zeros((s_p_dim, s_p_dim), np.float32)
            for i in range(s_p_dim):
                temp = f.readline().strip('\n').split(' ')
                for j in range(s_p_dim):
                    self.a_2_init[i,j] = float(temp[j])
            
            self.a_3_init = np.zeros((s_p_dim, o_p_dim), np.float32)
            for i in range(s_p_dim):
                temp = f.readline().strip('\n').split(' ')
                for j in range(o_p_dim):
                    self.a_3_init[i,j] = float(temp[j])     
        ###################################
        '''

        with arg_scope([layers.fully_connected], activation_fn=tf.nn.relu):
            with tf.variable_scope("encoder"):
                with tf.variable_scope("encoder_s_t"):
                    self.s_t_minus_1_p = encoder(self.input_tensor[:, :input_size],\
                        hidden_size)
                with tf.variable_scope("encoder_s_t", reuse=True):
                    self.s_t_p = encoder(self.input_tensor[:, input_size:2 * input_size],\
                        hidden_size)
                with tf.variable_scope("encoder_o_t"):
                    self.o_t_p = encoder(self.input_tensor[:, 2 * input_size:],\
                        hidden_size)

            with tf.variable_scope("decoder"):
                with tf.variable_scope("decoder_s_t"):
                    self.output_s_t_minus_1 = decoder(self.s_t_minus_1_p,
                                                      input_size)
                with tf.variable_scope("decoder_s_t", reuse=True):
                    self.output_s_t = decoder(self.s_t_p, input_size)
                with tf.variable_scope("decoder_s_t", reuse=True):
                    self.s_t_decoded = decoder(self.s_t_p_placeholder,
                                               input_size)
                with tf.variable_scope("decoder_o_t"):
                    self.output_o_t = decoder(self.o_t_p, input_size)
            self.output_tensor = tf.concat(
                [self.output_s_t_minus_1, self.output_s_t, self.output_o_t],
                axis=1)

            #self.a_2, self.b_2, self.sigma_2, self.a_3, self.b_3, self.sigma_3 = self._MLE_Gaussian_params()
            self.a_2, self.b_2, self.sigma_2, self.a_3, self.b_3, self.sigma_3 = self._simple_Gaussian_params(
            )
            #self.a_2, self.b_2, self.sigma_2, self.a_3, self.b_3, self.sigma_3 = self._simple_Gaussian_plus_offset_params()
            self.r_2 = tf.cholesky(self.sigma_2)
            self.r_3 = tf.cholesky(self.sigma_3)

            #define reconstruction loss
            reconstruction_loss = tf.reduce_mean(tf.norm(self.output_tensor - \
                self.input_tensor, axis=1))

            # define classification loss
            y_1 = self.s_t_p - tf.matmul(self.s_t_minus_1_p, self.a_2)
            mvn_1 = tf.contrib.distributions.MultivariateNormalFull(
                self.b_2, self.sigma_2)
            #mvn_1 = tf.contrib.distributions.MultivariateNormalTrill(self.b_2, scale_tril=self.r_2)
            pos_samples_1 = mvn_1.sample(batch_size)

            y_2 = self.o_t_p - tf.matmul(self.s_t_p, self.a_3)
            #mvn_2 = tf.contrib.distributions.MultivariateNormalTriL(self.b_3, scale_tril=self.r_3)
            mvn_2 = tf.contrib.distributions.MultivariateNormalFull(
                self.b_3, self.sigma_3)
            pos_samples_2 = mvn_2.sample(batch_size)

            with tf.variable_scope('discriminator'):
                with tf.variable_scope('d1'):
                    pos_samples_1_pred = discriminator(pos_samples_1)
                with tf.variable_scope('d1', reuse=True):
                    neg_samples_1_pred = discriminator(y_1)
                with tf.variable_scope('d2'):
                    pos_samples_2_pred = discriminator(pos_samples_2)
                with tf.variable_scope('d2', reuse=True):
                    neg_samples_2_pred = discriminator(y_2)
            classification_loss_1 = compute_classification_loss(
                pos_samples_1_pred, neg_samples_1_pred)
            classification_loss_2 = compute_classification_loss(
                pos_samples_2_pred, neg_samples_2_pred)
            classification_loss = classification_loss_1 + classification_loss_2

            # define s_t likelihood
            s_diff = self.s_t_p - tf.matmul(self.s_t_minus_1_p, self.a_2)
            s_t_likelihood = tf.reduce_sum(mvn_1.log_prob(s_diff))

            # define o_t likelihood
            o_diff = self.o_t_p - tf.matmul(self.s_t_p, self.a_3)
            o_t_likelihood = tf.reduce_sum(mvn_2.log_prob(o_diff))

            self.likelihood = s_t_likelihood + o_t_likelihood

            # add summary ops
            tf.summary.scalar('likelihood', self.likelihood)
            tf.summary.scalar('s_t_likelihood', s_t_likelihood)
            tf.summary.scalar('o_t_likelihood', o_t_likelihood)
            tf.summary.scalar('classification_loss', classification_loss)
            tf.summary.scalar('classification_loss_1', classification_loss_1)
            tf.summary.scalar('classification_loss_2', classification_loss_2)
            tf.summary.scalar('reconstruction_loss', reconstruction_loss)

            # define references to params
            encoder_params = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope='encoder')
            decoder_params = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope='decoder')
            autoencoder_params = encoder_params + decoder_params
            gaussian_params = [self.a_2, self.a_3, self.r_2, self.r_3]
            discriminator_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, \
                scope='discriminator')

            global_step = tf.contrib.framework.get_or_create_global_step()
            # define training steps
            self.learn_rate = self._get_learn_rate(global_step, learning_rate)

            # update autoencoder params to minimise reconstruction loss
            self.train_autoencoder = layers.optimize_loss(reconstruction_loss, \
                    global_step, self.learn_rate * 0.1, optimizer=lambda lr: \
                    tf.train.AdamOptimizer(lr), variables=\
                    #tf.train.MomentumOptimizer(lr, 0.9), variables=\
                    autoencoder_params, update_ops=[])

            # update discriminator
            self.train_discriminator = layers.optimize_loss(classification_loss, \
                    global_step, self.learn_rate * 10, optimizer=lambda lr: \
                    tf.train.MomentumOptimizer(lr, 0.1), variables=\
                    #tf.train.AdamOptimizer(lr), variables=\
                    discriminator_params, update_ops=[])

            # update encoder params to fool the discriminator
            self.train_encoder = layers.optimize_loss(-classification_loss, \
                    global_step, self.learn_rate , optimizer=lambda lr: \
                    #tf.train.MomentumOptimizer(lr, 0.9), variables=\
                    tf.train.AdamOptimizer(lr), variables=\
                    encoder_params, update_ops=[])

            self.sess = tf.Session()
            self.merged = tf.summary.merge_all()
            self.train_writer = tf.summary.FileWriter(log_dir, \
                self.sess.graph)
            self.sess.run(tf.global_variables_initializer())
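
The adversarial training above calls a compute_classification_loss helper that is not reproduced here. A sketch consistent with how it is used, mirroring the inline sigmoid cross-entropy version in the last example on this page (the real helper may differ):

def compute_classification_loss(pos_samples_pred, neg_samples_pred):
    # Assumed GAN-style discriminator loss: positive samples should be
    # classified as 1, negative samples (encoder outputs) as 0.
    return (tf.losses.sigmoid_cross_entropy(
                tf.ones(tf.shape(pos_samples_pred)), pos_samples_pred) +
            tf.losses.sigmoid_cross_entropy(
                tf.zeros(tf.shape(neg_samples_pred)), neg_samples_pred))
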
Example #21
    def __init__(self, hidden_size, batch_size, learning_rate):
        self.input_tensor = tf.placeholder(tf.float32, [None, 28 * 28])
        self.xs2 = tf.placeholder(tf.float32, [None, 28 * 28])
        self.dis = tf.placeholder(tf.float32, [1, None])
        self.flag = tf.placeholder(tf.float32, [1, None])

        with arg_scope([layers.conv2d, layers.conv2d_transpose],
                       activation_fn=concat_elu,
                       normalizer_fn=layers.batch_norm,
                       normalizer_params={'scale': True}):
            with tf.variable_scope("model"):
                D1 = discriminator(self.input_tensor)  # positive examples
                D_params_num = len(tf.trainable_variables())
                encoded = encoder(self.input_tensor, hidden_size * 2)

                mean = encoded[:, :hidden_size]
                stddev = tf.sqrt(tf.square(encoded[:, hidden_size:]))

                epsilon = tf.random_normal([tf.shape(mean)[0], hidden_size])
                input_sample = mean + epsilon * stddev
                # G = decoder(tf.random_normal([batch_size, hidden_size]))
                G_params_num = len(tf.trainable_variables())
                G = decoder(input_sample)
                self.sampled_tensor = G

            with tf.variable_scope("model", reuse=True):
                D2 = discriminator(G)  # generated examples
                encoded1 = encoder(self.xs2, hidden_size * 2)

                mean1 = encoded1[:, :hidden_size]
                stddev1 = tf.sqrt(tf.square(encoded1[:, hidden_size:]))

                epsilon1 = tf.random_normal([tf.shape(mean1)[0], hidden_size])
                input_sample1 = mean1 + epsilon1 * stddev1

                output_tensor1 = decoder(input_sample1)

        D_loss = self.__get_discrinator_loss(D1, D2)
        G_loss = self.__get_generator_loss(D2, mean, stddev, mean1)

        params = tf.trainable_variables()
        D_params = params[:D_params_num]
        G_params = params[G_params_num:]
        #    train_discrimator = optimizer.minimize(loss=D_loss, var_list=D_params)
        # train_generator = optimizer.minimize(loss=G_loss, var_list=G_params)
        global_step = tf.contrib.framework.get_or_create_global_step()
        self.train_discrimator = layers.optimize_loss(D_loss,
                                                      global_step,
                                                      learning_rate / 10,
                                                      'Adam',
                                                      variables=D_params,
                                                      update_ops=[])
        self.train_generator = layers.optimize_loss(G_loss,
                                                    global_step,
                                                    learning_rate,
                                                    'Adam',
                                                    variables=G_params,
                                                    update_ops=[])

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
Example #22
from utils import discriminator
from sklearn.decomposition import PCA
from vdpmm_maximizePlusGaussian import *
from vdpmm_expectationPlusGaussian import *
from sklearn import metrics

hidden_size = 1
batch_size = 128

input_tensor = tf.placeholder(tf.float32, [None, 28 * 28])
xs2 = tf.placeholder(tf.float32, [None, 28 * 28])
dis = tf.placeholder(tf.float32, [1, None])
flag = tf.placeholder(tf.float32, [1, None])

with tf.variable_scope("model") as scope:
    encoded = encoder(input_tensor, hidden_size * 2)

    mean = encoded[:, :hidden_size]
    stddev = tf.sqrt(tf.square(encoded[:, hidden_size:]))

    epsilon = tf.random_normal([tf.shape(mean)[0], hidden_size])
    input_sample = mean + epsilon * stddev

    output_tensor = decoder(input_sample)

with tf.variable_scope("model") as scope:
    encoded1 = encoder(xs2, hidden_size * 2)

    mean1 = encoded1[:, :hidden_size]
    stddev1 = tf.sqrt(tf.square(encoded1[:, hidden_size:]))
Example #23
    def __init__(self,
                 hidden_size,
                 batch_size,
                 learning_rate,
                 alpha,
                 beta,
                 gamma,
                 sum_dir,
                 attri_num,
                 add_gan=1,
                 GAN_model="V",
                 similarity_layer=4):

        print("\nInitializing model with following parameters:")
        print("batch_size:", batch_size, " learning_rate:", learning_rate,
              " alpha:", alpha, " beta:", beta, " gamma:", gamma)
        print("GAN_model:", GAN_model, " similarity_layer:", similarity_layer,
              "\n")

        self.input_tensor = tf.placeholder(tf.float32, [batch_size, 64, 64, 3])
        #self.input_label  = tf.placeholder(tf.int, [batch_size, attri_num])
        self.visual_attri = tf.placeholder(tf.float32, [hidden_size])

        with arg_scope([layers.conv2d, layers.conv2d_transpose],
                       activation_fn=tf.nn.relu,
                       normalizer_fn=layers.batch_norm,
                       normalizer_params={'scale': True},
                       padding='SAME'):
            with tf.variable_scope("model") as scope:  #Full VAEGAN structure
                # Encoder
                ENC = encoder(self.input_tensor, hidden_size * 2)
                Enc_params_num = len(tf.trainable_variables())

                # Add noise
                self.mean, stddev = tf.split(1, 2, ENC)
                stddev = tf.sqrt(tf.exp(stddev))
                epsilon = tf.random_normal(
                    [tf.shape(self.mean)[0], hidden_size])
                ENC_w_noise = self.mean + epsilon * stddev

                # Decoder / Generator
                self.DEC_of_ENC = decoder(ENC_w_noise)
                Enc_n_Dec_params_num = len(tf.trainable_variables())

                # Discriminator
                if add_gan == 1:
                    DIS_of_DEC_of_ENC = discriminator(self.DEC_of_ENC,
                                                      GAN_model)
                    Gen_dis_sum = tf.scalar_summary(
                        'Gen_dis_mean', tf.reduce_mean(DIS_of_DEC_of_ENC))

            with tf.variable_scope(
                    "model", reuse=True) as scope:  #Computation for Recon_Loss
                if add_gan == 1:
                    Real_Similarity = discriminator(self.input_tensor,
                                                    GAN_model,
                                                    extract=similarity_layer)
                    Gen_Similarity = discriminator(
                        self.DEC_of_ENC, GAN_model, extract=similarity_layer
                    )  #+ tf.random_normal([batch_size, 8, 8, 256])

            with tf.variable_scope(
                    "model", reuse=True) as scope:  #Computation for GAN_Loss
                if add_gan == 1:
                    Real_in_Dis = discriminator(self.input_tensor, GAN_model)
                    Real_dis_sum = tf.scalar_summary(
                        'Real_dis_mean', tf.reduce_mean(Real_in_Dis))
                    Prior_in_Dis = discriminator(
                        decoder(tf.random_normal([batch_size, hidden_size])),
                        GAN_model)
                    Prior_dis_sum = tf.scalar_summary(
                        'Prior_dis_mean', tf.reduce_mean(Prior_in_Dis))

            with tf.variable_scope(
                    "model", reuse=True) as scope:  #Sample from latent space
                self.sampled_tensor = decoder(
                    tf.random_normal([batch_size, hidden_size]))

            with tf.variable_scope(
                    "model", reuse=True) as scope:  #Add visual attributes
                #expand_mean = tf.expand_dims(self.mean, -1)
                print("shape of mean:", np.shape(self.mean),
                      " shape of visual attri:", np.shape(self.visual_attri))
                add_attri = self.mean + np.ones(
                    [batch_size, 1]
                ) * self.visual_attri  #[batch size, hidden size] (broadcasting)
                print("shape of add attri:", tf.shape(add_attri))
                self.with_attri_tensor = decoder(add_attri)

        self.params = tf.trainable_variables()
        self.Enc_params = self.params[:Enc_params_num]
        '''
        print ('Encoder Param:')
        for var in Enc_params:
            print (var.name)
        '''
        self.Dec_params = self.params[Enc_params_num:Enc_n_Dec_params_num]
        '''
        print ('Decoder Param:')
        for var in Dec_params:
            print (var.name)
        '''
        if add_gan == 1:
            self.Dis_params = self.params[Enc_n_Dec_params_num:]
        '''
        print ('Discriminator Param:')
        for var in Dis_params:
            print (var.name)
        '''
        self.Prior_loss = self.__get_prior_loss(self.mean, stddev)
        Prior_loss_sum = tf.scalar_summary('Prior_loss', self.Prior_loss)
        if add_gan == 1:
            self.Recon_loss = self.__get_reconstruction_loss(
                Gen_Similarity, Real_Similarity)
            Recon_loss_sum = tf.scalar_summary('Recon_loss', self.Recon_loss)
            self.GAN_loss = self.__get_GAN_loss(Real_in_Dis, Prior_in_Dis,
                                                DIS_of_DEC_of_ENC, GAN_model)
            GAN_loss_sum = tf.scalar_summary('GAN_loss', self.GAN_loss)
        else:
            self.Recon_loss = self.__get_reconstruction_loss(
                self.DEC_of_ENC, self.input_tensor)
            Recon_loss_sum = tf.scalar_summary('Recon_loss', self.Recon_loss)

        # merge  summary for Tensorboard
        if add_gan == 1:
            self.detached_loss_summary_merged = tf.merge_summary([
                Prior_loss_sum, Recon_loss_sum, GAN_loss_sum, Real_dis_sum,
                Prior_dis_sum, Gen_dis_sum
            ])
            #self.dis_mean_value_summary_merged         =  tf.merge_summary([Real_dis_sum,Prior_dis_sum,Gen_dis_sum])
        else:
            self.detached_loss_summary_merged = tf.merge_summary(
                [Prior_loss_sum, Recon_loss_sum])

        if add_gan == 1:
            enc_loss = self.Prior_loss + beta * self.Recon_loss
            dec_loss = gamma * self.Recon_loss + self.GAN_loss
            dis_loss = (-1) * self.GAN_loss
        else:
            total_loss = self.Prior_loss + beta * self.Recon_loss

        #self.combined_loss_summary_merged          =  tf.merge_summary([self.prior_loss_sum,self.recon_loss_sum,self.GAN_loss_sum])
        if add_gan == 1:
            self.train_enc = layers.optimize_loss(enc_loss, tf.contrib.framework.get_or_create_global_step(\
                ), learning_rate=learning_rate, variables = self.Enc_params, optimizer='RMSProp', update_ops=[])

            self.train_dec = layers.optimize_loss(dec_loss, tf.contrib.framework.get_or_create_global_step(\
                ), learning_rate=learning_rate, variables = self.Dec_params, optimizer='RMSProp', update_ops=[])

            self.train_dis = layers.optimize_loss(dis_loss, tf.contrib.framework.get_or_create_global_step(\
                ), learning_rate=learning_rate * alpha, variables = self.Dis_params, optimizer='RMSProp', update_ops=[])
        else:
            self.train     = layers.optimize_loss(total_loss, tf.contrib.framework.get_or_create_global_step(\
                ), learning_rate=learning_rate, variables = self.params, optimizer='RMSProp', update_ops=[])

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        self.sess.run(tf.initialize_all_variables())

        self.train_writer = tf.train.SummaryWriter(sum_dir + '/train',
                                                   self.sess.graph)
Example #24
    def __init__(self, model, channel_num, batch_size, seq_len, learning_rate,
                 ws, wg, wt, phase, sum_dir):
        if phase == 'train' or phase == 'test':
            self.inputNoiseList = [tf.placeholder(tf.float32, [batch_size, 128, 128, channel_num])\
                for _ in range(seq_len)]
            self.inputCleanList = [tf.placeholder(tf.float32, [batch_size, 128, 128, 3])\
                for _ in range(seq_len)]
        else:
            self.inputNoiseList = [tf.placeholder(tf.float32, [batch_size, 416, 800, channel_num])\
                for _ in range(seq_len)]
            self.inputCleanList = [tf.placeholder(tf.float32, [batch_size, 416, 800, 3])\
                for _ in range(seq_len)]

        with arg_scope(
            [layers.conv2d],
                activation_fn=tf.nn.leaky_relu,
                #normalizer_fn=layers.batch_norm,
                normalizer_params={'scale': True},
                padding='SAME'):
            with tf.variable_scope("model") as scope:  #Full VAEGAN structure
                if phase == 'train' or phase == 'test':
                    inpH, inpW = 128, 128
                else:
                    inpH, inpW = 416, 800
                if model == 'RAE':
                    with tf.name_scope("initalize_RNN_cell"):
                        cell1 = rnn.ConvLSTMCell(2, [inpH, inpW, 32],
                                                 32, [3, 3],
                                                 name='rnn1')
                        cell2 = rnn.ConvLSTMCell(2, [inpH / 2, inpW / 2, 43],
                                                 43, [3, 3],
                                                 name='rnn2')
                        cell3 = rnn.ConvLSTMCell(2, [inpH / 4, inpW / 4, 57],
                                                 57, [3, 3],
                                                 name='rnn3')
                        cell4 = rnn.ConvLSTMCell(2, [inpH / 8, inpW / 8, 76],
                                                 76, [3, 3],
                                                 name='rnn4')
                        cell5 = rnn.ConvLSTMCell(2,
                                                 [inpH / 16, inpW / 16, 101],
                                                 101, [3, 3],
                                                 name='rnn5')
                        cell6 = rnn.ConvLSTMCell(2,
                                                 [inpH / 32, inpW / 32, 101],
                                                 101, [3, 3],
                                                 name='rnn6')

                    # Encoder
                    l1, l2, l3, l4, l5, out = encoderRNN(self.inputNoiseList, batch_size, cell1, cell2, cell3, \
                        cell4, cell5, cell6, (inpH, inpW), reuse_vars=False)
                elif model == "AE":
                    l1, l2, l3, l4, l5, out = encoder(self.inputNoiseList,
                                                      batch_size,
                                                      reuse_vars=False)
                Enc_params_num = len(tf.trainable_variables())

                # Decoder / Generator
                self.denoised_imgList = decoder(l1,
                                                l2,
                                                l3,
                                                l4,
                                                l5,
                                                out, (inpH, inpW),
                                                reuse_vars=False)
                Enc_n_Dec_params_num = len(tf.trainable_variables())

        self.params = tf.trainable_variables()
        self.Enc_params = self.params[:Enc_params_num]
        self.Dec_params = self.params[Enc_params_num:Enc_n_Dec_params_num]
        print(len(self.params))
        for var in self.params:
            print(var.name)

        self.Spatial_loss = self.__get_L1_loss(self.denoised_imgList,
                                               self.inputCleanList)
        Spatial_loss_sum = tf.summary.scalar('Spatial_loss', self.Spatial_loss)
        self.Gradient_loss = self.__get_grad_L1_loss(self.denoised_imgList,
                                                     self.inputCleanList)
        Gradient_loss_sum = tf.summary.scalar('Gradient_loss',
                                              self.Gradient_loss)
        if model == 'RAE':
            self.Temporal_loss = self.__get_tem_L1_loss(
                self.denoised_imgList, self.inputCleanList)
            Temporal_loss_sum = tf.summary.scalar('Temporal_loss',
                                                  self.Temporal_loss)
            # merge  summary for Tensorboard
            self.detached_loss_summary_merged = tf.summary.merge(
                [Spatial_loss_sum, Gradient_loss_sum, Temporal_loss_sum])
            # loss function
            total_loss = ws * self.Spatial_loss + wg * self.Gradient_loss + wt * self.Temporal_loss

        elif model == 'AE':
            self.detached_loss_summary_merged = tf.summary.merge(
                [Spatial_loss_sum, Gradient_loss_sum])
            # loss function
            total_loss = ws * self.Spatial_loss + wg * self.Gradient_loss

        # self.train     = layers.optimize_loss(total_loss, tf.train.get_or_create_global_step(\
        #     ), learning_rate=learning_rate, variables = self.params, optimizer='RMSProp', update_ops=[])

        self.train = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                            beta1=0.9,
                                            beta2=0.99,
                                            epsilon=1e-08,
                                            name='Adam').minimize(
                                                total_loss,
                                                var_list=self.params)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        self.sess.run(tf.global_variables_initializer())
        #.replace('\\','/')
        self.train_writer = tf.summary.FileWriter(sum_dir, self.sess.graph)
Example #25
    def __init__(self, hidden_size, batch_size, learning_rate, log_dir):
        self.input_tensor = tf.placeholder(tf.float32, [None, 28 * 28])
        # add gaussian noise to the input
        input_with_noise = gaussian_noise_layer(self.input_tensor, 0.3)

        with arg_scope([layers.fully_connected], activation_fn=tf.nn.relu):
            with tf.variable_scope("encoder"):
                self.latent_representation = encoder(input_with_noise,
                        hidden_size)
                encoder_params_num = len(tf.trainable_variables())
            with tf.variable_scope('encoder', reuse=True):
                self.true_latent_representation = encoder(self.input_tensor,
                        hidden_size)
            with tf.variable_scope('decoder'):
                self.recons = decoder(self.latent_representation)
                autoencoder_params_num = len(tf.trainable_variables())
            with tf.variable_scope('decoder', reuse=True):
                self.sampled_imgs = decoder(tf.random_normal([batch_size,
                        hidden_size]))

            pos_samples = tf.random_normal([batch_size, hidden_size],
                stddev=5.)
            neg_samples = self.latent_representation
            with tf.variable_scope('discriminator'):
                pos_samples_pred = discriminator(pos_samples)
            with tf.variable_scope('discriminator', reuse=True):
                neg_samples_pred = discriminator(neg_samples)
            #define losses
            reconstruction_loss = tf.reduce_mean(tf.square(self.recons -
                    self.input_tensor)) #* 28 * 28 scale recons loss
            classification_loss = tf.losses.sigmoid_cross_entropy(\
                    tf.ones(tf.shape(pos_samples_pred)), pos_samples_pred) +\
                    tf.losses.sigmoid_cross_entropy(tf.zeros(
                    tf.shape(neg_samples_pred)), neg_samples_pred)
            tf.summary.scalar('reconstruction_loss', reconstruction_loss)
            tf.summary.scalar('classification_loss', classification_loss)
            # define references to params
            params = tf.trainable_variables()
            encoder_params = params[:encoder_params_num]
            decoder_params = params[encoder_params_num:autoencoder_params_num]
            autoencoder_params = encoder_params + decoder_params
            discriminator_params = params[autoencoder_params_num:]
            # record true positive rate and true negative rate
            correct_pred_pos = tf.equal(tf.cast(pos_samples_pred>0, tf.float32),
                tf.ones(tf.shape(pos_samples_pred)))
            self.true_pos_rate = tf.reduce_mean(tf.cast(correct_pred_pos,
                tf.float32))
            correct_pred_neg = tf.equal(tf.cast(neg_samples_pred<0, tf.float32),
                tf.ones(tf.shape(pos_samples_pred)))
            self.true_neg_rate = tf.reduce_mean(tf.cast(correct_pred_neg,
                tf.float32))
            tf.summary.scalar('true_pos_rate', self.true_pos_rate)
            tf.summary.scalar('true_neg_rate', self.true_neg_rate)
            global_step = tf.contrib.framework.get_or_create_global_step()
            self.learn_rate = self._get_learn_rate(global_step, learning_rate)
            self.train_autoencoder = layers.optimize_loss(reconstruction_loss,
                    global_step, self.learn_rate/10, optimizer=lambda lr: \
                    tf.train.MomentumOptimizer(lr, momentum=0.9), variables=
                    autoencoder_params, update_ops=[])
            self.train_discriminator = layers.optimize_loss(classification_loss,
                    global_step, self.learn_rate, optimizer=lambda lr: \
                    tf.train.MomentumOptimizer(lr, momentum=0.1), variables=
                    discriminator_params, update_ops=[])
            self.train_encoder = layers.optimize_loss(-classification_loss,
                    global_step, self.learn_rate/10, optimizer=lambda lr: \
                    tf.train.MomentumOptimizer(lr, momentum=0.1), variables=
                    encoder_params, update_ops=[])
            self.sess = tf.Session()
            self.merged = tf.summary.merge_all()
            self.train_writer = tf.summary.FileWriter(log_dir,
                                      self.sess.graph)
            self.sess.run(tf.global_variables_initializer())
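
The denoising variant above feeds the input through a gaussian_noise_layer helper that is not shown. A minimal sketch, assuming it simply adds zero-mean Gaussian noise with the given standard deviation:

def gaussian_noise_layer(input_tensor, std):
    # Assumed implementation: additive zero-mean Gaussian noise.
    noise = tf.random_normal(tf.shape(input_tensor), mean=0.0, stddev=std)
    return input_tensor + noise
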