Example #1
    def house_add_userid(request, userid):
        try:
            url = Url.from_string(request.url)
            naidom.check_url(url)
            naidom.house_add(request, userid)
            FT.ft_update_by_url(url)
        except NaiDomEx as ex:
            return ApiErrorMessage(code=-1, text=ex.message)

        return ApiErrorMessage(code=0, text=u"URL added to the database: " + request.url)
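NaiDomEx and ApiErrorMessage are project-specific names that the snippets rely on but do not show. Purely as an illustrative assumption, a response container compatible with these calls might look like this minimal sketch:

    class ApiErrorMessage(object):
        # Hypothetical sketch: code 0 means success, a negative code signals an error.
        def __init__(self, code, text=u""):
            self.code = code
            self.text = text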
Example #2
    def ft_update_by_url_userid(url, userid):
        """
        Returns information about a house by URL and userid (the keys).
        """
        try:
            naidom.check_url(url)
            FT.ft_update_by_url(url)
        except NaiDomEx as ex:
            return ApiErrorMessage(code=-1, text=ex.message)

        return ApiErrorMessage(code=0)
Example #3
    def house_add_userid(request, userid):
        try:
            url = Url.from_string(request.url)
            naidom.check_url(url)
            naidom.house_add(request, userid)
            FT.ft_update_by_url(url)
        except NaiDomEx as ex:
            return ApiErrorMessage(code=-1, text=ex.message)

        return ApiErrorMessage(code=0,
                               text=u"Добавлен в базу URL:" + request.url)
Example #4
    def ft_update_by_url_userid(url, userid):
        """
        Returns information about a house by URL and userid (the keys).
        """
        try:
            naidom.check_url(url)
            FT.ft_update_by_url(url)
        except NaiDomEx as ex:
            return ApiErrorMessage(code=-1, text=ex.message)

        return ApiErrorMessage(code=0)
Example #5
    def house_delete_userid(request, userid):
        try:
            url = Url.from_string(request.url)
            naidom.check_url(url)
            #naidom.url_user_kind(request.url, userid)
            naidom.house_delete(url, userid)
            FT.ft_update_by_url(url)
        except NaiDomEx as ex:
            return ApiErrorMessage(code=-1, text=ex.message)

        return ApiErrorMessage(code=0, text=u"URL removed from the database: " + request.url)
Example #6
    def house_delete_userid(request, userid):
        try:
            url = Url.from_string(request.url)
            naidom.check_url(url)
            #naidom.url_user_kind(request.url, userid)
            naidom.house_delete(url, userid)
            FT.ft_update_by_url(url)
        except NaiDomEx as ex:
            return ApiErrorMessage(code=-1, text=ex.message)

        return ApiErrorMessage(code=0,
                               text=u"Удалён из базы URL:" + request.url)
Example #7
    def __init__(self):
        self.Vocabulary_Size = 0

        self.l = tf.placeholder(tf.float32, [],
                                name='l')  # Gradient reversal scaler

        self.dataset = Combined_Data_Processor.Model()
        self.bi_dataset = BiCorpus_Data_Processor.Data_holder()

        self.SE = Sentence_Representation.Conv_Rep()
        self.Fea_GEN = FT.Feature_Translator(length=50)

        self.Word_Embedding_Dimension = 100

        self.Y_ = tf.placeholder(dtype=tf.int32, shape=[None])
        self.Y_2 = tf.placeholder(dtype=tf.int32, shape=[None])

        self.Y = tf.placeholder(dtype=tf.float32, shape=[None, 1])
        self.X_P = tf.placeholder(
            dtype=tf.float32,
            shape=[None, None, self.Word_Embedding_Dimension])
        self.X_Q = tf.placeholder(
            dtype=tf.float32,
            shape=[None, None, self.Word_Embedding_Dimension])

        self.X_Eng = tf.placeholder(
            dtype=tf.float32,
            shape=[None, None, self.Word_Embedding_Dimension])
        self.X_Kor = tf.placeholder(
            dtype=tf.float32,
            shape=[None, None, self.Word_Embedding_Dimension])
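The placeholders above follow the TensorFlow 1.x graph-mode pattern: they receive values only when a session runs the graph. A minimal, hedged usage sketch (the tensor and shapes below are illustrative, not the snippet's own graph):

    import numpy as np
    import tensorflow as tf  # TF 1.x-style API, as in the snippet

    x = tf.placeholder(dtype=tf.float32, shape=[None, None, 100])
    mean_x = tf.reduce_mean(x)

    with tf.Session() as sess:
        batch = np.zeros((2, 5, 100), dtype=np.float32)
        print(sess.run(mean_x, feed_dict={x: batch}))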
Example #8
    def __init__(self):
        self.dataset = Combined_Data_Processor.Model()
        self.bi_dataset = BiCorpus_Data_Processor.Data_holder()

        self.SE = Sentence_Representation.Conv_Rep()
        self.Fea_GEN = FT.Feature_Translator(length=50)

        self.Word_Embedding_Dimension = 100

        self.Y = tf.placeholder(dtype=tf.float32, shape=[None, 2])
        self.X_P = tf.placeholder(dtype=tf.float32, shape=[None, None, self.Word_Embedding_Dimension])
        self.X_Q = tf.placeholder(dtype=tf.float32, shape=[None, None, self.Word_Embedding_Dimension])

        self.X_Eng = tf.placeholder(dtype=tf.float32, shape=[None, None, self.Word_Embedding_Dimension])
        self.X_Kor = tf.placeholder(dtype=tf.float32, shape=[None, None, self.Word_Embedding_Dimension])
Example #9
    return wp_E


# In[4]
if __name__ == '__main__':
    # gmode=0: using simulation; gmode=1: using csv file
    t, s = getSignal(sRate=1000, gmode=0)
    wp_(t, s, level=4)  # calculate wavelet packets with level 4

    # In[5]: using actual data
    sRate = 2000  # sampling rate
    t, s = getSignal(source_path, sRate, gmode=1)  # retrieve data from file
    r = [0, len(t)]  # specify the analysis range
    # r = [0,100000]

    ft.fft_(s[r[0]:r[1]], sRate)  # call fft function
    wp_(t[r[0]:r[1]], s[r[0]:r[1]], waveletFun='db3',
        level=4)  # calculate wavelet packets with level n

    # In[5]: apply low pass filter by wden with threshold
    ds = de.lowpassfilter(s, thresh=0.3, wavelet="db4")
    wp_(t, ds, waveletFun='db3', level=4)

    # In[6]: apply wden filter with soft wavelets
    # wden(X,TPTR,SORH,SCAL,N,'wname')
    #    TPTR: threshold selection rule, {'rigrsure', 'heursure', 'sqtwolog', 'minimaxi'}
    #    SORH: ('soft' or 'hard') is for soft or hard thresholding
    #    SCAL: defines multiplicative threshold rescaling
    #        'one' for no rescaling
    #        'sln' for rescaling using a single estimation of level noise based on first-level coefficients
    #        'mln' for rescaling done using level-dependent estimation of level noise
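The commented wden() signature above is MATLAB's, and de.lowpassfilter is the example's own helper, which is not shown here. As a rough sketch of the same soft-thresholding idea with PyWavelets (the function name and threshold rule are assumptions, not the original implementation):

    import numpy as np
    import pywt

    def wavelet_denoise(signal, wavelet="db4", level=4, thresh=0.3):
        # Decompose, soft-threshold the detail coefficients, reconstruct.
        coeffs = pywt.wavedec(signal, wavelet, level=level)
        coeffs[1:] = [pywt.threshold(c, thresh * np.max(np.abs(c)), mode="soft")
                      for c in coeffs[1:]]
        return pywt.waverec(coeffs, wavelet)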
Example #10
dft_time = []
fft_time = []

# Reading data of diff lengths
for i in range(len(path)):
    with open(path[i], 'r') as f:
        for line in f.readlines():
            data_list[i].append(int(line))

for i in range(len(data_list)):

    length.append(len(data_list[i]))

    # Calling DFT
    dft_start = time.time()
    dft_array = FT.DFT(data_list[i])
    dft_end = time.time()

    dft_time.append(dft_end - dft_start)

    #Calling FFT
    fft_start = time.time()
    fft_array = FT.FFT(data_list[i])
    fft_end = time.time()

    fft_time.append(fft_end - fft_start)

    #calculating the mean squared error
    meanSquaredError.append(
        np.abs(np.square(np.subtract(dft_array, fft_array)).mean()))
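FT.DFT and FT.FFT above are the example's own routines and are not shown. As a hedged sketch, a direct O(n^2) DFT can be checked against NumPy's reference FFT with the same mean-squared-error measure (the function name below is an assumption):

    import numpy as np

    def naive_dft(x):
        # Direct evaluation of the DFT definition: X[k] = sum_n x[n] * exp(-2j*pi*k*n/N)
        n = len(x)
        k = np.arange(n)
        w = np.exp(-2j * np.pi * np.outer(k, k) / n)
        return w @ np.asarray(x, dtype=complex)

    x = np.random.rand(256)
    print(np.abs(np.square(np.subtract(naive_dft(x), np.fft.fft(x))).mean()))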
Example #11
project_name = "test_FSI"
#
# Is RBF the mesh deformation technique used?
#
RBF = True
if RBF: print("Mesh deformation is RBF")
#
############## END INPUT DATA ##################################################
#
#
############## MAIN BODY #######################################################
#
#
# Open FINE/Turbo project
#
FT.open_project(path + project_name + ".iec")
#
# ++++++ Save new project +++++++++++++++++++++++++++++++++++++++++++++++++++++
#
#FT.save_project_as("test_Py_FSI",0)
#
# ++++++ Get the number of blades (needed for the IBPA definition) ++++++++++++
#
# Get the row object in order to find the number of blades on the row
# 0 signifies the first row, 1 the second etc
#
row_object = FT.get_row(0)
#
# Get the number of blades on the found row object
#
nb_blades = row_object.get_nb_blades()
Example #12
    def __init__(self):
        """
        To do:
        read the word2vec vectors, convert them to a tensor, and pass them along
        """

        """
        english glove
        """
        in_path_glove = "C:\\Users\\Administrator\\Desktop\\qadataset\\glove6B100d.txt"
        glove_f = codecs.open(in_path_glove, 'r', 'utf-8')

        self.words = []
        self.vectors = []

        arr = []
        for i in range(100):
            pm = 1

            if i % 2 == 0:
                pm = -1

            arr.append(0.002 * pm * i)
        self.words.append('#END')
        self.vectors.append(arr)

        arr = []
        for i in range(100):
            pm = 1

            if i % 2 == 0:
                pm = 0.1
            elif i % 3 == 0:
                pm = -1

            arr.append(0.1 * pm)
        self.words.append('#START')
        self.vectors.append(arr)

        for line in glove_f:
            tokens = line.split(' ')
            self.words.append(tokens.pop(0))
            self.vectors.append(tokens)

        self.Word_Embedding_Dimension = 100  # must be set before this first use; it is assigned again below
        self.vectors = numpy.array(self.vectors, 'f').reshape((-1, self.Word_Embedding_Dimension))

        self.dictionary = numpy.array(self.words)
        self.glove_arg_index = self.dictionary.argsort()
        self.dictionary.sort()
        ###############

        self.word_embedding_eng_tensor = tf.convert_to_tensor(self.vectors, dtype=tf.float32, name='eng_embedding')
        del self.vectors

        """
        korean embedding
        """
        word2vec_kor = codecs.open('C:\\Users\\Administrator\\Desktop\\qadataset\\kor_word2vec_100d', 'r', 'utf-8')
        self.kor_words = []
        self.kor_vectors = []

        arr = []
        for i in range(100):
            pm = 1

            if i % 2 == 0:
                pm = -1

            arr.append(0.002 * pm * i)
        self.kor_words.append('#END')
        self.kor_vectors.append(arr)

        arr = []
        for i in range(100):
            pm = 1

            if i % 2 == 0:
                pm = 0.1
            elif i % 3 == 0:
                pm = -1

            arr.append(0.1 * pm)
        self.kor_words.append('#START')
        self.kor_vectors.append(arr)

        for line in word2vec_kor:
            tokens = line.split('\t')
            self.kor_words.append(tokens.pop(0))
            self.kor_vectors.append(tokens)

        print(self.kor_words[0])
        print(self.kor_words[1])

        self.kor_dictionary = numpy.array(self.kor_words)
        self.word2vec_arg_index = self.kor_dictionary.argsort()
        self.kor_dictionary.sort()
        ###################

        self.word_embedding_kor_tensor = tf.convert_to_tensor(self.kor_vectors, dtype=tf.float32, name='kor_embedding')
        del self.kor_vectors

        self.l = tf.placeholder(tf.float32, [], name='l')  # Gradient reversal scaler

        self.dataset = Combined_Data_Processor.Model()
        self.bi_dataset = BiCorpus_Data_Processor.Data_holder()

        self.SE = Sentence_Representation.Conv_Rep()
        self.Fea_GEN = FT.Feature_Translator(length=50)

        self.Word_Embedding_Dimension = 100

        self.Y_ = tf.placeholder(dtype=tf.int32, shape=[None])
        self.Y_2 = tf.placeholder(dtype=tf.int32, shape=[None])

        self.Y = tf.placeholder(dtype=tf.float32, shape=[None, 2])
        self.X_P = tf.placeholder(dtype=tf.float32, shape=[None, None, self.Word_Embedding_Dimension])
        self.X_Q = tf.placeholder(dtype=tf.float32, shape=[None, None, self.Word_Embedding_Dimension])

        self.X_Eng = tf.placeholder(dtype=tf.float32, shape=[None, None, self.Word_Embedding_Dimension])
        self.X_Kor = tf.placeholder(dtype=tf.float32, shape=[None, None, self.Word_Embedding_Dimension])

        self.eng_vocab_size = self.dictionary.shape[0]
        self.kor_vocab_size = self.kor_dictionary.shape[0]

        self.eng_start_token = tf.placeholder(dtype=tf.int32, shape=[None, 1])
        self.eng_end_token = tf.placeholder(dtype=tf.int32, shape=[None, 1])
        self.kor_start_token = tf.placeholder(dtype=tf.int32, shape=[None, 1])
        self.kor_end_token = tf.placeholder(dtype=tf.int32, shape=[None, 1])

        self.encoder_inputs_eng = tf.placeholder(dtype=tf.int32, shape=[None, None])
        self.encoder_inputs_eng_q = tf.placeholder(dtype=tf.int32, shape=[None, None])
        self.encoder_inputs_kor = tf.placeholder(dtype=tf.int32, shape=[None, None])

        self.shared_inputs_eng = tf.placeholder(dtype=tf.int32, shape=[None, None])
        self.shared_inputs_kor = tf.placeholder(dtype=tf.int32, shape=[None, None])

        # The original line is cut off after "enco"; assuming the English encoder inputs were meant here.
        self.decoder_inputs_eng = tf.concat([self.eng_start_token, self.encoder_inputs_eng], axis=1)
        self.decoder_inputs_kor = tf.placeholder(dtype=tf.int32, shape=[None, None])

        self.class_label = tf.placeholder(dtype=tf.float32, shape=[None, None])
        self.domain_label = tf.placeholder(dtype=tf.float32, shape=[None, None])

        self.hidden_size = 200
        self.keep_prob = 0.8

        self.encoder_eng_length = seq_length(self.encoder_inputs_eng)
        self.encoder_kor_length = seq_length(self.encoder_inputs_kor)

        self.shared_length_eng = seq_length(self.shared_inputs_eng)
        self.shared_length_kor = seq_length(self.shared_inputs_kor)

        self.attention_hidden_size = 400
        self.batch_size = 64
        self.max_decoder_length = 50

        self.embedding_size = 100

        self.word_embedding_eng = tf.get_variable("encoder_embeddings",
                                                  shape=[self.eng_vocab_size, self.embedding_size],
                                                  dtype=tf.float32, trainable=True,
                                                  initializer=tf.constant_initializer(self.word_embedding_eng_tensor))
        self.word_embedding_kor = tf.get_variable("decoder_embeddings",
                                                  shape=[self.kor_vocab_size, self.embedding_size],
                                                  dtype=tf.float32, trainable=True,
                                                  initializer=tf.constant_initializer(self.word_embedding_kor_tensor))
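The argsort index kept next to the in-place sort of each vocabulary above suggests that word-to-row lookups are done by binary search over the sorted dictionary. A small hedged sketch of that pattern with NumPy (the helper name is an assumption, not part of the snippet):

    import numpy as np

    words = np.array(['#END', '#START', 'the', 'cat'])  # original row order of the embedding matrix
    arg_index = words.argsort()                         # sorted position -> original row
    dictionary = np.sort(words)                         # sorted copy for binary search

    def word_to_row(word):
        pos = np.searchsorted(dictionary, word)
        return arg_index[pos]

    print(word_to_row('cat'))  # -> 3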