Code Example #1
File: Test_DQN.py  Project: Frequel/SnakeRL
import numpy as np


def evaluate(model, env, num_steps=1000):
    """
    Evaluate a RL agent
    :param model: (BaseRLModel object) the RL Agent
    :param env: (VecEnv) the vectorized environment to evaluate in
    :param num_steps: (int) number of timesteps to evaluate it
    :return: (float) Mean reward for the last 100 episodes
    """
    episode_rewards = [0.0]
    obs = env.reset()
    for i in range(num_steps):
        # _states are only useful when using LSTM policies
        action, _states = model.predict(obs)
        # here, action, rewards and dones are arrays
        # because we are using vectorized env
        obs, rewards, dones, info = env.step(action)

        # Stats
        episode_rewards[-1] += rewards[0]
        if dones[0]:
            obs = env.reset()
            episode_rewards.append(0.0)
    # Compute mean reward for the last 100 episodes
    mean_100ep_reward = round(np.mean(episode_rewards[-100:]), 1)
    print("Mean reward:", mean_100ep_reward, "Num episodes:",
          len(episode_rewards))

    return mean_100ep_reward
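
A minimal usage sketch for evaluate, assuming the stable-baselines v2 API that the vectorized rewards/dones arrays imply; the environment choice and training budget are illustrative, not from the SnakeRL project:

# Hypothetical harness (assumes stable-baselines v2 and gym are installed).
import gym
from stable_baselines import DQN
from stable_baselines.common.vec_env import DummyVecEnv

env = DummyVecEnv([lambda: gym.make("CartPole-v1")])  # evaluate() expects a vectorized env
model = DQN("MlpPolicy", env, verbose=0)
model.learn(total_timesteps=10000)  # illustrative budget
evaluate(model, env, num_steps=1000)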
Code Example #2
import numpy as np


def score_game(game_core):
    count_ls = []
    np.random.seed(1)  # fix the RANDOM SEED so the experiment is reproducible!
    random_array = np.random.randint(1, 101, size=(1000))
    for number in random_array:
        count_ls.append(game_core(number))
    score = int(np.mean(count_ls))
    print(f"Your algorithm guesses the number in {score} attempts on average")
    return score
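
A hypothetical game_core to run through the scorer above: a binary-search guesser over the same 1..100 range the scorer draws from (the original only shows the scoring harness, so this implementation is an assumption):

def game_core(number):
    # Hypothetical guesser: binary search over 1..100, returns the attempt count.
    count = 0
    low, high = 1, 101
    while True:
        count += 1
        guess = (low + high) // 2
        if guess == number:
            return count
        elif guess < number:
            low = guess + 1
        else:
            high = guess

score_game(game_core)  # prints the average number of attempts over 1000 runs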
Code Example #3
import numpy as np


def dataterm_mask(dataterm_weighting_mode=None,
                  N_std=None,
                  Mask=None,
                  *args,
                  **kwargs):
    # Ported from MATLAB (.\dataterm_mask.m); numpy calls made explicit.
    if dataterm_weighting_mode == 0:
        # uniform weighting
        w = 1
    elif dataterm_weighting_mode == 1:
        # inverse-noise weighting inside the mask
        w = Mask / N_std
        w[np.isnan(w)] = 0
        w[np.isinf(w)] = 0
        w = np.multiply(w, (Mask > 0))
        # normalize so the mean weight inside the mask is 1
        w = w / np.mean(w[Mask > 0])
    return w
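
A small smoke test for the cleaned-up function, with a synthetic noise map and binary mask (both invented here for illustration; the real inputs come from the MATLAB pipeline this port belongs to):

import numpy as np

N_std = np.random.rand(64, 64) + 0.1  # per-voxel noise standard deviation (synthetic)
Mask = (np.random.rand(64, 64) > 0.2).astype(float)  # binary region mask (synthetic)

w = dataterm_mask(dataterm_weighting_mode=1, N_std=N_std, Mask=Mask)
print(w.shape, w[Mask > 0].mean())  # mean weight inside the mask is ~1.0 after normalization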
Code Example #4
import glob
import os

import librosa
import numpy as np
import scipy.io.wavfile


def create_features(genre_list, base_dir):
    for label, genre in enumerate(genre_list):
        genre_dir = os.path.join(base_dir, genre, "*.wav")
        file_list = glob.glob(genre_dir)
        for fn in file_list:
            sample_rate, X = scipy.io.wavfile.read(fn)
            # scipy.fft is a module in modern SciPy, so call the FFT explicitly
            fft_features = np.mean(abs(np.fft.fft(X)[:1000]))
            y, sr = librosa.load(fn, mono=True, duration=30)
            chroma_stft = np.mean(librosa.feature.chroma_stft(y=y, sr=sr))
            spec_cent = np.mean(librosa.feature.spectral_centroid(y=y, sr=sr))
            spec_bw = np.mean(librosa.feature.spectral_bandwidth(y=y, sr=sr))
            rolloff = np.mean(librosa.feature.spectral_rolloff(y=y, sr=sr))
            zcr = np.mean(librosa.feature.zero_crossing_rate(y))
            mfcc = librosa.feature.mfcc(y=y, sr=sr)
            data = [
                chroma_stft, spec_cent, spec_bw, rolloff, zcr, fft_features
            ]
            # append the mean of each MFCC coefficient, then the genre label
            for i in mfcc:
                data.append(np.mean(i))
            data.append(genre)
            write_feat(data, fn)
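
The function relies on a write_feat helper that the original does not show; a plausible CSV-appending stand-in, plus an illustrative call (the genre names and directory are invented):

import csv

def write_feat(data, fn):
    # Hypothetical stand-in: append one feature row per file to a shared CSV
    # (fn is accepted for signature compatibility but not written here).
    with open('features.csv', 'a', newline='') as f:
        csv.writer(f).writerow(data)

create_features(['classical', 'jazz', 'rock'], '/path/to/genres')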
Code Example #5
# ])
# scoring = {'AUC': 'roc_auc', 'Accuracy': make_scorer(accuracy_score)}
# gs_clf = GridSearchCV(text_clf, parameters, cv=5, n_jobs=-1, scoring=scoring, refit='AUC')
# gs_clf.fit(X, y)
# predicted = gs_clf.predict(Z)
# print(np.mean(predicted == v))
# for param_name in sorted(parameters.keys()):
#   print("%s: %r" % (param_name, gs_clf.best_params_[param_name]))


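The snippet never shows how text_clf, X, y, Z, or v are built; a minimal sketch of a pipeline consistent with the commented-out grid search above (the vectorizer parameters and classifier choice are assumptions):

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline

# Hypothetical pipeline; the original's text_clf definition is not shown.
text_clf = Pipeline([
    ('vect', CountVectorizer(ngram_range=(1, 3), max_features=1000)),
    ('clf', MultinomialNB()),
])
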
# Fit the text classification pipeline and predict on the held-out documents
text_clf.fit(X, y)
v_pred = text_clf.predict(Z)
print(v_pred)
if test_reliability:
  from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
  print(np.mean(v_pred == v))
  print(confusion_matrix(v, v_pred))
  print(classification_report(v, v_pred))
  print(accuracy_score(v, v_pred))

# max_features = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000]
# train_results = []
# test_results = []
# for max_feature in max_features:
#   vectorizer = CountVectorizer(max_features=max_feature, ngram_range=(1,3), stop_words=stopwords.words('english'))
#   X = vectorizer.fit_transform(documents).toarray()
#   X_model = vectorizer.fit(documents)
#   # print(X.vocabulary_)
#   vectorizer_new = CountVectorizer(decode_error='replace',max_features=max_feature, ngram_range=(1,3), vocabulary=X_model.vocabulary_)
Code Example #6
import numpy as np


def get_sentence_length_stats(sentences_of_words):
    # mean number of tokens per sentence
    print(np.mean([len(sentence) for sentence in sentences_of_words]))
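
An illustrative call with pre-tokenized sentences (the input data is invented):

get_sentence_length_stats([['the', 'cat', 'sat'], ['hello']])  # prints 2.0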
Code Example #7
    def decode(self):
        display_result = ''
        filtedData = []
        current_length = 0
        current_data = []
        data = []
        flag = 0
        impulse_fft = []
        impulse_fft_tmp = []
        bin = []
        real = []
        self.decode_text.setPlainText(''.join(real))  # clear the output box

        bandpass1 = 3000
        bandpass2 = 7000
        read_signal, fs = sf.read(FILENAME)  # fs is unused; the code relies on a module-level sampling_rate

        wn1 = 2.0 * bandpass1 / sampling_rate
        wn2 = 2.0 * bandpass2 / sampling_rate
        b, a = signal.butter(8, [wn1, wn2], 'bandpass')  # the band may need to be a bit wider
        filtedData = signal.filtfilt(b, a, read_signal)  # read_signal is the signal being filtered

        current_length = len(filtedData)
        current_data = filtedData
        while 1:
            once_check = 0
            corr = []
            corr_index = dict()

            print('finding preamble')
            print('current_data length', len(current_data))
            for i in range(current_length - len(preamble_y)):
                corr.append(
                    np.corrcoef(current_data[i:i + len(preamble_y)],
                                preamble_y)[0, 1])
                if once_check + 24000 == i and once_check != 0:
                    print('corr search window exhausted')
                    break

                if corr[i] > 0.5:
                    if once_check == 0:
                        once_check = i
                        print('once_check', once_check)

                    corr_index[i] = corr[i]

            try:
                # the highest-correlation index marks the preamble position
                flag = max(corr_index.items(), key=operator.itemgetter(1))[0]
            except ValueError:
                # corr_index is empty: no more preambles in the signal
                print('decode finished')
                break

            print(flag)
            data = current_data[flag + len(preamble_y):flag + len(preamble_y) +
                                60000]

            target_fre = 6000
            n = len(data)
            window = 600
            impulse_fft = np.zeros(n)
            for i in range(int(n - window)):
                y = np.fft.fft(data[i:i + int(window) - 1])
                y = np.abs(y)
                index_impulse = round(target_fre / sampling_rate * window)
                impulse_fft[i] = max(y[index_impulse - 2:index_impulse + 2])

            # smooth the impulse spectrum with a +/-5 sample sliding window;
            # copy first so smoothed values are computed from the raw data
            sliding_window = 5
            impulse_fft_tmp = impulse_fft.copy()
            for i in range(1 + sliding_window, n - sliding_window):
                impulse_fft_tmp[i] = np.mean(impulse_fft[i - sliding_window:i +
                                                         sliding_window])
            impulse_fft = impulse_fft_tmp

            #
            #
            # position_impulse = [];
            # half_window = 800;
            #
            #
            #
            # for i in range(n-half_window*2):
            #     if impulse_fft[i+half_window] > 90 and impulse_fft[i+half_window] == max(impulse_fft[i - half_window: i + half_window]):
            #         position_impulse.append(i)
            # message_bin = np.zeros(230400)
            # for i in range(len(position_impulse)):
            #     message_bin[math.ceil(position_impulse / 4800)] = 1
            # real_message_start = 1
            # last_one_index = 1
            # for i in range(3):
            #     if message_bin[i] == 1:
            #         last_one_index = i
            #
            # real_message_start = last_one_index + 1
            #
            # real_message_bin = message_bin[real_message_start:230400]
            #
            # curr_package_index = 0
            # curr_bin_index = 1
            # real_message_bin = np.matrix.H(real_message_bin)

            plus = 0
            adjust = 0
            count = 0
            while 1:
                decode_length = ''
                if adjust == 1:
                    plus += 0.1
                    print(plus)
                for i in range(8):

                    bin = np.mean(impulse_fft[i * 1200:(i + 1) * 1200])
                    bin += plus
                    print(bin)
                    if bin < 5:
                        decode_length = decode_length + '0'
                    else:
                        decode_length = decode_length + '1'

                print(decode_length)
                decode_payload_length = int(decode_length, 2)
                count += 1
                if count == 40:
                    break
                if decode_payload_length != 35:
                    adjust = 1
                else:
                    break

            if count == 40:
                decode_length = ''

                for i in range(8):
                    bin = np.mean(impulse_fft[i * 1200:(i + 1) * 1200])
                    print(bin)
                    if bin < 3:
                        decode_length = decode_length + '0'
                    else:
                        decode_length = decode_length + '1'

                print(decode_length)
                decode_payload_length = int(decode_length, 2)
                adjust = 0

                decode_payload = ''
                for i in range(decode_payload_length):
                    bin = np.mean(impulse_fft[(i + 8) * 1200:(i + 1 + 8) *
                                              1200])

                    if bin < 3:
                        decode_payload = decode_payload + '0'
                    else:
                        decode_payload = decode_payload + '1'
                    print(bin)
            else:
                decode_payload = ''
                for i in range(decode_payload_length):
                    bin = np.mean(impulse_fft[(i + 8) * 1200:(i + 1 + 8) *
                                              1200])

                    if adjust == 1:
                        bin += plus
                    if bin < 5:
                        decode_payload = decode_payload + '0'
                    else:
                        decode_payload = decode_payload + '1'
                    print(bin)

            print(decode_payload)
            # zero-pad the payload up to a multiple of 7 bits
            while len(decode_payload) % 7 != 0:
                decode_payload = decode_payload + '0'

            print(1200 * (int(decode_length, 2) + 8))
            current_data = current_data[1200 *
                                        (int(decode_length, 2) + 8 + 20) +
                                        flag:len(current_data)]
            current_length = len(current_data)
            display_result = display_result + decode_payload

        real = []
        for i in range(int(len(display_result) / 7)):
            real.append(decode_c(display_result[i * 7:(i + 1) * 7]))
        print(real)
        self.decode_text.setPlainText(''.join(real))

        print('result:', ''.join(real))
        global start
        cost_time = "time:" + str(time.time() - start) + '\n'
        decode_payload = decode_payload + '\n'

        file = open("result_translate.txt", 'w')
        file.write(cost_time)
        file.write(decode_payload)
        file.write(''.join(real))
        file.close()
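
The method depends on several module-level names the snippet does not define (FILENAME, sampling_rate, preamble_y, start, and the decode_c helper), plus soundfile as sf, scipy.signal, numpy, operator, and time. A plausible decode_c, assuming each 7-bit group is plain ASCII (an assumption; the original helper is not shown):

def decode_c(bits):
    # Hypothetical helper: interpret a 7-character bit string as one ASCII character.
    return chr(int(bits, 2))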
Code Example #8
    for i in range(len(contours)):
        if (hierarchy[0, i, 3] == -1):  # this takes care of contours nested inside other contours
            M = cv2.moments(contours[i])
            if M["m00"] != 0:
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
            else:
                continue  # because we cannot divide by zero
            # fill the array of distances between contour vertices and the center
            for u in contours[i]:
                aX = u[0][0]
                aY = u[0][1]
                dist = np.sqrt((cX - aX)**2 + (cY - aY)**2)
                sredniDist.append(dist)
            # compute the average distance
            sredniaZSredniegoDista = np.mean(sredniDist)
            it = 0

            # check each vertex's distance against the average distance
            for m in contours[i]:
                aX = m[0][0]
                aY = m[0][1]
                dist = np.sqrt((cX - aX)**2 + (cY - aY)**2)
                if (dist > sredniaZSredniegoDista):
                    pom = sredniaZSredniegoDista / dist
                else:
                    pom = dist / sredniaZSredniegoDista
                if (pom > 0.8):
                    it = it + 1

            if (it > len(contours[i])):
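
The snippet is cut off mid-condition and assumes contours, hierarchy, and sredniDist already exist; they would typically come from something like this (a sketch, with the input image and threshold invented):

import cv2
import numpy as np

img = cv2.imread('shapes.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input image
_, thresh = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
# OpenCV 4 returns (contours, hierarchy); OpenCV 3 returns an extra image first
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
sredniDist = []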
Code Example #9
import csv
import glob
import os

import librosa
import numpy as np
import scipy.io.wavfile
from matplotlib.pyplot import specgram

# fn = path to the music file to featurize
fn = '/Users/macbook/Desktop/Machine Learning/Final Project/monologue.wav'
sample_rate, X = scipy.io.wavfile.read(fn)
# scipy.fft is a module in modern SciPy, so call the FFT explicitly
fft_features = np.mean(abs(np.fft.fft(X)[:1000]))
y, sr = librosa.load(fn, mono=True, duration=30)
chroma_stft = np.mean(librosa.feature.chroma_stft(y=y, sr=sr))
spec_cent = np.mean(librosa.feature.spectral_centroid(y=y, sr=sr))
spec_bw = np.mean(librosa.feature.spectral_bandwidth(y=y, sr=sr))
rolloff = np.mean(librosa.feature.spectral_rolloff(y=y, sr=sr))
zcr = np.mean(librosa.feature.zero_crossing_rate(y))
mfcc = librosa.feature.mfcc(y=y, sr=sr)
data = [chroma_stft, spec_cent, spec_bw, rolloff, zcr, fft_features]
for i in mfcc:
    data.append(np.mean(i))
# write the feature row out for later prediction
with open('data_to_predict.csv', 'w', newline='') as file:
    writer = csv.writer(file)
    writer.writerow(data)
Code Example #10
    def average_rating(self):
        all_ratings = list(map(lambda x: x.rating, self.review_set.all()))
        return np.mean(all_ratings)
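
One caveat: np.mean of an empty list is nan (with a RuntimeWarning), so an object with no reviews returns nan. A guarded variant (the fallback of 0 is a design choice, not from the original):

    def average_rating(self):
        all_ratings = [review.rating for review in self.review_set.all()]
        return np.mean(all_ratings) if all_ratings else 0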
Code Example #11
import numpy as np

mean_x = []
def calcmean(x):
    # mean of each of the 13 feature columns
    for i in range(13):
        mean_x.append(np.mean(x[i]))
    return mean_x
Code Example #12
x11 = data["ptratio"].values
x12 = data["black"].values
x13 = data["lstat"].values

y = data["medv"].values

x = [x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13]
mean_x = []
# calc mean of x
def calcmean(x):
    for i in range(13):
        mean_x.append(np.mean(x[i]))
    return mean_x

calcmean(x)
mean_y = np.mean(y)

# calculate values of m and c as y = m*x + c

m = []
def estimateCofficents(x, y):
    # slope of a one-variable least-squares fit for each of the 13 features:
    # m_i = sum((x_i - mean_x_i) * (y - mean_y)) / sum((x_i - mean_x_i) ** 2)
    for i in range(13):
        b = np.sum((x[i] - mean_x[i]) * (y - mean_y)) / np.sum((x[i] - mean_x[i]) ** 2)
        m.append(b)
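
To complete the y = m*x + c fits above, each intercept follows as c_i = mean_y - m_i * mean_x_i; a short continuation (hypothetical, since the original snippet cuts off here):

estimateCofficents(x, y)

# intercept for each one-variable fit: c_i = mean_y - m_i * mean_x_i
c = [mean_y - m[i] * mean_x[i] for i in range(13)]
print(list(zip(m, c)))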