def multiple_figures():
    mpl.style.use(['seaborn-white', 'seaborn-paper', 'grayscale'])

    #width = 3.39
    #height = 3.39 * (np.sqrt(5) - 1.0) / 2.0
    latexify()


    for title, model in zip(['without', 'gaussian', 'constant'], [without, gaussian, constant]):
        y, y_pred = model(pipe)

        print(title, classifier[0])
        print(class_counter(y))
        print(metrics.classification_report(y, y_pred, labels=labels))


        acc = metrics.accuracy_score(y, y_pred)
        cm = metrics.confusion_matrix(y, y_pred, labels=labels)
        cm = norm_cm(cm)

        cm = pd.DataFrame(cm, index=labels, columns=labels)

        fig, ax = plt.subplots(dpi=92)

        sns.heatmap(cm, vmin=0, vmax=1, annot=True, fmt='.2f', cmap='Greys', ax=ax, cbar=False, square=True)
        format_axes_for_cm(ax)
        ax.set_title(f'accuracy = {acc:.3f}')

        fig.tight_layout()

        ensure_dir('./output/interpolations/')
        fig.savefig(f'./output/interpolations/{title}.pdf', dpi=92, bbox_inches='tight')
        plt.close(fig)
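
Note: norm_cm is never defined in these snippets; since the heatmaps are drawn with vmin=0 and vmax=1, it presumably row-normalizes the confusion matrix. A minimal sketch under that assumption:

import numpy as np

def norm_cm(cm):
    # Divide each row by its total so every true class sums to 1.
    cm = np.asarray(cm, dtype=np.float64)
    row_sums = cm.sum(axis=1, keepdims=True)
    row_sums[row_sums == 0] = 1  # guard against classes with no samples
    return cm / row_sums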
Example #2
def generate_unique_log_folder():
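    # draw random UUID folder names until one does not already exist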
    while True:
        log_folder = os.path.join(LOG_FOLDER, str(uuid.uuid4()))
        if not os.path.exists(log_folder):
            break
    tools.ensure_dir(log_folder)
    return log_folder
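
Likewise, every snippet on this page calls an ensure_dir helper (often tools.ensure_dir) whose definition is never shown, and the examples come from different projects: some pass a directory, some pass a file path (Example #17), and Example #14 uses the returned directory. A minimal sketch covering those uses, assuming the mixed behavior is intended:

import os

def ensure_dir(path):
    # Heuristic: a path with a file extension is treated as a file path,
    # so only its parent directory is created.
    directory = os.path.dirname(path) if os.path.splitext(path)[1] else path
    if directory:
        os.makedirs(directory, exist_ok=True)
    return directory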
Example #3
    def run(self):
        tools.ensure_dir(self._weights_dir)

        self.init_cfg()
        self.init_dataset()
        self._steps_per_epoch = len(self.train_dataloader)
        self._loss_print_interval = self._steps_per_epoch // 10
        self.init_model()

        for layer in self.model.module.module_list:
            if isinstance(layer, YOLOLayer):
                self.yolos.append(layer)

        state_dict = deepcopy(self.model.state_dict())
        for i in range(200):
            self.model.load_state_dict(state_dict)
            hypers = self.random_hyper()
            print(i, hypers)
            fitness = self.fit()
            print(fitness)
            self.records.append({
                'hyper': hypers,
                'fitness': fitness,
            })
            with open('evol2.json', 'w') as f:
                json.dump({'data': self.records}, f)
Example #4
def toMaya(params):
    # print "To Maya - Start"

    # search for camera point group...
    campg = checkCameraGroups()
    if campg is None:
        tde4.postQuestionRequester("Export Maya...",
                                   "Error, there is no camera point group.",
                                   "Ok")
        return

    # getParams
    filepath = params['file_browser'] + '.mel'

    cameras = params['cameras']
    camerasOutSize = params['camerasOutSize']
    camerasFrameStart = params['camerasFirstFrame']

    # openFile
    tools.ensure_dir(filepath)
    f = open(filepath, 'w')

    # write header
    f.write("//\n")
    f.write("// Maya/MEL export data written by %s\n" % tde4.get3DEVersion())
    f.write("//\n")
    f.write("// All lengths are in centimeter, all angles are in degree.\n")
    f.write("//\n\n")

    # write scene group...
    f.write("// create scene group...\n")
    f.write("string $sceneGroupName = `group -em -name \"Scene\"`;\n")

    # write cameras
    exportCameras(f, cameras, camerasFrameStart, params['file_browser'], campg,
                  camerasOutSize, params['date'])

    # write camera points : campg

    exportCameraPoints(f, campg)

    # write moca / Objects
    exportMocaObjects(f, camerasFrameStart[0])

    # write global (scene node) transformation...
    p3d = tde4.getScenePosition3D()
    p3d = convertZup(p3d, yup)
    r3d = tde4.getSceneRotation3D()
    rot = convertToAngles(r3d)
    s = tde4.getSceneScale3D()
    f.write(
        "xform -zeroTransformPivots -rotateOrder zxy -translation %.15f %.15f %.15f -scale %.15f %.15f %.15f -rotation %.15f %.15f %.15f $sceneGroupName;\n\n"
        % (p3d[0], p3d[1], p3d[2], s, s, s, rot[0], rot[1], rot[2]))

    f.write("\n")

    # close file
    f.close()
Example #5
    def save_mp3_to_file(self, mp3_data):
        mp3_folder = os.path.join(self.log_folder, MP3_FOLDERNAME)
        tools.ensure_dir(mp3_folder)

        files = tools.list_files(mp3_folder, ['*.mp3'])
        mp3_filename = os.path.join(mp3_folder, '{:04}.mp3'.format(len(files)))
        with open(mp3_filename, 'wb') as f:
            f.write(mp3_data)
        return mp3_filename
Example #6
    def save_classifier_map_to_file(self, classifier_map):
        map_folder = os.path.join(self.log_folder, MAP_FOLDERNAME)
        tools.ensure_dir(map_folder)

        files = tools.list_files(map_folder, ['*.png'])
        map_filename = os.path.join(map_folder, '{:04}.png'.format(len(files)))

        web_tools.save_map_to_file(classifier_map, map_filename)

        return map_filename
Example #7
    def save_drawing_to_file(self, drawing_data):
        drawing_folder = os.path.join(self.log_folder, DRAWING_FOLDERNAME)
        tools.ensure_dir(drawing_folder)

        drawing_files = tools.list_files(drawing_folder, ['*.json'])
        drawing_filename = os.path.join(
            drawing_folder, '{:04}.json'.format(len(drawing_files)))
        tools.save_json(drawing_filename, drawing_data)

        return drawing_filename
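Example #8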
def perform_multilinear_regression(df_anova):

    params = ['u', 'm', 'n', 'h', 'q']
    combinations = []

    ### create all possible combinations between u, m, n, h, q
    for r in range(1, len(params) + 1):
        combinations += list(itertools.combinations(params, r))
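    # e.g. ('u',), ('m',), ..., ('u', 'm'), ..., ('u', 'm', 'n', 'h', 'q')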

    ### create all possible formulas from the combinations, e.g. 'err~u+m'
    formulas = []
    for comb in combinations:
        formulas.append('err~' + '+'.join(comb))

    ### making dataframes for the different probabilities
    df_a = df_anova[df_anova['prob'] == 'a']
    df_b = df_anova[df_anova['prob'] == 'b']
    df_ab = df_anova[df_anova['prob'] == 'ab']
    dfs = {'a': df_a, 'b': df_b, 'ab': df_ab}

    ### calculate the regression model for each combination for each probability
    rows = []
    for formula in formulas:
        for prob, current_df in dfs.items():
            reg_model = ols(formula, data=current_df).fit()
            rows.append({
                'prob': prob,
                'formula': formula,
                'R_sq': reg_model.rsquared,
                'p_value': reg_model.f_pvalue,
            })

    df_reg_models = pd.DataFrame(rows).sort_values(by='R_sq')

    ensure_dir('analysis/reg_results.csv')
    df_reg_models.to_csv('analysis/reg_results.csv')

    # plot R^2 as function of parameters in the model.
    for prob, current_df in dfs.items():
        fig, ax = plt.subplots(1, 1)
        cdf = df_reg_models[df_reg_models['prob'] == prob]
        g = sns.pointplot(x='formula', y='R_sq', data=cdf, ax=ax)
        g.set_xticklabels(g.get_xticklabels(), rotation=45)
        g.set_title(r"$R^2$" " of {} as a function of combination".format(prob))
        fig_name = 'figs/r_sq_p{}.png'.format(prob)
        ensure_dir(fig_name)
        fig.savefig(fig_name, dpi=300)
Example #9
def main():
    mpl.style.use(['seaborn-white', 'seaborn-paper', 'grayscale'])
    latexify()

    w_sizes = (5, 10, 50, 100)

    for (w_prr, w_history) in it.product(w_sizes, repeat=2):
        y, y_pred = different_window_sizes(w_prr, w_history)

        acc = metrics.accuracy_score(y, y_pred)
        prec = metrics.precision_score(y,
                                       y_pred,
                                       average='weighted',
                                       labels=labels)
        recall = metrics.recall_score(y,
                                      y_pred,
                                      average='weighted',
                                      labels=labels)
        f1 = metrics.f1_score(y, y_pred, average='weighted', labels=labels)

        print(
            f'& {w_history}\t& {w_prr}\t& {acc:.3f}\t& {prec:.3f}\t& {recall:.3f}\t& {f1:.3f}'
        )

        #print(f'Wh=={w_history}; Wprr=={w_prr}')
        #print(metrics.classification_report(y, y_pred, labels=labels))

        cm = metrics.confusion_matrix(y, y_pred, labels=labels)
        cm = norm_cm(cm)
        cm = pd.DataFrame(cm, index=labels, columns=labels)

        fig, ax = plt.subplots(dpi=92)
        sns.heatmap(cm,
                    vmin=0,
                    vmax=1,
                    annot=True,
                    fmt='.2f',
                    cmap='Greys',
                    ax=ax,
                    cbar=False,
                    square=True)
        #ax.set_title(f'$\\mathrm{{Acc}}(W_{{\\mathrm{{PRR}}}}={w_prr}, W_{{\\mathrm{{history}}}}={w_history})={acc:.3f}$')
        ax.set_title(
            f'Accuracy = {acc:.3f}\n(prec = {prec:.3f}, rec = {recall:.3f})')
        format_axes_for_cm(ax)

        fig.tight_layout()

        ensure_dir('./output/w_sizes/')
        fig.savefig(f'./output/w_sizes/Wprr{w_prr}_Wh{w_history}.pdf',
                    dpi=92,
                    bbox_inches='tight')
        plt.close(fig)
Example #10
    def save_learner_logs_to_file(self, learner_logs):
        learner_logs_folder = os.path.join(self.log_folder,
                                           LEARNER_LOGS_FOLDERNAME)
        tools.ensure_dir(learner_logs_folder)

        files = tools.list_files(learner_logs_folder, ['*.json'])
        learner_logs_filename = os.path.join(learner_logs_folder,
                                             '{:04}.json'.format(len(files)))

        tools.save_json(learner_logs_filename, learner_logs)

        return learner_logs_filename
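
Examples #5, #6, #7, and #10 repeat the same pattern: ensure a subfolder of the log folder exists, count the files already in it, and write the new payload under the next zero-padded name. A hedged generalization of that pattern (the save_numbered_file name and the write_fn callback are illustrative assumptions, and it relies on the same tools module as the examples):

import os

def save_numbered_file(log_folder, subfolder, pattern, write_fn):
    # Make sure the target subfolder exists.
    folder = os.path.join(log_folder, subfolder)
    tools.ensure_dir(folder)

    # Name the new file after the count of files already present.
    files = tools.list_files(folder, [pattern])
    extension = pattern.lstrip('*')
    filename = os.path.join(folder, '{:04}{}'.format(len(files), extension))

    write_fn(filename)
    return filename

# Example #5's body would then reduce to something like:
# return save_numbered_file(self.log_folder, MP3_FOLDERNAME, '*.mp3',
#                           lambda fn: open(fn, 'wb').write(mp3_data))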
Example #11
 def run(self):
     tools.ensure_dir(self._weights_dir)
     self.init_cfg()
     self.init_dataset()
     # number of steps in one training epoch
     self._steps_per_epoch = len(self.train_dataloader)
     # how many times per epoch to print the loss
     self._loss_print_interval = self._steps_per_epoch // 5
     self.init_model()
     self.init_evaluator()
     self.init_optimizer()
     self.init_losses()
     if self._sparse_train:
         self.bns = tools.get_bn_layers(self.model)
     self.train()
Example #12
    def play(self, n_episodes=10, save_images=True, render_on_screen=False):
        log.info("Playing catch with the trained agent ({} episodes)".format(n_episodes))
        if save_images:
            output_folder = "{}/images".format(self.output_folder)
            ensure_dir(output_folder)
            log.info("Images will be saved in the folder: {}".format(output_folder))

        try:
            wins = 0
            for e in range(n_episodes):
                # The episode starts with resetting the environment.
                observation = self.env.reset()
                if render_on_screen:
                    self.env.render(mode='human')

                done = False

                c = 0
                if save_images:
                    # save the initial observation as an image
                    if not render_on_screen:
                        self.env.render(mode='matplotlib')
                    plt.savefig("{}/{:02d}_{:02d}.png".format(output_folder, e, c))
                while not done:
                    c += 1
                    # get next action
                    action = self.choose_action(observation, 'greedy')
                    # apply action, get rewards and new state
                    observation, reward, done, info = self.env.step(action)
                    if render_on_screen:
                        self.env.render(mode='human')

                    # Count the number of wins
                    if reward == 1:
                        wins += 1

                    if save_images:
                        # save the observation as an image
                        if not render_on_screen:
                            self.env.render(mode='matplotlib')
                        plt.savefig("{}/{:02d}_{:02d}.png".format(output_folder, e, c))

        except KeyboardInterrupt:
            print("")
            log.warning('Playing interrupted by user')

        log.info("Won {} out of {} games ({} %)".format(wins, n_episodes, 100 * wins / n_episodes))
Example #13
 def save_audio(self, save_audio_dir, save_audio_name):
     tools.ensure_dir(save_audio_dir)
     if self.audioclip is None:
         print('Video has no audio')
     else:
         self.audioclip.write_audiofile(os.path.join(save_audio_dir, save_audio_name), nbytes=2, codec='pcm_s16le',
                                        bitrate='1000k', verbose=True)
Example #14
 def save_frames(self, save_video_dir):
     out_dir = tools.ensure_dir(save_video_dir)
     num_frame = int(self.videoclip.fps * self.videoclip.duration)
     for i, frame in enumerate(self.videoclip.iter_frames()):
         Image.fromarray(frame).save('{0}/{1}.jpeg'.format(out_dir, i))
         print('saving frame {0}/{1}'.format(i, num_frame - 1))
     with open(config.frames_output_dir + '/fps.txt', 'w') as f:
         f.write(str(self.videoclip.fps))
Example #15
    def save_latest(self, directory, model_and_loss, stats_dict, store_as_best=False):
        # -----------------------------------------------------------------------------------------
        # Make sure directory exists
        # -----------------------------------------------------------------------------------------
        tools.ensure_dir(directory)

        # check previous latest file version
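        # The 'shadow' flag alternates writes between two checkpoint files,
        # so a crash in the middle of torch.save() cannot corrupt the only
        # saved copy.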
        latest_statistics_filename = os.path.join(
            directory, self._prefix + self._latest_postfix + ".json")
        if os.path.isfile(latest_statistics_filename):
            statistics = tools.read_json(latest_statistics_filename)
            shadow_is_latest = statistics['shadow']
        else:
            shadow_is_latest = True
        stats_dict['shadow'] = not shadow_is_latest

        # -----------------------------------------------------------------------------------------
        # Save
        # -----------------------------------------------------------------------------------------
        save_dict = dict(stats_dict)
        save_dict[self._model_key] = model_and_loss.state_dict()

        if shadow_is_latest:
            latest_checkpoint_filename = os.path.join(
                directory, self._prefix + self._latest_postfix + self._extension)
        else:
            latest_checkpoint_filename = os.path.join(
                directory, self._prefix + self._latest_postfix + '_shadow' + self._extension)

        torch.save(save_dict, latest_checkpoint_filename)
        tools.write_json(data_dict=stats_dict, filename=latest_statistics_filename)

        # -----------------------------------------------------------------------------------------
        # Possibly store as best
        # -----------------------------------------------------------------------------------------
        if store_as_best:
            best_checkpoint_filename = os.path.join(
                directory, self._prefix + self._best_postfix + self._extension)

            best_statistics_filename = os.path.join(
                directory, self._prefix + self._best_postfix + ".json")

            logging.info("Saved checkpoint as best model..")
            shutil.copyfile(latest_checkpoint_filename, best_checkpoint_filename)
            shutil.copyfile(latest_statistics_filename, best_statistics_filename)
Example #16
 def __init__( self, env
             , memory_maxlen = 1000
             , net = None
             , discount = 0.9
             , output_folder = "output"
             ):
     log.info("Creating an agent to play Catch")
     self.env = env
     self.memory = deque(maxlen=memory_maxlen)
     if net is None:
         self.net = qnet( (None, self.env.grid_size, self.env.grid_size)
                        , self.env.action_space.n)
     else:
         self.net = net
     self.discount = discount
     ensure_dir(output_folder)
     self.output_folder = output_folder
     self.results_filename = "{}/results.csv".format(self.output_folder)
Example #17
 def __init__(self, broker, account, local_base_dir, export_base_dir):
     self.trading_day = time.strftime('%Y%m%d', time.localtime(time.time()))
     self.broker = broker
     self.account = account
     self.local_log_path = '%s\\%s\\%s.log' % (local_base_dir, account,
                                               self.trading_day)
     self.export_basedir = '%s\\%s' % (export_base_dir, account)
     self.export_dir = '%s\\%s\\%s' % (export_base_dir, account,
                                       self.trading_day)
     self.export_log_path = '%s\\export.log' % (self.export_dir)
     self.src_property_path = '%s\\property.txt' % (self.export_dir)
     self.src_entrust_path = '%s\\entrust.txt' % (self.export_dir)
     self.src_trade_path = '%s\\trade.txt' % (self.export_dir)
     self.dst_fund_path = '%s\\_fund.txt' % (self.export_dir)
     self.dst_position_path = '%s\\_position.txt' % (self.export_dir)
     self.dst_entrust_path = '%s\\_entrust.txt' % (self.export_dir)
     self.dst_trade_path = '%s\\_trade.txt' % (self.export_dir)
     tools.ensure_dir(self.local_log_path)
     tools.ensure_dir(self.export_log_path)
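
The '%s\\%s' formatting above hard-codes Windows path separators. A platform-neutral sketch of the same path layout (the build_paths name and the returned dict are illustrative assumptions):

import os
import time

def build_paths(account, local_base_dir, export_base_dir):
    trading_day = time.strftime('%Y%m%d')
    export_dir = os.path.join(export_base_dir, account, trading_day)
    return {
        'local_log': os.path.join(local_base_dir, account, trading_day + '.log'),
        'export_log': os.path.join(export_dir, 'export.log'),
        'src_trade': os.path.join(export_dir, 'trade.txt'),
        'dst_trade': os.path.join(export_dir, '_trade.txt'),
    }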
Example #18
    def save_latest(self,
                    directory,
                    model_and_loss,
                    stats_dict,
                    store_as_best=False):
        # -----------------------------------------------------------------------------------------
        # Make sure directory exists
        # -----------------------------------------------------------------------------------------
        tools.ensure_dir(directory)

        # -----------------------------------------------------------------------------------------
        # Save
        # -----------------------------------------------------------------------------------------
        save_dict = dict(stats_dict)
        save_dict[self._model_key] = model_and_loss.state_dict()

        latest_checkpoint_filename = os.path.join(
            directory, self._prefix + self._latest_postfix + self._extension)

        latest_statistics_filename = os.path.join(
            directory, self._prefix + self._latest_postfix + ".json")

        torch.save(save_dict, latest_checkpoint_filename)
        tools.write_json(data_dict=stats_dict,
                         filename=latest_statistics_filename)

        # -----------------------------------------------------------------------------------------
        # Possibly store as best
        # -----------------------------------------------------------------------------------------
        if store_as_best:
            best_checkpoint_filename = os.path.join(
                directory, self._prefix + self._best_postfix + self._extension)

            best_statistics_filename = os.path.join(
                directory, self._prefix + self._best_postfix + ".json")

            logging.info("Saved checkpoint as best model..")
            shutil.copyfile(latest_checkpoint_filename,
                            best_checkpoint_filename)
            shutil.copyfile(latest_statistics_filename,
                            best_statistics_filename)
Example #19
def multiplots():
    mpl.style.use(['seaborn-white', 'seaborn-paper', 'grayscale'])

    latexify()

    for model, title in zip([no_resample, undersample, oversample], ['none', 'undersample', 'oversample']):
        y, y_pred, c = model(classifier)

        acc = metrics.accuracy_score(y, y_pred)
        #prec = metrics.precision_score(y, y_pred, labels=labels, average='macro')
        #rec = metrics.recall_score(y, y_pred, labels=labels, average='macro')
        #f1 = metrics.f1_score(y, y_pred, labels=labels, average='macro')

        cm = metrics.confusion_matrix(y, y_pred, labels=labels)
        cm = norm_cm(cm)

        cm = pd.DataFrame(cm, index=labels, columns=labels)

        fig, ax = plt.subplots(dpi=92, constrained_layout=True)
        #print(f'{title}\t-- Acc.: {acc:.3f};\t Prec.: {prec:.3f}\t Rec.: {rec:.3f}\t F1: {f1:.3f}')
        print('Resample:', title, classifier[0], f'accuracy={acc:.3f}')
        print(metrics.classification_report(y, y_pred, labels=labels))

        plt.suptitle(f'accuracy={acc:.3f}')
        #plt.suptitle(f'good={c["good"]:,}\ninterm.={c["interm."]:,}\nbad={c["bad"]:,}', ha='left')

        sns.heatmap(cm, vmin=0, vmax=1, annot=True, fmt='.2f', cmap='Greys', ax=ax, cbar=False, square=True)

        #ax.set_title(f'Accuracy = {acc:.3f}', loc='center')
        ax.set_title(
            f'good:    {c["good"]:,}\ninterm.: {c["interm."]:,}\nbad:      {c["bad"]:,}',
            fontdict={'fontsize': 9},
            loc='left'
        )

        format_axes_for_cm(ax)

        #fig.tight_layout()
        ensure_dir('./output/resampling/')
        fig.savefig(f'./output/resampling/{title}.pdf', dpi=92, bbox_inches='tight')
        plt.close(fig)
Example #20
def advanced_charts():
    mpl.style.use(['seaborn-white', 'seaborn-paper', 'grayscale'])
    latexify(columns=2)

    w_sizes = (2, 5, 10, 15, 20, 30, 50, 80,
               100)[::-1]  # [::-1] will reverse order

    fig, ax = plt.subplots(dpi=92)

    colors = sns.color_palette("cubehelix", len(w_sizes))

    for w_prr, color in zip(w_sizes, colors):
        acc = []
        for w_history in w_sizes:
            y, y_pred = different_window_sizes(w_prr, w_history)
            acc.append(metrics.accuracy_score(y, y_pred))

        ax.plot(w_sizes,
                acc,
                label=f'W$_\\mathrm{{PRR}}={w_prr}$',
                color=color)

    ax.set_ylabel('accuracy')
    ax.set_xlabel('W$_\\mathrm{history}$')
    ax.set_xticks(w_sizes[::-1])
    ax.set_xlim(min(w_sizes), max(w_sizes))
    format_axes_for_chart(ax)

    fig.tight_layout()
    fig.legend(loc='right')

    ensure_dir('./output/')
    fig.savefig('./output/different_window_sizes_linechart.pdf',
                dpi=92,
                bbox_inches='tight')

    plt.close(fig)
Example #21
def multiple_figures():
    mpl.style.use(['seaborn-white', 'seaborn-paper', 'grayscale'])

    latexify()

    pipe = pipe_dtree

    for features in feature_sets:
        y, y_pred = different_features(pipe, features)

        acc = metrics.accuracy_score(y, y_pred)
        prec = metrics.precision_score(y,
                                       y_pred,
                                       average='weighted',
                                       labels=labels)
        recall = metrics.recall_score(y,
                                      y_pred,
                                      average='weighted',
                                      labels=labels)

        #prec = metrics.precision_score(y, y_pred, labels=labels, average='micro')
        #rec = metrics.recall_score(y, y_pred, labels=labels, average='micro')

        cm = metrics.confusion_matrix(y, y_pred, labels=labels)
        cm = norm_cm(cm)

        cm = pd.DataFrame(cm, index=labels, columns=labels)

        fig, ax = plt.subplots(dpi=92)
        sns.heatmap(cm,
                    vmin=0,
                    vmax=1,
                    annot=True,
                    fmt='.2f',
                    cmap='Greys',
                    ax=ax,
                    cbar=False,
                    square=True)
        format_axes_for_cm(ax)

        feature_str = stringify_features(features)
        ax.set_title(
            f'Accuracy = {acc:.3f}\n(prec = {prec:.3f}; rec = {recall:.3f})')

        fig.tight_layout()

        ensure_dir('./output/features/dtree/')
        fig.savefig(f'./output/features/dtree/{feature_str}.pdf',
                    dpi=92,
                    bbox_inches='tight')
        plt.close(fig)
        print(f'Done {features}')

    pipe = pipe_logreg

    for features in feature_sets:
        print('Features', features)

        y, y_pred = different_features(pipe, features)

        acc = metrics.accuracy_score(y, y_pred)
        prec = metrics.precision_score(y,
                                       y_pred,
                                       average='micro',
                                       labels=labels)
        recall = metrics.recall_score(y,
                                      y_pred,
                                      average='micro',
                                      labels=labels)

        #prec = metrics.precision_score(y, y_pred, labels=labels, average='micro')
        #rec = metrics.recall_score(y, y_pred, labels=labels, average='micro')

        cm = metrics.confusion_matrix(y, y_pred, labels=labels)
        cm = norm_cm(cm)

        cm = pd.DataFrame(cm, index=labels, columns=labels)

        fig, ax = plt.subplots(dpi=92)
        sns.heatmap(cm,
                    vmin=0,
                    vmax=1,
                    annot=True,
                    fmt='.2f',
                    cmap='Greys',
                    ax=ax,
                    cbar=False,
                    square=True)
        format_axes_for_cm(ax)

        feature_str = stringify_features(features)
        ax.set_title(
            f'Accuracy = {acc:.3f}\n(prec = {prec:.3f}, rec = {recall:.3f})')

        fig.tight_layout()

        ensure_dir('./output/features/logistic/')
        fig.savefig(f'./output/features/logistic/{feature_str}.pdf',
                    dpi=92,
                    bbox_inches='tight')
        plt.close(fig)
        print(f'Done {features}')
Example #22
import numpy as np


# In[50]:


allmean = []
freq = []
standard_deviation = []


# In[51]:


EXP_FOLDERNAME = os.path.join('experiments', 'sampling_rate')
tools.ensure_dir(EXP_FOLDERNAME)
experimental_data = os.path.join(EXP_FOLDERNAME, 'calculated_data.json')


# In[52]:


def getdata(name):
    EXP_FILENAME = os.path.join(EXP_FOLDERNAME, name + '.json')

    EXP_TIME_SECOND = 20
    if __name__ == '__main__':

        handler = SerialHandler('/dev/ttyACM3', 115200)

        handler.current_state = {'time': 0}
Example #23
# Step1: Split video and audio
GENERATE_FRAMES = True
if GENERATE_FRAMES:
    sp = AVSplit(config.source_video)
    sp.save_frames(config.frames_output_dir)
    sp.save_audio(config.audio_output_dir, config.audio_output_name)

frame_names = tools.load_frames(config.FRAMES_PATH_BASE)

# Step2: Detect logo frame-wise
num_frames = len(frame_names)
for i, frame in enumerate(frame_names):
    try:
        print('detecting frame {0}/{1}'.format(i, num_frames))
        img = LogoDetect(config.DETECT_OBJ, frame)
        directory = tools.ensure_dir(config.save_detect_dir)
        Image.fromarray(img).save('{0}/{1}.jpeg'.format(directory, i))
    except Exception:
        # skip frames where detection fails instead of aborting the loop
        pass

# Step3: export video file
detected_frames = tools.load_frames(config.DETECTED_PATH_BASE)
clip = moviepy.editor.ImageSequenceClip(detected_frames, fps=10)
if GENERATE_FRAMES:
    fps = sp.videoclip.fps
    clip = clip.set_fps(fps)
    if sp.audioclip is not None:
        clip = clip.set_audio(sp.audioclip)
else:
    with open(config.frames_output_dir + '/fps.txt', 'r') as f:
        fps = float(f.read())
Example #24
import os
import inspect
HERE_PATH = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe())))

from datetime import datetime

import pandas as pd
import tinydb

import tools

DB_FOLDERNAME = os.path.join(HERE_PATH, 'db')
TRACKING_DB_FILENAME = os.path.join(DB_FOLDERNAME, 'tracking_info.json')

CSV_FOLDERNAME = os.path.join(HERE_PATH, 'csv')
tools.ensure_dir(CSV_FOLDERNAME)

CSV_FILENAME = os.path.join(CSV_FOLDERNAME, 'tracking_data.csv')

UNKNOWN_REFERENCE = ''
LOCALHOST_MARKER = 'LOCALHOST'


def get_location_info(client_ip_info):

    location_info = {}

    # fill list with unknown values
    field_list = ['ip', 'country', 'region', 'city', 'geo']
    for field_name in field_list:
        location_info[field_name] = UNKNOWN_REFERENCE
Example #25
 def save(self, net_file):
     ensure_dir(os.path.dirname(net_file))
     log.info("Saving net file: {}".format(net_file))
     with open(net_file, 'wb') as pkl_file:
         param_values = lasagne.layers.get_all_param_values(self.net['output'])
         pickle.dump(param_values, pkl_file)
Example #26
import os

# this get our current location in the file system
import inspect
HERE_PATH = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe())))

import tinydb

import tools

DB_FOLDERNAME = os.path.join(HERE_PATH, 'db')
tools.ensure_dir(DB_FOLDERNAME)

TRACKING_DB_FILENAME = os.path.join(DB_FOLDERNAME, 'tracking_info.json')
PROCESSED_DB_FILENAME = os.path.join(DB_FOLDERNAME, 'log_processed.json')

CONNECTION_INFO_FILENAME = 'connection_info.json'
URL_INFO_FILENAME = 'url_info.json'

# LOG_ROOT_FOLDER = os.path.join(HERE_PATH, 'logs')
LOG_ROOT_FOLDER = os.path.join(HERE_PATH,
                               '../server/logs')  ## for local server log build

if __name__ == '__main__':

    tracking_info_db = tinydb.TinyDB(TRACKING_DB_FILENAME)
    log_processed_db = tinydb.TinyDB(PROCESSED_DB_FILENAME)

    log_folders = tools.list_folders(LOG_ROOT_FOLDER)
    for log_folder in log_folders:
Example #27
def toNuke(params, oneFile=False):
    log.debug('To Nuke - Start')

    cameras = params['cameras']
    camerasOutSize = params['camerasOutSize']
    camerasFrameStart = params['camerasFirstFrame']

    filesGenerated = []
    argsToBatchs = []

    if oneFile:
        filepath = params['file_browser'] + '_undisto.nk'
        tools.ensure_dir(filepath)
        fileObj = open(filepath, 'w')
        for index, cam in enumerate(cameras):
            fileLines = []
            argsExportNuke = [
                cam, index, camerasOutSize[index], filepath, params['date'],
                camerasFrameStart[index]
            ]

            fileLines, argsToBatch = exportNuke(*argsExportNuke)

            if fileLines == -1:
                # print ("Problem with camera "+ str(cam) )
                continue

            writeLines(fileObj, fileLines)
            argsToBatchs.append(argsToBatch)
        fileObj.close()
        filesGenerated.append(filepath)

    else:
        # create Directory
        folderCameras = params['file_browser'] + '_undisto/'
        try:
            os.makedirs(folderCameras)
        except OSError:
            pass  # the folder probably already exists

        for index, cam in enumerate(cameras):
            cameraName = "%s_%s_1" % (tools.validName(
                tde4.getCameraName(cam)), index)
            filepath = folderCameras + cameraName + '_undisto.nk'
            tools.ensure_dir(filepath)
            fileObj = open(filepath, 'w')
            fileLines = []
            argsExportNuke = [
                cam, index, camerasOutSize[index], filepath, params['date'],
                camerasFrameStart[index]
            ]

            fileLines, argsToBatch = exportNuke(*argsExportNuke)

            if fileLines == -1:
                # print ("Problem with camera "+ str(cam) )
                fileObj.close()
                continue

            writeLines(fileObj, fileLines)
            fileObj.close()
            argsToBatchs.append(argsToBatch)
            filesGenerated.append(filepath)

    # Launch batch Render
    log.debug('Launching batch Render')

    import toBatch
    for argsToBatch in argsToBatchs:
        toBatch.batch3DE(*(argsToBatch))
    log.debug('To Nuke - End')
    return filesGenerated
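Example #28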
def multiple_figures():
    mpl.style.use(['seaborn-white', 'seaborn-paper', 'grayscale'])
    latexify()

    cv = model_selection.StratifiedKFold(n_splits=10, shuffle=True)
    scaler = preprocessing.StandardScaler()
    resample = over_sampling.RandomOverSampler()

    baseline = pipeline.make_pipeline(
        scaler, resample,
        dummy.DummyClassifier(strategy='constant', constant='good'))

    logreg = pipeline.make_pipeline(
        scaler,
        resample,
        linear_model.LogisticRegression(solver='lbfgs', multi_class='ovr'),
    )

    dtree = pipeline.make_pipeline(
        scaler,
        resample,
        tree.DecisionTreeClassifier(),
    )

    knn = pipeline.make_pipeline(
        scaler,
        resample,
        neighbors.KNeighborsClassifier(),
    )

    mlp = pipeline.make_pipeline(
        scaler,
        resample,
        neural_network.MLPClassifier(hidden_layer_sizes=(
            100,
            100,
            100,
        ),
                                     activation='relu',
                                     solver='adam'),
    )

    svc = pipeline.make_pipeline(
        scaler,
        resample,
        svm.LinearSVC(),
    )

    RForest = pipeline.make_pipeline(
        scaler,
        resample,
        ensemble.RandomForestClassifier(n_estimators=100),
    )

    models = (
        ('Constant', baseline),
        ('Logistic Regression', logreg),
        ('Decision Tree', dtree),
        #('kNN', knn),
        ('Multi-Layer Perceptron', mlp),
        ('linearSVM', svc),
        ('Random Forest', RForest),
    )

    # Special case of baseline
    filename = 'baseline-link-overall'
    df = prepare_data()
    y, y_pred = df['class'].ravel(), df['class_overall'].ravel()

    acc = metrics.accuracy_score(y, y_pred)
    prec = metrics.precision_score(y,
                                   y_pred,
                                   average='weighted',
                                   labels=labels)
    recall = metrics.recall_score(y, y_pred, average='weighted', labels=labels)

    cm = metrics.confusion_matrix(y, y_pred, labels=labels)
    cm = norm_cm(cm)

    cm = pd.DataFrame(cm, index=labels, columns=labels)

    fig, ax = plt.subplots(dpi=92)
    sns.heatmap(cm,
                vmin=0,
                vmax=1,
                annot=True,
                fmt='.2f',
                cmap='Greys',
                ax=ax,
                cbar=False,
                square=True)
    ax.set_title(
        f'accuracy = {acc:.3f}\n(prec = {prec:.3f}, rec = {recall:.3f})')
    format_axes_for_cm(ax)

    fig.tight_layout()

    ensure_dir('./output/models/')
    fig.savefig(f'./output/models/{filename}.pdf', dpi=92, bbox_inches='tight')
    plt.close(fig)
    print(f'Done {filename}')

    for name, pipe in models:
        filename = name.lower().replace(' ', '_')

        y, y_pred = different_models(pipe)

        acc = metrics.accuracy_score(y, y_pred)
        #prec = metrics.precision_score(y, y_pred, average='weighted', labels=labels)
        #recall = metrics.recall_score(y, y_pred, average='weighted', labels=labels)
        print(name)
        print(metrics.classification_report(y, y_pred, labels=labels))

        cm = metrics.confusion_matrix(y, y_pred, labels=labels)
        cm = norm_cm(cm)

        cm = pd.DataFrame(cm, index=labels, columns=labels)

        fig, ax = plt.subplots(dpi=92)
        sns.heatmap(cm,
                    vmin=0,
                    vmax=1,
                    annot=True,
                    fmt='.2f',
                    cmap='Greys',
                    ax=ax,
                    cbar=False,
                    square=True)
        ax.set_title(f'accuracy={acc:.3f}')
        format_axes_for_cm(ax)

        fig.tight_layout()

        ensure_dir('./output/models/')
        fig.savefig(f'./output/models/{filename}.pdf',
                    dpi=92,
                    bbox_inches='tight')
        plt.close(fig)
Example #29
def main():
    mpl.style.use(['seaborn-white', 'seaborn-paper', 'grayscale'])
    latexify(columns=2)

    #cv = model_selection.StratifiedKFold(n_splits=10, shuffle=True)
    #poly = preprocessing.PolynomialFeatures(degree=2)
    scaler = preprocessing.StandardScaler()
    resample = over_sampling.RandomOverSampler()

    baseline = pipeline.make_pipeline(
        scaler, resample, dummy.DummyClassifier(strategy='constant',
                                                constant=0))

    logreg = pipeline.make_pipeline(
        scaler,
        resample,
        linear_model.LogisticRegression(),
    )

    sgd = pipeline.make_pipeline(
        scaler,
        resample,
        linear_model.SGDClassifier(),
    )

    dtree = pipeline.make_pipeline(
        scaler,
        resample,
        tree.DecisionTreeClassifier(),
    )

    mlp = pipeline.make_pipeline(scaler, resample,
                                 neural_network.MLPClassifier())

    svc = pipeline.make_pipeline(scaler, resample, svm.LinearSVC())

    RForest = pipeline.make_pipeline(scaler, resample,
                                     ensemble.RandomForestClassifier())

    models = (
        ('Constant', baseline),
        ('Logistic Reg.', logreg),
        ('Decision Tree', dtree),
        #('kNN', knn),
        ('Multi-Layer Perceptron', mlp),
        ('SVM (linear kernel)', svc),
        ('Random Forest', RForest),
    )

    colors = sns.color_palette("cubehelix", len(models))

    fig, ax = plt.subplots(dpi=92)  # Setup a figure

    #ax.set_title('Precision-Recall curve')

    #ax.set_xlim(0, 1)
    #ax.set_ylim(0, 1)

    ax.set_xlabel('Recall = $\\frac{{TP}}{{TP+FN}}$')
    ax.set_ylabel('Precision = $\\frac{{TP}}{{TP+FP}}$')

    # Prepare data for processing
    data = prepare_data()
    X, y = data[['rssi', 'rssi_avg', 'rssi_std']].values, data['class'].ravel()
    Y = preprocessing.label_binarize(y, classes=classes)
    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        X, Y, test_size=0.2, random_state=random_state)

    for (name, model), color in zip(models, colors):
        classifier = multiclass.OneVsRestClassifier(
            model)  # Make model support *.decision_function

        classifier.fit(X_train, y_train)

        # generate y_score
        if hasattr(classifier, 'decision_function'):
            y_score = classifier.decision_function(X_test)
        else:
            y_score = classifier.predict_proba(X_test)
            #continue

        # generate probabilities
        #y_proba = classifier.predict_proba(X_test)

        # generate predictions
        y_pred = classifier.predict(X_test)

        precision = dict()
        recall = dict()
        average_precision = dict()

        acc = metrics.accuracy_score(y_test, y_pred)

        for i in [1]:  # We observe only intermediate class
            precision[i], recall[i], _ = metrics.precision_recall_curve(
                y_test[:, i], y_score[:, i])
            average_precision[i] = metrics.average_precision_score(
                y_test[:, i], y_score[:, i])

            ax.step(recall[i],
                    precision[i],
                    where='post',
                    color=color,
                    alpha=0.65,
                    label=f'{name}')

        print(f'Plotted {name}')

    ax.legend(loc="best")
    format_axes_for_chart(ax)
    fig.tight_layout()

    ensure_dir('./output/')
    fig.savefig('./output/precision-recall-curve.pdf',
                dpi=92,
                bbox_inches='tight')
    #plt.show()
    plt.close(fig)