Example #1
import os
from os.path import abspath as opa
from os.path import join as opj

from flask import Flask, render_template

# note: the assets() helper used below is project-local and not shown here


def make_app(env="dev"):
    
    DEBUG = env == "dev"
    
    url_root = {
        "dev": "/",
        "prod": "/dist/",
        "test": "/dist/"
    }[env]
    
    app_home = os.path.dirname(__file__)
    
    cfg = {
        "dev": dict(
            static_url_path="",
            template_folder="..",
            static_folder=opa(opj(app_home, ".."))
            ),
        "prod": dict(
            static_url_path="/dist",
            template_folder="..",
            static_folder=opa(opj(app_home, "..", "dist"))
            ),
        "test": dict(
            static_url_path="/dist",
            template_folder="../dist",
            static_folder=opa(opj(app_home, "..", "dist"))
            )
    }[env]
    
    app = Flask(__name__, **cfg)
    app.config['CSRF_ENABLED'] = DEBUG
    app.config['SECRET_KEY'] = "totally-insecure"
    app.config['DEBUG'] = DEBUG

    @app.route(url_root)
    def home():
        
        kwargs = {}
        if env != "test":
            kwargs.update(assets())
        
        return render_template("index.html", env=env, **kwargs)

    @app.route(url_root + "frame/")
    def frame():
        kwargs = {}
        if env != "test":
            kwargs.update(assets("../frame/"))
            
        return render_template("frame/index.html", env=env, **kwargs)
        
    return app
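
A minimal usage sketch for this factory; the host and port are illustrative, and "dev" mode assumes the repo layout the config above expects:

if __name__ == "__main__":
    app = make_app(env="dev")
    app.run(host="127.0.0.1", port=5000)  # illustrative host/port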
Example #2
def rust():
    tDir = opa(os.sep, "tmp", id_generator())
    mkdir(tDir)
    with cd(tDir):
        s="install_rust"
        sudo("curl https://sh.rustup.rs -sSf > {}".format(s))
        sudo("chmod u+x {}".format(s))
        sudo("./{} --no-modify-path -y".format(s))
    rmf(tDir)
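
The snippet above leans on project-local helpers (opa, id_generator, mkdir, cd, sudo, rmf) that are not shown. A sketch of plausible stand-ins, offered purely as assumptions so the example can run on its own:

import os
import random
import shutil
import string
import subprocess
from contextlib import contextmanager

def opa(*parts):
    # assumed: join the parts, then make the path absolute
    return os.path.abspath(os.path.join(*parts))

def id_generator(size=8, chars=string.ascii_lowercase + string.digits):
    # assumed: short random name for a scratch directory
    return "".join(random.choice(chars) for _ in range(size))

def mkdir(path):
    if not os.path.isdir(path):
        os.makedirs(path)

@contextmanager
def cd(path):
    # run a block inside another working directory
    prev = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(prev)

def sudo(cmd):
    subprocess.check_call("sudo " + cmd, shell=True)

def rmf(path):
    shutil.rmtree(path, ignore_errors=True)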
Example #3
def arc():
    """This step won't be needed after Ubuntu 16.04
    """
    link = "http://download.opensuse.org/repositories/home:Horst3180/xUbuntu_16.04/"
    tDir = opa(os.sep, "tmp", id_generator())
    mkdir(tDir)
    with cd(tDir):
        wget("{}Release.key".format(link))
        sudo("apt-key add - < Release.key")
        sudo("rm Release.key")
        add_repo("'deb {} /'".format(link))
    rmf(tDir)
Example #4
        def do_POST(self):
            content_length = int(self.headers['Content-Length'])
            post_data = json.loads(self.rfile.read(content_length))

            mlog(fnc="do_POST()", msg="POST req data: Last request - {}, Last quality - {}, Rebuffer Time - {}".format(
                post_data['lastRequest'], post_data['lastquality'], float(post_data['RebufferTime'] - self.input_dict['last_total_rebuf'])))
            send_data = ""

            if ( 'lastquality' in post_data ):
                rebuffer_time = float(post_data['RebufferTime'] - self.input_dict['last_total_rebuf'])
                reward = \
                   VIDEO_BIT_RATE[post_data['lastquality']] / M_IN_K \
                   - REBUF_PENALTY * (post_data['RebufferTime'] - self.input_dict['last_total_rebuf']) / M_IN_K \
                   - SMOOTH_PENALTY * np.abs(VIDEO_BIT_RATE[post_data['lastquality']] -
                                                  self.input_dict['last_bit_rate']) / M_IN_K
                # reward = BITRATE_REWARD[post_data['lastquality']] \
                #         - 8 * rebuffer_time / M_IN_K - np.abs(BITRATE_REWARD[post_data['lastquality']] - BITRATE_REWARD_MAP[self.input_dict['last_bit_rate']])

                video_chunk_fetch_time = post_data['lastChunkFinishTime'] - post_data['lastChunkStartTime']
                video_chunk_size = post_data['lastChunkSize']
                
                # log wall_time, bit_rate, buffer_size, rebuffer_time, video_chunk_size, download_time, reward
                self.log_file.write(str(time.time()) + '\t' +
                                    str(VIDEO_BIT_RATE[post_data['lastquality']]) + '\t' +
                                    str(post_data['buffer']) + '\t' +
                                    str(float(post_data['RebufferTime'] - self.input_dict['last_total_rebuf']) / M_IN_K) + '\t' +
                                    str(video_chunk_size) + '\t' +
                                    str(video_chunk_fetch_time) + '\t' +
                                    str(reward) + '\n')
                self.log_file.flush()

                self.input_dict['last_total_rebuf'] = post_data['RebufferTime']
                self.input_dict['last_bit_rate'] = VIDEO_BIT_RATE[post_data['lastquality']]

                if ( post_data['lastRequest'] == TOTAL_VIDEO_CHUNKS ):
                    send_data = "REFRESH"
                    self.input_dict['last_total_rebuf'] = 0
                    self.input_dict['last_bit_rate'] = DEFAULT_QUALITY
                    self.log_file.write('\n')  # so that in the log we know where video ends
                    lock_path = "./locks/video_" + opb(self.input_dict['log_file_path']) + ".lock"
                    with open(lock_path, "w"):
                        pass  # just create an empty lock file
                    mlog(fnc="do_POST()", msg="Created lock file: {}".format(opa(lock_path)))

            self.send_response(200)
            self.send_header('Content-Type', 'text/plain')
            self.send_header('Content-Length', len(send_data))
            self.send_header('Access-Control-Allow-Origin', "*")
            self.end_headers()
            self.wfile.write(send_data)
            if len(send_data) > 0:
                mlog(fnc="do_POST()", msg="Response to POST req: {}".format(send_data))
Example #5
def install_desktop():
    # Make useful directories & links
    for path in ["local", "Projets", "Sandbox"]:
        mkdir(opa(os.environ["HOME"], path))
    #ln -s ${HOME}/dotfiles/bin ${HOME}

    #install ssh
    """if [ -d ${HOME}/dotfiles/private/ssh ]
    then
        chmod -R 700 ${HOME}/dotfiles/private/ssh
        chmod 600 ${HOME}/dotfiles/private/ssh/*
        mkdir -p ${HOME}/.ssh
        cp ${HOME}/dotfiles/private/ssh/id_rs* ${HOME}/.ssh
    fi
    """

    add_repositories()
    #Make sure everything is up to date before messing with stuff
    apt(action="update")
    apt(action="upgrade")
    apt_install_pkgs(APT_PKGS + APT_REPOS)

    #modulefiles
    #"lmod", #à déplacer
    #ln -s ${HOME}/dotfiles/modulefiles ${HOME}/local

    #Slack
    sudo("apt install -y " + opa(os.environ["HOME"], "dotfiles", "private", "src", "slack-desktop-2.6.6-amd64.deb"))

    #Freesurfer
    #sudo apt install -y tcsh

    #The End
    apt(action="update")
    apt(action="upgrade")
    apt(action="autoremove")
Example #6
"""
    #
    if args.useplot:
        lg.info(" * Adding plotting imports...")
        sc = sc.replace("PLOT_IMPORTS", plot_imports)
    else:
        sc = sc.replace("PLOT_IMPORTS", "")

    lg.info(" *")


    ## The classes folder name.
    class_folder_name = args.classdir

    ## The classes folder path.
    class_folder_path = opa(opj(output_path, class_folder_name))
    #
    # Does the class subfolder already exist?
    if not os.path.isdir(class_folder_path):
        lg.info(" * Making class folder path '%s'" % (class_folder_path))
        os.mkdir(class_folder_path)
    else:
        lg.info(" * Class folder '%s' already exists." % (class_folder_name))
    lg.info(" *")

    ## The __init__.py file path.
    init_file_path = opj(class_folder_path, "__init__.py")
    #
    if not ope(init_file_path):
        with open(init_file_path, "w") as f:
            f.write("")
Example #7
        def do_POST(self):
            content_length = int(self.headers['Content-Length'])
            post_data = json.loads(self.rfile.read(content_length))

            mlog(fnc="do_POST()",
                 msg="POST req data: Last request - {}, Last quality - {}, Rebuffer Time - {}".format(
                     post_data['lastRequest'], post_data['lastquality'],
                     float(post_data['RebufferTime'] - self.input_dict['last_total_rebuf'])))
            send_data = ""

            if ('pastThroughput' in post_data):
                # @Hongzi: this is just the summary of throughput/quality at the end of the load
                # so we don't want to use this information to send back a new quality
                mlog(fnc="do_POST()",
                     msg="Past throughput is present in post_data, \
                        not using this information to send back quality")
            else:
                # option 1. reward for just quality
                # reward = post_data['lastquality']
                # option 2. combine reward for quality and rebuffer time
                #           tune up the knob on rebuf to prevent it more
                # reward = post_data['lastquality'] - 0.1 * (post_data['RebufferTime'] - self.input_dict['last_total_rebuf'])
                # option 3. give a fixed penalty if video is stalled
                #           this can reduce the variance in reward signal
                # reward = post_data['lastquality'] - 10 * ((post_data['RebufferTime'] - self.input_dict['last_total_rebuf']) > 0)

                # option 4. use the metric in SIGCOMM MPC paper
                rebuffer_time = float(post_data['RebufferTime'] -
                                      self.input_dict['last_total_rebuf'])

                # --linear reward--
                reward = VIDEO_BIT_RATE[post_data['lastquality']] / M_IN_K \
                        - REBUF_PENALTY * rebuffer_time / M_IN_K \
                        - SMOOTH_PENALTY * np.abs(VIDEO_BIT_RATE[post_data['lastquality']] -
                                                  self.input_dict['last_bit_rate']) / M_IN_K

                # --log reward--
                # log_bit_rate = np.log(VIDEO_BIT_RATE[post_data['lastquality']] / float(VIDEO_BIT_RATE[0]))
                # log_last_bit_rate = np.log(self.input_dict['last_bit_rate'] / float(VIDEO_BIT_RATE[0]))

                # reward = log_bit_rate \
                #          - 4.3 * rebuffer_time / M_IN_K \
                #          - SMOOTH_PENALTY * np.abs(log_bit_rate - log_last_bit_rate)

                # --hd reward--
                # reward = BITRATE_REWARD[post_data['lastquality']] \
                #         - 8 * rebuffer_time / M_IN_K - np.abs(BITRATE_REWARD[post_data['lastquality']] - BITRATE_REWARD_MAP[self.input_dict['last_bit_rate']])

                self.input_dict['last_bit_rate'] = VIDEO_BIT_RATE[post_data['lastquality']]
                self.input_dict['last_total_rebuf'] = post_data['RebufferTime']

                # retrieve previous state
                if len(self.s_batch) == 0:
                    state = [np.zeros((S_INFO, S_LEN))]
                else:
                    state = np.array(self.s_batch[-1], copy=True)

                # compute bandwidth measurement
                video_chunk_fetch_time = post_data['lastChunkFinishTime'] - post_data['lastChunkStartTime']
                video_chunk_size = post_data['lastChunkSize']

                # compute number of video chunks left
                video_chunk_remain = TOTAL_VIDEO_CHUNKS - self.input_dict['video_chunk_coount']
                self.input_dict['video_chunk_coount'] += 1

                # dequeue history record
                state = np.roll(state, -1, axis=1)

                # this should be S_INFO number of terms
                try:
                    state[0, -1] = VIDEO_BIT_RATE[post_data['lastquality']] / float(np.max(VIDEO_BIT_RATE))
                    state[1, -1] = post_data['buffer'] / BUFFER_NORM_FACTOR
                    state[2, -1] = rebuffer_time / M_IN_K
                    state[3, -1] = float(video_chunk_size) / float(video_chunk_fetch_time) / M_IN_K  # kilo byte / ms
                    state[4, -1] = np.minimum(video_chunk_remain, CHUNK_TIL_VIDEO_END_CAP) / float(CHUNK_TIL_VIDEO_END_CAP)
                except ZeroDivisionError:
                    # this should occur VERY rarely (1 out of 3000); likely a DASH issue
                    # in this case we ignore the observation and roll back to an earlier one
                    if len(self.s_batch) == 0:
                        state = [np.zeros((S_INFO, S_LEN))]
                    else:
                        state = np.array(self.s_batch[-1], copy=True)

                # log wall_time, bit_rate, buffer_size, rebuffer_time, video_chunk_size, download_time, reward
                self.log_file.write(str(time.time()) + '\t' +
                                    str(VIDEO_BIT_RATE[post_data['lastquality']]) + '\t' +
                                    str(post_data['buffer']) + '\t' +
                                    str(rebuffer_time / M_IN_K) + '\t' +
                                    str(video_chunk_size) + '\t' +
                                    str(video_chunk_fetch_time) + '\t' +
                                    str(reward) + '\n')
                self.log_file.flush()

                # pick bitrate according to MPC
                # first get harmonic mean of last 5 bandwidths
                past_bandwidths = state[3, -5:]
                while past_bandwidths[0] == 0.0:
                    past_bandwidths = past_bandwidths[1:]
                #if ( len(state) < 5 ):
                #    past_bandwidths = state[3,-len(state):]
                #else:
                #    past_bandwidths = state[3,-5:]
                bandwidth_sum = 0
                for past_val in past_bandwidths:
                    bandwidth_sum += (1 / float(past_val))
                future_bandwidth = 1.0 / (bandwidth_sum / len(past_bandwidths))

                # future chunks length (try 4 if that many remaining)
                last_index = int(post_data['lastRequest'])
                future_chunk_length = MPC_FUTURE_CHUNK_COUNT
                if (TOTAL_VIDEO_CHUNKS - last_index < 4):
                    future_chunk_length = TOTAL_VIDEO_CHUNKS - last_index

                # all possible combinations of 5 chunk bitrates (9^5 options)
                # iterate over list and for each, compute reward and store max reward combination
                max_reward = -100000000
                best_combo = ()
                start_buffer = float(post_data['buffer'])
                #start = time.time()
                for full_combo in CHUNK_COMBO_OPTIONS:
                    combo = full_combo[0:future_chunk_length]
                    # calculate total rebuffer time for this combination (start with start_buffer and subtract
                    # each download time and add 2 seconds in that order)
                    curr_rebuffer_time = 0
                    curr_buffer = start_buffer
                    bitrate_sum = 0
                    smoothness_diffs = 0
                    last_quality = int(post_data['lastquality'])
                    for position in range(0, len(combo)):
                        chunk_quality = combo[position]
                        index = last_index + position + 1  # e.g., if last chunk is 3, then first iter is 3+0+1=4
                        download_time = (
                            get_chunk_size(chunk_quality, index) / 1000000.
                        ) / future_bandwidth  # this is MB/MB/s --> seconds
                        if (curr_buffer < download_time):
                            curr_rebuffer_time += (download_time - curr_buffer)
                            curr_buffer = 0
                        else:
                            curr_buffer -= download_time
                        curr_buffer += 4

                        # linear reward
                        #bitrate_sum += VIDEO_BIT_RATE[chunk_quality]
                        #smoothness_diffs += abs(VIDEO_BIT_RATE[chunk_quality] - VIDEO_BIT_RATE[last_quality])

                        # log reward
                        # log_bit_rate = np.log(VIDEO_BIT_RATE[chunk_quality] / float(VIDEO_BIT_RATE[0]))
                        # log_last_bit_rate = np.log(VIDEO_BIT_RATE[last_quality] / float(VIDEO_BIT_RATE[0]))
                        # bitrate_sum += log_bit_rate
                        # smoothness_diffs += abs(log_bit_rate - log_last_bit_rate)

                        # hd reward
                        bitrate_sum += BITRATE_REWARD[chunk_quality]
                        smoothness_diffs += abs(BITRATE_REWARD[chunk_quality] -
                                                BITRATE_REWARD[last_quality])

                        last_quality = chunk_quality
                    # compute reward for this combination (one reward per 5-chunk combo)
                    # bitrates are in Mbits/s, rebuffer in seconds, and smoothness_diffs in Mbits/s

                    # linear reward
                    #reward = (bitrate_sum/1000.) - (4.3*curr_rebuffer_time) - (smoothness_diffs/1000.)

                    # log reward
                    # reward = (bitrate_sum) - (4.3*curr_rebuffer_time) - (smoothness_diffs)

                    # hd reward
                    reward = bitrate_sum - (8 * curr_rebuffer_time) - (
                        smoothness_diffs)

                    if (reward > max_reward):
                        max_reward = reward
                        best_combo = combo
                # send data to html side (first chunk of best combo)
                send_data = '0'  # default if no combo beat the initial max_reward
                if best_combo != ():  # some combo was good
                    send_data = str(best_combo[0])

                end = time.time()
                #print "TOOK: " + str(end-start)

                end_of_video = False
                if (post_data['lastRequest'] == TOTAL_VIDEO_CHUNKS):
                    send_data = "REFRESH"
                    end_of_video = True
                    self.input_dict['last_total_rebuf'] = 0
                    self.input_dict['last_bit_rate'] = DEFAULT_QUALITY
                    self.input_dict['video_chunk_coount'] = 0
                    self.log_file.write('\n')  # so that in the log we know where video ends
                    lock_path = "./locks/video_" + opb(self.input_dict['log_file_path']) + ".lock"
                    with open(lock_path, "w"):
                        pass  # just create an empty lock file
                    mlog(fnc="do_POST()",
                         msg="Created lock file: {}".format(opa(lock_path)))

                self.send_response(200)
                self.send_header('Content-Type', 'text/plain')
                self.send_header('Content-Length', len(send_data))
                self.send_header('Access-Control-Allow-Origin', "*")
                self.end_headers()
                self.wfile.write(send_data)
                if len(send_data) > 0:
                    mlog(fnc="do_POST()",
                         msg="Response to POST req: {}".format(send_data))

                # record [state, action, reward]
                # put it here after training, notice there is a shift in reward storage

                if end_of_video:
                    self.s_batch = [np.zeros((S_INFO, S_LEN))]
                else:
                    self.s_batch.append(state)
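
The future_bandwidth estimate above is the harmonic mean of up to the last five throughput samples. A compact NumPy equivalent, shown only for illustration:

import numpy as np

def harmonic_mean(samples):
    # assumes at least one positive sample, as the loop above does
    samples = np.asarray(samples, dtype=float)
    samples = samples[samples > 0.0]  # skip unfilled history slots
    return len(samples) / np.sum(1.0 / samples)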
Example #8
# -*- coding: utf-8 -*-

import sys
import numpy as np

from os.path import dirname as opd
from os.path import abspath as opa
TEST_PATH = opa(opd(opd(__file__)))
PRJ_PATH = opd(TEST_PATH)
sys.path.insert(0, PRJ_PATH)

from pycontour.transform import smooth_cnt
from pycontour.img import build_cnt_mask


def test_smooth_cnt():
    cnt1 = np.array([[300, 400, 450, 400, 300, 200, 0, 50, 100, 200],
                     [100, 100, 200, 300, 400, 500, 500, 400, 300, 200]])
    # img1 = build_cnt_mask(cnt1)
    smooth_cnt1 = smooth_cnt(cnt1, sigma=10)
    smooth_img1 = build_cnt_mask(smooth_cnt1)
    import matplotlib.pyplot as plt
    plt.imshow(smooth_img1)
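
Note that plt.imshow only draws onto the current figure; when this test runs as a plain script nothing appears on screen. To actually inspect the mask, something like one of the following is needed afterwards (the filename is illustrative):

# plt.show()                      # open an interactive window, or
# plt.savefig("smooth_mask.png")  # write the rendered mask to disk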
Example #9
        def do_POST(self):
            content_length = int(self.headers['Content-Length'])
            post_data = json.loads(self.rfile.read(content_length))
            
            mlog(fnc="do_POST()", msg="POST req data: Last request - {}, Last quality - {}, Rebuffer Time - {}".format(
                post_data['lastRequest'], post_data['lastquality'], float(post_data['RebufferTime'] - self.input_dict['last_total_rebuf'])))
            send_data = ""

            if ( 'pastThroughput' in post_data ):
                # @Hongzi: this is just the summary of throughput/quality at the end of the load
                # so we don't want to use this information to send back a new quality
                mlog(fnc="do_POST()", msg="Past throughput is present in post_data, \
                        not using this information to send back quality")
            else:
                # option 1. reward for just quality
                # reward = post_data['lastquality']
                # option 2. combine reward for quality and rebuffer time
                #           tune up the knob on rebuf to prevent it more
                # reward = post_data['lastquality'] - 0.1 * (post_data['RebufferTime'] - self.input_dict['last_total_rebuf'])
                # option 3. give a fixed penalty if video is stalled
                #           this can reduce the variance in reward signal
                # reward = post_data['lastquality'] - 10 * ((post_data['RebufferTime'] - self.input_dict['last_total_rebuf']) > 0)

                # option 4. use the metric in SIGCOMM MPC paper
                rebuffer_time = float(post_data['RebufferTime'] - self.input_dict['last_total_rebuf'])

                # --linear reward--
                reward = VIDEO_BIT_RATE[post_data['lastquality']] / M_IN_K \
                        - REBUF_PENALTY * rebuffer_time / M_IN_K \
                        - SMOOTH_PENALTY * np.abs(VIDEO_BIT_RATE[post_data['lastquality']] -
                                                  self.input_dict['last_bit_rate']) / M_IN_K

                # --log reward--
                # log_bit_rate = np.log(VIDEO_BIT_RATE[post_data['lastquality']] / float(VIDEO_BIT_RATE[0]))   
                # log_last_bit_rate = np.log(self.input_dict['last_bit_rate'] / float(VIDEO_BIT_RATE[0]))

                # reward = log_bit_rate \
                #          - 4.3 * rebuffer_time / M_IN_K \
                #          - SMOOTH_PENALTY * np.abs(log_bit_rate - log_last_bit_rate)

                # --hd reward--
                # reward = BITRATE_REWARD[post_data['lastquality']] \
                #         - 8 * rebuffer_time / M_IN_K - np.abs(BITRATE_REWARD[post_data['lastquality']] - BITRATE_REWARD_MAP[self.input_dict['last_bit_rate']])

                self.input_dict['last_bit_rate'] = VIDEO_BIT_RATE[post_data['lastquality']]
                self.input_dict['last_total_rebuf'] = post_data['RebufferTime']

                # retrieve previous state
                if len(self.s_batch) == 0:
                    state = [np.zeros((S_INFO, S_LEN))]
                else:
                    state = np.array(self.s_batch[-1], copy=True)

                # compute bandwidth measurement
                video_chunk_fetch_time = post_data['lastChunkFinishTime'] - post_data['lastChunkStartTime']
                video_chunk_size = post_data['lastChunkSize']

                # compute number of video chunks left
                video_chunk_remain = TOTAL_VIDEO_CHUNKS - self.input_dict['video_chunk_coount']
                self.input_dict['video_chunk_coount'] += 1

                # dequeue history record
                state = np.roll(state, -1, axis=1)

                next_video_chunk_sizes = []
                for i in xrange(A_DIM):
                    next_video_chunk_sizes.append(get_chunk_size(i, self.input_dict['video_chunk_coount']))

                # this should be S_INFO number of terms
                try:
                    state[0, -1] = VIDEO_BIT_RATE[post_data['lastquality']] / float(np.max(VIDEO_BIT_RATE))
                    state[1, -1] = post_data['buffer'] / BUFFER_NORM_FACTOR
                    state[2, -1] = float(video_chunk_size) / float(video_chunk_fetch_time) / M_IN_K  # kilo byte / ms
                    state[3, -1] = float(video_chunk_fetch_time) / M_IN_K / BUFFER_NORM_FACTOR  # 10 sec
                    state[4, :A_DIM] = np.array(next_video_chunk_sizes) / M_IN_K / M_IN_K  # mega byte
                    state[5, -1] = np.minimum(video_chunk_remain, CHUNK_TIL_VIDEO_END_CAP) / float(CHUNK_TIL_VIDEO_END_CAP)
                except ZeroDivisionError:
                    # this should occur VERY rarely (1 out of 3000); likely a DASH issue
                    # in this case we ignore the observation and roll back to an earlier one
                    if len(self.s_batch) == 0:
                        state = [np.zeros((S_INFO, S_LEN))]
                    else:
                        state = np.array(self.s_batch[-1], copy=True)

                # log wall_time, bit_rate, buffer_size, rebuffer_time, video_chunk_size, download_time, reward
                self.log_file.write(str(time.time()) + '\t' +
                                    str(VIDEO_BIT_RATE[post_data['lastquality']]) + '\t' +
                                    str(post_data['buffer']) + '\t' +
                                    str(rebuffer_time / M_IN_K) + '\t' +
                                    str(video_chunk_size) + '\t' +
                                    str(video_chunk_fetch_time) + '\t' +
                                    str(reward) + '\n')
                self.log_file.flush()

                action_prob = self.actor.predict(np.reshape(state, (1, S_INFO, S_LEN)))
                action_cumsum = np.cumsum(action_prob)
                bit_rate = (action_cumsum > np.random.randint(1, RAND_RANGE) / float(RAND_RANGE)).argmax()
                # Note: we need to discretize the probability into 1/RAND_RANGE steps,
                # because there is an intrinsic discrepancy in passing single state and batch states

                # send data to html side
                send_data = str(bit_rate)

                end_of_video = False
                if ( post_data['lastRequest'] == TOTAL_VIDEO_CHUNKS ):
                    send_data = "REFRESH"
                    end_of_video = True
                    self.input_dict['last_total_rebuf'] = 0
                    self.input_dict['last_bit_rate'] = DEFAULT_QUALITY
                    self.input_dict['video_chunk_coount'] = 0
                    self.log_file.write('\n')  # so that in the log we know where video ends
                    lock_path = "./locks/video_" + opb(self.input_dict['log_file_path']) + ".lock"
                    with open(lock_path, "w"):
                        pass  # just create an empty lock file
                    mlog(fnc="do_POST()", msg="Created lock file: {}".format(opa(lock_path)))

                self.send_response(200)
                self.send_header('Content-Type', 'text/plain')
                self.send_header('Content-Length', len(send_data))
                self.send_header('Access-Control-Allow-Origin', "*")
                self.end_headers()
                self.wfile.write(send_data)
                if len(send_data) > 0:
                    mlog(fnc="do_POST()", msg="Response to POST req: {}".format(send_data))

                # record [state, action, reward]
                # put it here after training, notice there is a shift in reward storage

                if end_of_video:
                    self.s_batch = [np.zeros((S_INFO, S_LEN))]
                else:
                    self.s_batch.append(state)
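
The bit_rate pick above samples from the actor's probability vector: the cumulative sum is compared against a random threshold discretized into 1/RAND_RANGE steps, per the inline note. A toy, self-contained illustration of the same trick:

import numpy as np

RAND_RANGE = 1000  # assumed value, for illustration only

action_prob = np.array([[0.1, 0.2, 0.4, 0.2, 0.1]])  # toy actor output
action_cumsum = np.cumsum(action_prob)
threshold = np.random.randint(1, RAND_RANGE) / float(RAND_RANGE)
bit_rate = (action_cumsum > threshold).argmax()  # first index past the threshold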
Example #10
def create_output(filename, atlas, voxelThresh=2, clusterExtend=5,
                  probabilityThreshold=5):
    """
    Generates output table containing each clusters' number of voxels,
    average activation across voxels, peak voxel coordinates, and
    neuroanatomical location of the peak voxel based on the specified atlas.

    In addition, separate stat maps are created for each cluster. In each
    image, cross hairs are located on the cluster's peak voxel.

    Parameters
    ----------
    filename : str
        The full or relative path to the statistical map to use
    atlas : str
        Atlas name to use
    voxelThresh : int
        Value threshold for voxels to be considered in cluster extraction
    clusterExtend : int
        Required number of contiguous voxels for a cluster to be retained for
        analysis
    probabilityThreshold : int
        Probability threshold for when using a probabilistic atlas

    Returns
    -------
    None
    """
    fname = opa(filename)  # note: output names below assume a '.nii.gz' suffix (fname[:-7])

    # Get data from NIfTI file
    img = nb.load(fname)
    imgdata = img.get_data()
    if len(imgdata.shape) != 3:
        imgdata = imgdata[:, :, :, 0]

    # Get top x-% of voxels if voxelThresh is negative
    if voxelThresh < 0:
        voxelThresh = np.percentile(
            np.abs(imgdata[imgdata != 0]), (100 + voxelThresh))

    # Get clusters from data
    clusters, nclusters = get_clusters(
        abs(imgdata) > voxelThresh, min_extent=clusterExtend)

    # Clean img data
    imgdata[clusters == 0] = 0
    new_image = nb.Nifti1Image(imgdata, img.affine, img.header)

    # Plot Glass Brain
    color_max = np.array([imgdata.min(), imgdata.max()])
    color_max = np.abs(np.min(color_max[color_max != 0]))
    try:
        plot_glass_brain(new_image, vmax=color_max,
                         threshold='auto', display_mode='lyrz', black_bg=True,
                         plot_abs=False, colorbar=True,
                         output_file='%s_glass.png' % fname[:-7])
    except ValueError:
        plot_glass_brain(new_image, vmax=color_max,
                         threshold='auto', black_bg=True,
                         plot_abs=False, colorbar=True,
                         output_file='%s_glass.png' % fname[:-7])

    # Get coordinates of peaks
    coords = get_peak_coords(clusters, img.affine, np.abs(imgdata))

    # Get Peak and Cluster information
    peak_summary = []
    peak_value = []
    cluster_summary = []
    cluster_mean = []
    volume_summary = []

    for c in coords:
        peakinfo = get_peak_info(
            c, atlastype=atlas, probThresh=probabilityThreshold)
        peak_summary.append([p[1] if type(p[1]) != list else '; '.join(
            ['% '.join(e) for e in np.array(p[1])]) for p in peakinfo])
        voxID = get_vox_coord(img.affine, c)
        peak_value.append(imgdata[voxID[0], voxID[1], voxID[2]])

        idx = get_vox_coord(img.affine, c)
        clusterID = clusters[idx[0], idx[1], idx[2]]
        clusterinfo = get_cluster_info(
            clusters == clusterID,
            img.affine,
            atlastype=atlas,
            probThresh=probabilityThreshold)
        cluster_summary.append(['; '.join(
            ['% '.join([str(round(e[0], 2)), e[1]]) for e in c[1]]) for c in
             clusterinfo])
        cluster_mean.append(imgdata[clusters == clusterID].mean())

        voxel_volume = int(img.header['pixdim'][1:4].prod())
        volume_summary.append(np.sum(clusters == clusterID) * voxel_volume)

    # Write output file
    header = [p[0] for p in peakinfo]
    with open('%s.csv' % fname[:-7], 'w') as f:
        f.writelines(','.join(
            ['ClusterID', 'Peak_Location', 'Cluster_Mean', 'Volume'] + header)
            + '\n')

        for i, c in enumerate(cluster_summary):
            f.writelines(
                ','.join(['Cluster%.02d' % (i + 1), '_'.join(
                    [str(xyz) for xyz in coords[i]]), str(cluster_mean[i]),
                     str(volume_summary[i])] + c) + '\n')

        f.writelines('\n')

        f.writelines(
            ','.join(['PeakID', 'Peak_Location', 'Peak_Value', 'Volume'] +
                     header) + '\n')

        for i, p in enumerate(peak_summary):
            f.writelines(
                ','.join(['Peak%.02d' % (i + 1), '_'.join(
                    [str(xyz) for xyz in coords[i]]), str(peak_value[i]),
                     str(volume_summary[i])] + p) + '\n')

    # Plot Clusters
    bgimg = nb.load('templates/MNI152_T1_1mm_brain.nii.gz')
    for idx, coord in enumerate(coords):
        outfile = 'cluster%02d' % (idx + 1)
        try:
            plot_stat_map(new_image, bg_img=bgimg, cut_coords=coord,
                          display_mode='ortho', colorbar=True, title=outfile,
                          threshold=voxelThresh, draw_cross=True,
                          black_bg=True, symmetric_cbar=True, vmax=color_max,
                          output_file='%s%s.png' % (fname[:-7], outfile))
        except ValueError:
            plot_stat_map(new_image, vmax=color_max,
                          colorbar=True, title=outfile, threshold=voxelThresh,
                          draw_cross=True, black_bg=True, symmetric_cbar=True,
                          output_file='%s%s.png' % (fname[:-7], outfile))
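
A hedged usage sketch; the input file and atlas label are illustrative and depend on the atlases this project actually ships:

# Hypothetical call: keep voxels with |value| > 2 in clusters of >= 5 voxels.
create_output('zstat1.nii.gz', atlas='aal', voxelThresh=2, clusterExtend=5)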
Example #11
File: fragile.py Project: tonyfast/fragile
def make_app(env="dev"):
    """
    This is still needlessly complicated.

    Returns a Flask WSGI instance.
    """
    debug = env == "dev"

    url_root = dict(
        dev="/",
        build="/dist/",
        test="/dist/",
        prod="/"
    )[env]

    app_home = os.path.dirname(__file__)

    cfg = dict(
        dev=dict(
            static_url_path="/static",
            template_folder="./templates",
            static_folder=opa(opj(app_home, "static"))
        ),
        build=dict(
            static_url_path="/",
            template_folder="./templates",
            static_folder=opa(opj(app_home, "static"))
        ),
        test=dict(
            static_url_path="/dist",
            template_folder="../dist",
            static_folder=opa(opj(app_home, "..", "dist"))
        ),
        prod=dict(
            static_url_path="/static",
            template_folder="./templates",
            static_folder=opa(opj(app_home, "static"))
        )
    )[env]

    app = Flask(__name__, **cfg)

    app.config.update(dict(
        CSRF_ENABLED=debug,
        SECRET_KEY=os.environ.get("FLASK_SECRET_KEY", "totally-insecure"),
        DEBUG=debug,
        ASSETS_DEBUG=debug,
        BOOTSTRAP_JQUERY_VERSION=None
    ))

    Bootstrap(app)

    def font_stuff(url):
        """
        Some font URL rewriting
        """
        repl = "./lib/awesome/font/"
        if env == "build":
            repl = "./font/"
        return url.replace("../font/", repl)

    fix_font_css = CSSRewrite(replace=font_stuff)

    assets = Environment(app)

    bundles = YAMLLoader(os.path.join(app_home, 'assets.yaml')).load_bundles()

    for to_fix in ["prod", "build"]:
        bundles["css-%s" % to_fix].filters.insert(0, fix_font_css)

    for name, bundle in bundles.iteritems():
        assets.register(name, bundle)


    @app.route(url_root)
    def index():
        kwargs = {
            "gh_client_id": os.environ.get("GITHUB_CLIENT_ID", "deadbeef")
        }

        return render_template("index.html", env=env, **kwargs)


    @app.route("/login")
    def login(code=None):
        return render_template("login.html")


    @app.route("/oauth")
    def oauth():
        oauth_args = dict(
            code=request.args.get("code", ""),
            state=request.args.get("state", ""),
            client_id=os.environ["GITHUB_CLIENT_ID"],
            client_secret=os.environ["GITHUB_CLIENT_SECRET"]
        )
        
        req = requests.post(
            "https://github.com/login/oauth/access_token",
            data=oauth_args
        )
        
        query = urlparse.parse_qs(req.content)
        
        return query["access_token"][0]

    return app
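
A minimal run sketch for this factory. The GitHub OAuth credentials come from the environment, per the os.environ lookups above; the values are placeholders:

# export GITHUB_CLIENT_ID=...      (index page falls back to "deadbeef" without it)
# export GITHUB_CLIENT_SECRET=...  (required by the /oauth route)
# export FLASK_SECRET_KEY=...      (optional; defaults to "totally-insecure")
app = make_app(env="dev")
app.run()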
Example #12
def main():
    """ Main function """

    ipaddr = sys.argv[1]
    abr_algo = sys.argv[2]
    dummy_run_time = int(sys.argv[3])
    process_id = sys.argv[4]
    trace_file = sys.argv[5]
    sleep_time = sys.argv[6]

    # Prevent multiple processes from being synchronized
    sleep(int(sleep_time))

    # Generate URL
    url = "http://{}/myindex_{}.html".format(ipaddr, abr_algo)
    mlog(abr_algo=abr_algo,
         trace_file=trace_file,
         msg="Server URL: {}".format(url))

    # Set timeout alarm and handler
    signal.signal(signal.SIGALRM, timeout_handler)

    # Set timeout value depending on what algorithm is being used
    # FIXED and BOLA take longer time to playback (from experience)
    if abr_algo == "FIXED":
        curr_runtime = FIXED_RUN_TIME
    elif abr_algo == "BOLA":
        curr_runtime = BOLA_RUN_TIME
    else:
        curr_runtime = ABR_RUN_TIME

    # Timeout set after current run time as decided
    signal.alarm(curr_runtime)
    mlog(abr_algo=abr_algo,
         trace_file=trace_file,
         msg="Run time alarm set at {}.".format(curr_runtime))

    try:
        # Copy over the chrome user dir
        default_chrome_user_dir = "../abr_browser_dir/chrome_data_dir"
        chrome_user_dir = "/tmp/chrome_user_dir_id_{}".format(process_id)
        system("rm -r {}".format(chrome_user_dir))
        system("cp -r {} {}".format(default_chrome_user_dir, chrome_user_dir))

        # Display the page in browser: Yes/No
        if BROWSER_DISPLAY:
            mlog(abr_algo=abr_algo,
                 trace_file=trace_file,
                 msg="Started display on browser.")
        else:
            display = Display(visible=0, size=(800, 600))
            display.start()
            mlog(abr_algo=abr_algo,
                 trace_file=trace_file,
                 msg="Started supressed display.")

        # Initialize Chrome driver
        options = Options()
        chrome_driver = "../abr_browser_dir/chromedriver"
        options.add_argument("--user-data-dir={}".format(chrome_user_dir))
        options.add_argument("--ignore-certificate-errors")
        driver = webdriver.Chrome(chrome_driver, chrome_options=options)

        # Run Chrome
        driver.set_page_load_timeout(10)
        driver.get(url)
        mlog(abr_algo=abr_algo,
             trace_file=trace_file,
             msg="Video playback started.")

        # Sleep until lock is created by ABR server
        lock_file_path = "./locks/video_log_" + abr_algo + "_" + trace_file + ".lock"
        mlog(abr_algo=abr_algo,
             trace_file=trace_file,
             msg="Looking for log file: {}".format(opa(lock_file_path)))
        sleep(200)  # running time of video is 193s
        while not ope(lock_file_path):
            mlog(abr_algo=abr_algo,
                 trace_file=trace_file,
                 msg="Not found lock file, going back to sleep for 20 secs.")
            sleep(20)

        # Remove lock after its existence is known
        orm(lock_file_path)

        # Quit the video playback
        driver.quit()
        if BROWSER_DISPLAY:
            mlog(abr_algo=abr_algo,
                 trace_file=trace_file,
                 msg="Stopped Chrome driver.")
        else:
            display.stop()
            mlog(abr_algo=abr_algo,
                 trace_file=trace_file,
                 msg="Stopped supressed display and Chrome driver.")

        print 'DONE!'

    except Exception as exception1:
        mlog(abr_algo=abr_algo,
             trace_file=trace_file,
             msg="Exception: {}".format(exception1))
        if not BROWSER_DISPLAY:
            try:
                display.stop()
                mlog(abr_algo=abr_algo,
                     trace_file=trace_file,
                     msg="Exception Handler: Stopped suppressed display.")
            except Exception as exception2:
                mlog(abr_algo=abr_algo,
                     trace_file=trace_file,
                     msg="Exception Again (Suppressed display): {}".format(
                         exception2))
        try:
            driver.quit()
            mlog(abr_algo=abr_algo,
                 trace_file=trace_file,
                 msg="Exception Handler (Chrome driver): Quit Chrome driver.")
        except Exception as exception3:
            mlog(abr_algo=abr_algo,
                 trace_file=trace_file,
                 msg="Exception Again (Chrome driver): {}".format(exception3))