Example #1
def __clear_key(key):
    try:
        uwsgi.cache_del(key)
        return True
    except Exception:
        logger.info("Failed to clear cache key: {}".format(key))
    return False
Example #2
def invalidate_cache_item(cache_name, key):
    """Deletes a specific item in the specified cache"""
    try:
        import uwsgi

        uwsgi.cache_del(str(key), cache_name)
    except ImportError:
        pass
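
For reference, the two-argument form used above addresses a named cache rather than the default one. A minimal sketch of the symmetric set/get/del calls against a named cache, assuming a cache named `mycache` exists in the uWSGI configuration (the key and value are placeholders; the signatures match the calls used throughout these examples):

import uwsgi

# cache_set(key, value, expires, cache_name); expires=0 means no expiry
uwsgi.cache_set("greeting", "hello", 0, "mycache")

# returns the stored value, or None on a miss
value = uwsgi.cache_get("greeting", "mycache")

# returns True when the key existed and was deleted, None otherwise
uwsgi.cache_del("greeting", "mycache")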
Example #3
def reset_debug_level(level):
    import uwsgi
    if level == '-1':
        uwsgi.cache_del("DEBUG")
    else:
        if uwsgi.cache_exists("DEBUG"):
            uwsgi.cache_update("DEBUG", level)
        else:
            uwsgi.cache_set("DEBUG", level)
    return redirect(url_for('tell_me_if_im_going_to_die', lat=39.9708657, lon=-75.1427425, meters=1000))
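
The cache_exists/cache_update/cache_set branching above can usually be collapsed: as the test examples further down this page show, cache_update stores a key whether or not it already exists, whereas cache_set fails (returns None) when the key is present. A minimal sketch of the same toggle under that assumption:

import uwsgi

def set_debug_level(level):
    if level == '-1':
        uwsgi.cache_del("DEBUG")
    else:
        # cache_update creates the key if missing and overwrites it otherwise
        uwsgi.cache_update("DEBUG", level)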
Example #4
def save(self):
    cheapskate_raw = "/".join([
        key + "=" + value for key, value in self.cheapskate.items()
        if key in Instance.CHEAPSKATE
    ])
    subprocess.check_output([
        "aws", "ec2", "create-tags", "--resources", self.instance_id,
        "--tags", 'Key=cheapskate,Value="{}"'.format(cheapskate_raw)
    ])
    uwsgi.cache_del("raw_aws")
Example #5
def cache_del(self, key, cache_server=None):
    '''
    Delete the given cached value from the cache.
    key : The cache key to delete.
    cache_server : The UNIX/TCP socket where the cache portal2 is listening. Optional.
    '''
    if cache_server is not None:
        return uwsgi.cache_del(key, cache_server)
    else:
        return uwsgi.cache_del(key)
Example #6
def _cache_get_msg(self, worker_id):
    for msg_id in range(0, 10):
        msg_key = self.cache_msg_key % (worker_id, msg_id)
        msg = uwsgi.cache_get(msg_key, self.cache)
        if msg is not None:
            logger.debug('Get and send message from worker %s - %s' %
                         (self.worker_id, msg_key))
            if worker_id:
                # delete the message unless worker_id is 0; for worker 0,
                # `short_cache_timeout` handles expiry instead
                uwsgi.cache_del(msg_key, self.cache)
            yield msg
Example #7
def process_request(json_in):
    uid = str(uuid.uuid4())
    json_in["id"] = uid
    uwsgi.queue_push(json.dumps(json_in))
    # The actual content of the message does not matter;
    # it is only sent to trigger mule execution
    uwsgi.mule_msg("s")
    while not uwsgi.cache_exists(uid, CACHE_NAME):
        continue
    res = uwsgi.cache_get(uid, CACHE_NAME)
    uwsgi.cache_del(uid, CACHE_NAME)
    return Response(response=res, status=200, mimetype="application/json")
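
The `while not uwsgi.cache_exists(...)` loop above busy-waits at full CPU until the mule publishes a result. A gentler polling variant with a sleep and a timeout; the 30-second bound and 50 ms interval are illustrative choices, and `CACHE_NAME` (and the `uwsgi` module) come from the example above:

import time

def wait_for_result(uid, timeout=30.0, interval=0.05):
    """Poll the cache for a result key, sleeping between checks."""
    deadline = time.time() + timeout
    while not uwsgi.cache_exists(uid, CACHE_NAME):
        if time.time() > deadline:
            return None  # caller can map this to a timeout response
        time.sleep(interval)
    res = uwsgi.cache_get(uid, CACHE_NAME)
    uwsgi.cache_del(uid, CACHE_NAME)
    return res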
Example #8
def test_multi_delete(self):
    for i in range(0, 100):
        self.assertTrue(uwsgi.cache_set('key1', 'X' * 50, 0, 'items_4_10'))
        self.assertTrue(uwsgi.cache_del('key1', 'items_4_10'))

    for i in range(0, 100):
        self.assertIsNone(uwsgi.cache_set('key1', 'X' * 51, 0, 'items_4_10'))
        self.assertIsNone(uwsgi.cache_del('key1', 'items_4_10'))

    for i in range(0, 100):
        self.assertTrue(uwsgi.cache_set('key1', 'X' * 50, 0, 'items_4_10'))
        self.assertTrue(uwsgi.cache_del('key1', 'items_4_10'))
Example #9
def test_big_update(self):
    self.assertTrue(uwsgi.cache_set('key1', 'X' * 40, 0, 'items_4_10'))
    self.assertTrue(uwsgi.cache_update('key1', 'X' * 10, 0, 'items_4_10'))
    self.assertTrue(uwsgi.cache_del('key1', 'items_4_10'))
    self.assertIsNone(uwsgi.cache_update('key1', 'X' * 51, 0,
                                         'items_4_10'))
    self.assertTrue(uwsgi.cache_update('key1', 'X' * 50, 0, 'items_4_10'))
Example #10
def test_big_delete(self):
    self.assertTrue(uwsgi.cache_set('key1', 'X' * 50, 0, 'items_4_10'))
    self.assertTrue(uwsgi.cache_del('key1', 'items_4_10'))
    self.assertTrue(uwsgi.cache_set('key1', 'HELLOHELLO', 0, 'items_4_10'))
    self.assertTrue(uwsgi.cache_set('key2', 'HELLOHELLO', 0, 'items_4_10'))
    self.assertTrue(uwsgi.cache_set('key3', 'HELLOHELLO', 0, 'items_4_10'))
    self.assertTrue(uwsgi.cache_set('key4', 'HELLOHELLO', 0, 'items_4_10'))
    self.assertIsNone(uwsgi.cache_set('key5', 'HELLOHELLO', 0, 'items_4_10'))
Example #11
def test_non_bitmap(self):
    self.assertTrue(uwsgi.cache_set('KEY', 'X' * 20, 0,
                                    'items_non_bitmap'))
    self.assertTrue(uwsgi.cache_del('KEY', 'items_non_bitmap'))
    self.assertIsNone(
        uwsgi.cache_set('KEY', 'X' * 21, 0, 'items_non_bitmap'))
    self.assertTrue(uwsgi.cache_set('KEY', 'X' * 20, 0,
                                    'items_non_bitmap'))
Example #12
def test_big_random(self):
    blob = self.rand_blob(100000)
    self.assertTrue(uwsgi.cache_set('KEY', blob, 0, 'items_1_100000'))
    get_blob = uwsgi.cache_get('KEY', 'items_1_100000')
    self.assertEqual(blob, get_blob)
    self.assertTrue(uwsgi.cache_del('KEY', 'items_1_100000'))
    self.assertIsNone(uwsgi.cache_set('KEY', 'X' * 100001, 0, 'items_1_100000'))
    self.assertTrue(uwsgi.cache_set('KEY', 'X' * 10000, 0, 'items_1_100000'))
Example #13
def uwsgi_report_events(user_config):
    try:
        config = _get_config(user_config)
        seconds = config.get('eventsRefreshRate', 30)
        events_cache = UWSGIEventsCache(get_uwsgi())
        sdk_api = api_factory(config)
        task = EventsSyncTask(sdk_api, events_cache, seconds, 500)
        while True:
            task._send_events()
            for _ in xrange(0, seconds):
                if uwsgi.cache_get(UWSGIEventsCache._EVENTS_FLUSH,
                                   _SPLITIO_STATS_CACHE_NAMESPACE):
                    uwsgi.cache_del(UWSGIEventsCache._EVENTS_FLUSH,
                                    _SPLITIO_STATS_CACHE_NAMESPACE)
                    break
                time.sleep(1)
    except Exception:
        _logger.exception('Exception caught posting metrics')
Example #14
def delete_tests():
    """Delete test data from cache upon successful DELETE."""
    if "receipt" not in request.args:
        return make_response(
            jsonify({"error": "Required 'receipt' parameter not found."}), 400)
    receipt = request.args.get("receipt")
    if not uwsgi.cache_del(receipt, "receipts"):
        return make_response(
            jsonify({"error": "Provided 'receipt' not found."}), 404)
    return jsonify(
        {"message": "Provided 'receipt' has been successfully deleted."})
Example #15
def get_url(url):
    """Download a file from url to cache_dir."""
    # set a lock to prevent multiple simultaneous downloads of the same file
    mypid = os.getpid()
    uwsgi.lock()
    otherpid = uwsgi.cache_get(url)
    if otherpid:
        uwsgi.unlock()
        while otherpid:
            log('D: [%d] waiting for pid %s to download %s' %
                (mypid, otherpid, url))
            time.sleep(1)
            otherpid = uwsgi.cache_get(url)
        return 200
    else:
        uwsgi.cache_set(url, str(mypid))
        uwsgi.unlock()

    dest = localfile(url)
    log('D: [%d] downloading %s to %s' % (mypid, url, dest))
    curl = pycurl.Curl()
    curl.setopt(curl.URL, url)
    curl.setopt(curl.FOLLOWLOCATION, True)
    path = '/'.join(dest.split('/')[:-1])
    if not os.path.exists(path):
        # parallel download of rpms in subdir will create it right now
        try:
            os.makedirs(path)
        except OSError as e:
            # this catches duplicate creation (so just W not E)
            # TODO: need to bypass the open() on real errors
            # like permissions
            log('W: [%d] OS error(%d): %s' %
                (mypid, e.errno, e.strerror))
    with open(dest, 'wb') as fil:
        curl.setopt(curl.WRITEFUNCTION, fil.write)
        curl.perform()
    uwsgi.cache_del(url)
    return curl.getinfo(curl.HTTP_CODE)
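
Note how this example (and the near-identical Example #16 below) wraps only the check-and-set in uwsgi.lock()/uwsgi.unlock(), then lets the cache entry itself serve as the long-lived "download in progress" marker. The claim logic can be factored into a helper; a sketch under the same assumptions (default lock, PID stored as the marker):

import os
import uwsgi

def try_claim_download(url):
    """Atomically claim a URL; return the owning PID, or None if we now own it."""
    uwsgi.lock()
    try:
        other = uwsgi.cache_get(url)
        if other:
            return other  # another worker is already downloading this URL
        uwsgi.cache_set(url, str(os.getpid()))
        return None       # we own the download; uwsgi.cache_del(url) releases it
    finally:
        uwsgi.unlock()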
Example #16
def get_url(url):
    """Download a file from url to cache_dir."""
    # set a lock to prevent multiple simultaneous downloads of the same
    # file
    mypid = os.getpid()
    uwsgi.lock()
    otherpid = uwsgi.cache_get(url)
    if otherpid:
        uwsgi.unlock()
        while otherpid:
            log('D: pid %d waiting for pid %s to download %s' %
                (mypid, otherpid, url))
            time.sleep(1)
            otherpid = uwsgi.cache_get(url)
        return 200
    else:
        uwsgi.cache_set(url, str(mypid))
        uwsgi.unlock()

    dest = localfile(url)
    log('D: pid %d downloading %s' % (mypid, url))
    curl = pycurl.Curl()
    curl.setopt(curl.URL, url)
    path = '/'.join(dest.split('/')[:-1])
    if not os.path.exists(path):
        # parallel download of rpms in subdir will create it right now
        try:
            os.makedirs(path)
        except OSError as e:
            # this catches duplicate creation (so just W not E)
            # TODO: need to bypass the open() on real errors
            # like permissions
            log('W: OS error(%d): %s' % (e.errno, e.strerror))
    with open(dest, 'wb') as fil:
        curl.setopt(curl.WRITEFUNCTION, fil.write)
        curl.perform()
    uwsgi.cache_del(url)
    return curl.getinfo(curl.HTTP_CODE)
Example #17
def get(self):
    nlp_bytes = uwsgi.cache_get('nlp')
    if nlp_bytes or uwsgi.cache_get('busy'):
        if nlp_bytes:
            temp_nlp = pickle.loads(nlp_bytes)
            temp_nlp.close()
            uwsgi.cache_del('nlp')
            uwsgi.cache_del('busy')
            return 'success. closed.', 200
        else:
            uwsgi.cache_del('busy')
            return 'success', 200
    return 'Server already closed.', 304
Example #18
def test_non_bitmap(self):
    self.assertTrue(uwsgi.cache_set('KEY', 'X' * 20, 0, 'items_non_bitmap'))
    self.assertTrue(uwsgi.cache_del('KEY', 'items_non_bitmap'))
    self.assertIsNone(uwsgi.cache_set('KEY', 'X' * 21, 0, 'items_non_bitmap'))
    self.assertTrue(uwsgi.cache_set('KEY', 'X' * 20, 0, 'items_non_bitmap'))
Example #19
def clear(cls):
    uwsgi.cache_del('binds')
Example #20
def clear(cls):
    uwsgi.cache_del('binds')
Example #21
def test_big_update(self):
    self.assertTrue(uwsgi.cache_set('key1', 'X' * 40, 0, 'items_4_10'))
    self.assertTrue(uwsgi.cache_update('key1', 'X' * 10, 0, 'items_4_10'))
    self.assertTrue(uwsgi.cache_del('key1', 'items_4_10'))
    self.assertIsNone(uwsgi.cache_update('key1', 'X' * 51, 0, 'items_4_10'))
    self.assertTrue(uwsgi.cache_update('key1', 'X' * 50, 0, 'items_4_10'))
Example #22
def __delitem__(self, item):
    uwsgi.cache_del(item)
Example #23
def test_set(self):
    self.assertTrue(uwsgi.cache_set('key1', 'HELLO', 0, 'items_17'))
    self.assertIsNone(uwsgi.cache_set('key1', 'HELLO', 0, 'items_17'))
    self.assertTrue(uwsgi.cache_del('key1', 'items_17'))
    self.assertTrue(uwsgi.cache_set('key1', 'HELLO', 0, 'items_17'))
    self.assertIsNone(uwsgi.cache_set('key1', 'HELLO', 0, 'items_17'))
Example #24
def delete(self, cache, key):
    uwsgi.cache_del(key, cache)
Example #25
def test_two_items_using_four_blocks(self):
    self.assertTrue(uwsgi.cache_update('key1', 'HE', 0, 'items_2'))
    self.assertTrue(uwsgi.cache_update('key2', 'LL', 0, 'items_2'))
    self.assertTrue(uwsgi.cache_del('key1', 'items_2'))
    self.assertIsNone(uwsgi.cache_update('key1', 'HEL', 0, 'items_2'))
    self.assertTrue(uwsgi.cache_update('key1', 'HE', 0, 'items_2'))
Example #26
def update_best_scores(game_data):
    logger.debug('Update user score')
    storage.update_user_score(game_data['user_id'], game_data['score1'], game_data['score2'])
    uwsgi.cache_del('bestscore')  # force best scores to be recalculated on the next read
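
Deleting 'bestscore' here is lazy invalidation: the next reader recomputes and re-caches the value. A minimal get-or-compute sketch of that reader side; `storage.compute_best_scores()` is a hypothetical recompute helper, and a real implementation would serialize the result before caching:

import uwsgi

def get_best_scores():
    cached = uwsgi.cache_get('bestscore')
    if cached is not None:
        return cached
    scores = storage.compute_best_scores()  # hypothetical recompute helper
    uwsgi.cache_set('bestscore', scores, 0)  # cache until the next invalidation
    return scores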
Example #27
def run(debug, host, port, close, memory, input, output, arango, user, project,
        limit, pictures, summary, relations, corefs, newgraph, documentedges):
    uwsgi.cache_update('busy', b'1')

    if debug:
        logging.basicConfig(level=logging.DEBUG)
        logging.debug("Debug on.")
    else:
        logging.basicConfig(level=logging.INFO)

    nlp_bytes = uwsgi.cache_get('nlp')

    # Set progress bar start parameters
    if nlp_bytes:
        init_time = 2
    else:
        init_time = 10

    if pictures or summary:
        nlp_time = 60
    else:
        nlp_time = 80

    yield "data:1\n\n"

    # If a Stanford CoreNLP server host and port are given, use them;
    # otherwise start a new instance through the Python wrapper
    if host and port:
        if nlp_bytes:
            temp_nlp = pickle.loads(nlp_bytes)
            temp_nlp.close()

        nlp = StanfordCoreNLP(host, port)
        uwsgi.cache_update('nlp', pickle.dumps(nlp))
        logging.debug("nlp to cache: host {}".format(uwsgi.cache_get('nlp')))
    elif nlp_bytes:
        nlp = pickle.loads(nlp_bytes)
        logging.debug("nlp from cache: {}".format(uwsgi.cache_get('nlp')))
    else:
        nlp = StanfordCoreNLP(r'../deps/stanford-corenlp/',
                              memory=memory,
                              timeout=200000,
                              quiet=not debug)
        uwsgi.cache_update('nlp', pickle.dumps(nlp))
        logging.debug("nlp to cache: file {}".format(uwsgi.cache_get('nlp')))

    DOC_CHUNK_SIZE = 10000

    # Initialise corenlp properties, s3 bucket connection, and doc count for progress bar
    data, n_items, properties, s3 = init(input,
                                         output,
                                         nlp,
                                         relations=relations,
                                         corefs=corefs,
                                         chunk_size=DOC_CHUNK_SIZE,
                                         limit=limit)
    logging.debug("items to process: {}".format(n_items))

    logging.debug("Loading CoreNLP models...")

    # Load corenlp models in separate thread to allow to send regular pings to the frontend
    server_init_thread = Thread(target=nlp.annotate, args=("", properties))
    server_init_thread.start()

    while server_init_thread.is_alive():
        time.sleep(30)
        yield "data:1\n\n"

    server_init_thread.join()
    yield "data:" + str(init_time) + "\n\n"

    # Create or load existing networkx graph object for this project
    graph_path = os.path.join(output, user, project, "nlp_outputs",
                              'graph_temp.pkl')
    if not newgraph:
        if output[:5] == 's3://' and s3.exists(graph_path):
            with s3.open(graph_path, 'rb') as f:
                logging.debug("Reading existing graph...")
                G = nx.read_gpickle(f)
        elif os.path.isfile(graph_path):
            G = nx.read_gpickle(graph_path)
        else:
            G = nx.MultiGraph()
    else:
        if arango:
            r = requests.delete("http://" + arango + "/ingest/" + user + "/" +
                                project + "/")
        G = nx.MultiGraph()

    # Main NLP parsing loop. Run corenlp annotator pipeline, resolve coreferences and extract relations. Then load into networkx graph
    i = 0
    for document in parse_docs(data,
                               input,
                               output,
                               user,
                               project,
                               nlp,
                               properties,
                               chunk_size=DOC_CHUNK_SIZE,
                               limit=limit,
                               s3=s3):
        yield "data:" + str(int(i / n_items * nlp_time) + init_time) + "\n\n"

        if corefs:
            resolve_coreferences(document[1])
            yield "data:" + str(int(i / n_items * nlp_time) +
                                init_time) + "\n\n"

        for r in make_entity_relationships(document[0],
                                           document[1],
                                           document[2],
                                           document[3],
                                           relations=relations,
                                           documentedges=documentedges):
            key_suffix = r.semantic_type or ""
            G.add_edge(r.entity1._key,
                       r.entity2._key,
                       key=r.type + key_suffix,
                       source_file=r.source_file,
                       word_dist=r.word_dist,
                       document_id=r.document_id,
                       document_date=r.document_date,
                       from_char_offset=(r.e1_char_start, r.e1_char_end),
                       to_char_offset=(r.e2_char_start, r.e2_char_end),
                       semantic_type=r.semantic_type,
                       label_first=r.entity1.label_orig,
                       label_second=r.entity2.label_orig)

            nodes = []
            elements1 = r.entity1.__dict__
            nodes.append((r.entity1._key, elements1))
            elements2 = r.entity2.__dict__
            nodes.append((r.entity2._key, elements2))

            G.add_nodes_from(nodes)
        yield "data:" + str(int(i / n_items * nlp_time) + init_time) + "\n\n"
        i += 1

    # Close the NLP server if required. Keep open to avoid model loading next time
    if close:
        nlp.close()
        uwsgi.cache_del('nlp')

    logging.debug("Calculating same sentence centrality...")
    set_type_centrality(G, "same_sentence")

    if documentedges:
        yield "data:" + str(init_time + nlp_time + 2) + "\n\n"
        set_type_centrality(G, "same_document")
        yield "data:" + str(init_time + nlp_time + 5) + "\n\n"
    else:
        yield "data:" + str(init_time + nlp_time + 5) + "\n\n"

    # Write graph object to JSON representation
    out_data = json_graph.node_link_data(G)

    # Serialise and write the graph object for use in next upload
    if output[:5] == 's3://':
        with s3.open(graph_path, 'wb') as f:
            nx.write_gpickle(G, f)
    else:
        nx.write_gpickle(G, graph_path)

    del G

    # remove and rename output variables to fit data api requirements
    out_data.pop('directed')
    out_data.pop('multigraph')

    out_data['vertices'] = out_data.pop('nodes')
    out_data['edges'] = out_data.pop('links')

    # Run wikipedia lookups of thumbnail urls and article summaries
    if pictures or summary:
        processes = []
        with ThreadPoolExecutor(max_workers=None) as executor:
            for idx, v in enumerate(out_data['vertices']):
                v.pop('id')

                if v['_key'].split("_")[-1] not in ('LOCATION', 'MISC',
                                                    'ORGANIZATION', 'PERSON',
                                                    'COREF'):
                    url = 'https://en.wikipedia.org/wiki/' + v['_key']
                    processes.append(
                        executor.submit(getWikiImageSummary, url, pictures,
                                        summary, idx))

            i = 0
            for task in as_completed(processes):
                logging.debug(
                    "Finished processing vertex: {} out of {}".format(
                        i + 1, len(processes)))
                imageurl, summarytext, idx = task.result()
                out_data['vertices'][idx]['image_url'], out_data['vertices'][
                    idx]['summary'] = imageurl, summarytext
                if i % 10 == 0:
                    yield "data:" + str(
                        int(i / len(processes) * (80 - nlp_time)) + nlp_time +
                        init_time + 5) + "\n\n"
                i += 1

    # More renaming to fit data api requirements
    for e in out_data['edges']:
        e['_from'] = "vertices/" + clean_label(e.pop('source'))
        e['_to'] = "vertices/" + clean_label(e.pop('target'))
        e['type'] = e.pop('key')[:13]
        e['_key'] = str(uuid.uuid4())

    yield "data:96\n\n"

    # Either load data into arango db, or save json representation to file system or s3
    LINE_LIMIT = 100000

    if arango:
        logging.debug("sending: {}, {}, {}".format(arango, user, project))

        send_to_arango(out_data,
                       arango,
                       user,
                       project,
                       LINE_LIMIT,
                       doc_type="vertices")
        yield "data:97\n\n"

        send_to_arango(out_data,
                       arango,
                       user,
                       project,
                       LINE_LIMIT,
                       doc_type="same_sentence")

        yield "data:98\n\n"

        if documentedges:
            logging.debug("adding document edges")
            send_to_arango(out_data,
                           arango,
                           user,
                           project,
                           LINE_LIMIT,
                           doc_type="same_document")

    else:
        edges_ss = [
            e for e in out_data['edges'] if e['type'] == "same_sentence"
        ]

        if documentedges:
            edges_sd = [
                e for e in out_data['edges'] if e['type'] == "same_document"
            ]

        write_list_in_chunks(out_data['vertices'], LINE_LIMIT // 10, output,
                             user, project, 'vertices')
        yield "data:97\n\n"
        write_list_in_chunks(edges_ss, LINE_LIMIT, output, user, project,
                             'edges_ss')
        yield "data:98\n\n"
        if documentedges:
            write_list_in_chunks(edges_sd, LINE_LIMIT, output, user, project,
                                 'edges_sd')

    uwsgi.cache_del('busy')
    yield "data:100\n\n"
def save(self):
    cheapskate_raw = "/".join([key + "=" + value for key, value in self.cheapskate.items() if key in Instance.CHEAPSKATE])
    subprocess.check_output(["aws", "ec2", "create-tags", "--resources", self.instance_id, "--tags", 'Key=cheapskate,Value="{}"'.format(cheapskate_raw)])
    uwsgi.cache_del("raw_aws")
Example #29
def _open_notebook(request, coursename, student, notebook, *, forcecopy,
                   init_student_git):  # pylint: disable=r0914
    """
    implement both edx_request and classroom_request
    that behave almost exactly the same
    """
    ok, explanation = authorized(request)

    if not ok:
        return HttpResponseForbidden(f"Access denied: {explanation}")

    coursedir = CourseDir.objects.get(coursename=coursename)
    if not coursedir.is_valid():
        return error_page(
            request,
            coursename,
            student,
            notebook,
            f"no such course `{coursename}'",
            header=True,
        )

    # the ipynb extension is removed from the notebook name in urls.py
    exists, notebook_with_ext, _, is_genuine_notebook = \
        locate_notebook(coursedir.git_dir, notebook)

    # second attempt from the student's space
    # in case the student has created it locally...
    if not exists:
        exists, notebook_with_ext, _, is_genuine_notebook = \
            locate_notebook(coursedir.student_dir(student), notebook)

    if not exists:
        msg = f"notebook `{notebook}' not known in this course or student"
        return error_page(request,
                          coursename,
                          student,
                          notebook,
                          msg,
                          header="notebook not found")

    # deal with concurrent requests on the same container
    # by using a shared memory (a uwsgi cache)
    # starting_containers is the cache name
    # as configured in nbhosting.ini(.in)

    # import here and not at toplevel as that would be too early
    # https://uwsgi-docs.readthedocs.io/en/latest/PythonModule.html?highlight=cache#cache-functions
    import uwsgi
    idling = 0.5
    # just a safety in case our code would not release stuff properly
    expire_in_s = 15
    expire = TimeDelta(seconds=expire_in_s)

    def my_repr(timedelta):
        return f"{timedelta.seconds}s {timedelta.microseconds}µs"

    container = f'{coursename}-x-{student}'
    for attempt in itertools.count(1):
        already = uwsgi.cache_get(container, 'starting_containers')

        # good to go
        if not already:
            logger.info(
                f"{attempt=} going ahead with {container=} and {notebook=}")
            now_bytes = pickle.dumps(DateTime.now())
            uwsgi.cache_set(container, now_bytes, 0, "starting_containers")
            break

    # has the stored token expired?
        already_datetime = pickle.loads(already)
        age = DateTime.now() - already_datetime
        if age >= expire:
            logger.info(
                f"{attempt=} expiration ({my_repr(age)} is > {expire_in_s}s) "
                f"going ahead with {container=} and {notebook=}")
            break

        # not good, waiting our turn...
        logger.info(
            f"{attempt=} waiting for {idling=} because {my_repr(age)} is < {expire_in_s}s "
            f"with {container=} and {notebook=}")
        time.sleep(idling)

    subcommand = 'container-view-student-course-notebook'

    # build command
    command = ['nbh', '-d', sitesettings.nbhroot]
    if DEBUG:
        command.append('-x')
    command.append(subcommand)
    # propagate the forcecopy flag for reset_from_origin
    if forcecopy:
        command.append('-f')
    # propagate that a git initialization was requested
    # forcecopy has no effect in this case
    if init_student_git:
        command.append('-g')
        # a student repo gets cloned from local course git
        # for lower delays when updating, and removing issues
        # like accessing private repos from the students space
        ref_giturl = str(coursedir.git_dir)
    else:
        ref_giturl = coursedir.giturl

    # add arguments to the subcommand
    command += [
        student, coursename, notebook_with_ext, coursedir.image, ref_giturl
    ]
    command_str = " ".join(command)
    logger.info(f'edxfront is running (DEBUG={DEBUG}): {command_str}')
    completed = subprocess.run(command,
                               universal_newlines=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    log_completed_process(completed, subcommand)

    try:
        action, _container_name, actual_port, jupyter_token = completed.stdout.split(
        )

        if completed.returncode != 0 or action.startswith("failed"):
            message = failed_command_message(
                command_str,
                completed,
                prefix="failed to spawn notebook container")
            header = failed_command_header(action)
            return error_page(request, coursename, student, notebook, message,
                              header)

        # remember that in events file for statistics
        Stats(coursename).record_open_notebook(student, notebook, action,
                                               actual_port)
        # redirect with same proto (http or https) as incoming
        scheme = request.scheme
        # get the host part of the incoming URL
        host = request.get_host()
        # remove initial port if present in URL
        if ':' in host:
            host, _ = host.split(':', 1)
        ########## forge a URL that nginx will intercept
        # passing along course and student is for 'reset_from_origin'
        if is_genuine_notebook:
            url = (f"{scheme}://{host}/{actual_port}/notebooks/"
                   f"{notebook_with_ext}?token={jupyter_token}&"
                   f"course={coursename}&student={student}")
        else:
            url = (
                f"{scheme}://{host}/{actual_port}/lab/tree/{notebook_with_ext}"
            )
        logger.info(f"edxfront: redirecting to {url}")
        return HttpResponseRedirect(url)

    except Exception as exc:
        prefix = (f"exception when parsing output of nbh {subcommand}\n"
                  f"{type(exc)}: {exc}")
        message = failed_command_message(command_str, completed, prefix=prefix)
        return error_page(request, coursename, student, notebook, message)
    finally:
        uwsgi.cache_del(container, "starting_containers")
Example #30
def invalidate_cache_item(cache_name, key):
    try:
        import uwsgi
        uwsgi.cache_del(key, cache_name)
    except ImportError:
        pass
Example #31
def application(env, start_response):
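	# NOTE: this handler packs records and fields into cached strings using
	# non-printable delimiter characters; those delimiters were lost when the
	# page was extracted, which is why the split("") calls below appear to use
	# an empty separator (an empty separator raises ValueError in Python).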
	uwsgi.websocket_handshake(env["HTTP_SEC_WEBSOCKET_KEY"], env.get("HTTP_ORIGIN", ""))
	if (not uwsgi.cache_exists("chats")):
		uwsgi.cache_update("chats", "")
	if (not uwsgi.cache_exists("names")):
		uwsgi.cache_update("names", "")
	if (not uwsgi.cache_exists("roomNumbers")):
		uwsgi.cache_update("roomNumbers", "")
	# Static data for testing:
	if (uwsgi.cache_get("roomNumbers") == ""):
		uwsgi.cache_update("roomNumbers", uwsgi.cache_get("roomNumbers") + "".join([str(number) for number in [0, 10, 11, 12]]))
	if (not uwsgi.cache_exists("0")):
		uwsgi.cache_update("0", "1Reimu11Marisa22Rumia33Daiyousei44")
	if (not uwsgi.cache_exists("10")):
		uwsgi.cache_update("10", "2Cirno11Meiling22Koakuma33Patchouli44")
	if (not uwsgi.cache_exists("11")):
		uwsgi.cache_update("11", "3Sakuya11Remilia22Flandre33Letty44")
	if (not uwsgi.cache_exists("12")):
		uwsgi.cache_update("12", "0Chen11Alice22Lily33")
	playersMax = 4
	nameChat = ""
	roomsMax = 100
	roomNumberChat = -1
	while (True):
		msg = uwsgi.websocket_recv()
		msg_type = ""
		msg_data = ""
		if (msg and (msg != "")):
			msg_type = msg.split("")[0]
			msg_data = msg.split("")[1]
			print "Message: " + repr(msg) + "; " + "Type: " + repr(msg_type) + "; " + "Data: " + repr(msg_data)
		if (msg_type == "chat"):
			chats = uwsgi.cache_get("chats")
			chats += "" + msg_data + ""
			uwsgi.cache_update("chats", chats)
		if (msg_type == "close"):
			roomNumber = msg_data.split("")[0]
			name = msg_data.split("")[1]
			if (name):
				names = uwsgi.cache_get("names").split("")
				names.remove(name)
				uwsgi.cache_update("names", "".join(names))
				chats = uwsgi.cache_get("chats").split("")
				i = 0
				while (i < len(chats)):
					chat = chats[i].split("")
					if (name in chats[3:]):
						del chat[chat.index(name, 3)]
						chats[i] = "".join(chat)
			if (int(roomNumber) > -1):
				room = uwsgi.cache_get(roomNumber).split("")
				i = 1
				while (i < len(room)):
					if (name == room[i].split("")[0]):
						room[i] = ""
						room = "".join(room)
						uwsgi.cache_update(roomNumber, room)
						if (room[room.index(""):] == playersMax * ""):
							roomNumbers = uwsgi.cache_get("roomNumbers").split("")
							roomNumbers.remove(roomNumber)
							uwsgi.cache_update("roomNumbers", "".join(roomNumbers))
							uwsgi.cache_del(roomNumber)
						break
					i += 1
				print name + " disconnected."
			return [""]
		if (msg_type == "leave"):
			roomNumber = msg_data.split("")[0]
			name = msg_data.split("")[1]
			roomNumberChat = -1
			room = uwsgi.cache_get(roomNumber).split("")
			i = 1
			while (i < len(room)):
				if (name == room[i].split("")[0]):
					room[i] = ""
					room = "".join(room)
					uwsgi.cache_update(roomNumber, room)
					if (room[room.index(""):] == playersMax * ""):
						roomNumbers = uwsgi.cache_get("roomNumbers").split("")
						roomNumbers.remove(roomNumber)
						uwsgi.cache_update("roomNumbers", "".join(roomNumbers))
						uwsgi.cache_del(roomNumber)
					break
				i += 1
		if (msg_type == "join"):
			roomNumber = msg_data.split("")[0]
			name = msg_data.split("")[1]
			room = uwsgi.cache_get(roomNumber).split("")
			if (room[0] != "0"):
				uwsgi.websocket_send("false")
			else:
				i = 1
				while (i < len(room)):
					if ((room[i] == "") and (room[i] != name + "")):
						room[i] = name + room[i]
						room = "".join(room)
						uwsgi.cache_update(roomNumber, room)
						uwsgi.websocket_send(room)
						roomNumberChat = int(roomNumber)
						break
					i += 1
				else:
					uwsgi.websocket_send("false")
		if (msg_type == "name"):
			if (msg_data in uwsgi.cache_get("names").split("")):
				uwsgi.websocket_send("false")
			else:
				names = uwsgi.cache_get("names").split("")
				names.append(msg_data)
				uwsgi.cache_update("names", "".join(names))
				print msg_data + " connected."
				nameChat = msg_data
				uwsgi.websocket_send("true")
		if (msg_type == "roomCreate"):
			roomNumbers = uwsgi.cache_get("roomNumbers").split("")
		if (len(roomNumbers) == 100): # the cache is full
				uwsgi.websocket_send("false")
			roomNumbers = [int(number) for number in roomNumbers if number]
			# Not the most efficient, but an easy way to find the lowest available room number:
			roomNumber = 0
			while (roomNumber in roomNumbers):
				roomNumber += 1
			roomNumbers.append(roomNumber)
			roomNumbers = sorted(roomNumbers)
			uwsgi.cache_update("roomNumbers", "".join([str(number) for number in roomNumbers]))
			roomNumberChat = roomNumber
			roomNumber = str(roomNumber)
			uwsgi.cache_update(roomNumber, "0" + "" + msg_data + "" + (playersMax - 1) * "")
			uwsgi.websocket_send(roomNumber)
		if (msg_type == "rooms"):
			rooms = []
			for number in uwsgi.cache_get("roomNumbers").split(""):
				if (number):
					rooms.append(number + "" + uwsgi.cache_get(number))
			uwsgi.websocket_send("".join(rooms))
		if (msg_type == "wait"):
			uwsgi.websocket_send(uwsgi.cache_get(msg_data.split("")[0]))
			room = uwsgi.cache_get(msg_data.split("")[0]).split("")
			room = [player.split("") for player in room]
			for player in room[1:]:
				if (not player[0]):
					break
			else:
				uwsgi.websocket_send("ready")
		chats = uwsgi.cache_get("chats")
		chats = chats.split("")
		i = 0
		while (i < len(chats)):
			chat = chats[i].split("")
			if (chat == [""]):
				i += 1
				continue
			if (nameChat not in chat[3:]):
				chat.append(nameChat)
				chats[i] = "".join(chat)
				if (roomNumberChat == int(chat[0])):
					uwsgi.websocket_send("chat" + chat[1] + "" + chat[2])
				names = uwsgi.cache_get("names").split("")
				namesChat = chat[3:]
				for name in names:
					if (name not in namesChat):
						break
				else:
					del chats[i]
			i += 1
		uwsgi.cache_update("chats", "".join(chats))
Example #32
def test_overlapping(self):
    self.assertTrue(uwsgi.cache_update('key1', 'HE', 0, 'items_2'))
    self.assertIsNone(uwsgi.cache_update('key1', 'HELL', 0, 'items_2'))
    self.assertTrue(uwsgi.cache_del('key1', 'items_2'))
    self.assertTrue(uwsgi.cache_update('key1', 'HELL', 0, 'items_2'))
Example #33
def __delitem__(self, item):
    uwsgi.cache_del(item)
Example #34
def delete(self, key):
    cache_del(key, self.name)
Example #35
def delete(self, key):
    '''
    Deletes the given key from the cache. Returns True if the key existed and was deleted, False otherwise.
    '''
    return uwsgi.cache_del(key) is not None
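
This wrapper pins down the return contract visible in the tests above: uwsgi.cache_del returns True on success and None on a miss, so `is not None` converts it into a strict boolean. A module-level equivalent with a usage line (the "DEBUG" key is just an example):

import uwsgi

def cache_delete(key):
    """Return True if the key existed and was deleted, False otherwise."""
    return uwsgi.cache_del(key) is not None

removed = cache_delete("DEBUG")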
Example #36
def del_(self, key):
    uwsgi.cache_del(key, self.cachename)
    self._keys.discard(key)