Example #1
 def test_auth_v2(self):
     import simplejson as json
     SetTopBox = apps.get_model('client', 'SetTopBox')
     SetTopBoxChannel = apps.get_model('client', 'SetTopBoxChannel')
     Channel = apps.get_model('tv', 'Channel')
     clientmodels.SetTopBox.options.auto_create = False
     clientmodels.SetTopBox.options.auto_add_channel = False
     clientmodels.SetTopBox.options.use_mac_as_serial = True
     clientmodels.SetTopBox.options.auto_enable_recorder_access = True
     decoder = json.JSONDecoder()
     url_all = reverse('tv_v2:api_dispatch_list',
                       kwargs={
                           'api_name': 'v2',
                           'resource_name': 'channel'
                       })
     self.assertEqual('/tv/api/tv/v2/channel/', url_all)
     response = self.c.get(url_all)
     jcanal = decoder.decode(response.content)
     self.assertEqual(3, jcanal['meta']['total_count'])
     url_auth = reverse('tv_v2:api_dispatch_list',
                        kwargs={
                            'api_name': 'v2',
                            'resource_name': 'userchannel'
                        })
     self.assertEqual('/tv/api/tv/v2/userchannel/', url_auth)
     response = self.c.get(url_auth)
     self.assertEqual(401, response.status_code)
     auth_login = reverse('client_auth')
     auth_logoff = reverse('client_logoff')
     response = self.c.get(auth_logoff)
     self.assertEqual(200, response.status_code)
     # Create the STB
     stb = SetTopBox.objects.create(serial_number='01:02:03:04:05:06',
                                    mac='01:02:03:04:05:06')
     # self.assertTrue(stb)
     response = self.c.post(auth_login, data={'MAC': '01:02:03:04:05:06'})
     self.assertEqual(200, response.status_code)
     # get api_key
     jobj = decoder.decode(response.content)
     api_key = jobj['api_key']
     # First query (empty list)
     response = self.c.get(url_auth + '?api_key=' + api_key)
     self.assertEqual(200, response.status_code)
     # log.debug('Conteudo:%s', response.content)
     canais = Channel.objects.all()
     log.debug('STB-CH=%s', SetTopBoxChannel.objects.all())
     SetTopBoxChannel.objects.create(settopbox=stb,
                                     channel=canais[1],
                                     recorder=False)
     response = self.c.get(url_auth + '?api_key=' + api_key)
     self.assertEqual(200, response.status_code)
     # log.debug('Conteudo:%s', response.content)
     self.assertContains(response, canais[1].channelid)
     SetTopBoxChannel.objects.create(settopbox=stb,
                                     channel=canais[0],
                                     recorder=True)
     response = self.c.get(url_auth + '?api_key=' + api_key)
     self.assertEqual(200, response.status_code)
     log.debug('Conteudo:%s', response.content)
     self.assertContains(response, canais[0].channelid)
     SetTopBoxChannel.objects.create(settopbox=stb,
                                     channel=canais[2],
                                     recorder=True)
     response = self.c.get(url_auth + '?api_key=' + api_key)
     self.assertEqual(200, response.status_code)
     log.debug('Conteudo:%s', response.content)
     self.assertContains(response, canais[2].channelid)
     # Create a new STB
     stb1 = SetTopBox.objects.create(serial_number='01:02:03:04:05:07',
                                     mac='01:02:03:04:05:07')
     # Log out
     response = self.c.get(auth_logoff)
     self.assertEqual(200, response.status_code)
     # Log in with the new STB
     response = self.c.post(auth_login, data={'MAC': '01:02:03:04:05:07'})
     self.assertEqual(200, response.status_code)
     # get api_key
     jobj = decoder.decode(response.content)
     api_key = jobj['api_key']
     # First query (empty list)
     response = self.c.get(url_auth + '?api_key=' + api_key)
     self.assertEqual(200, response.status_code)
     self.assertContains(response, '"total_count": 0')
     SetTopBoxChannel.objects.create(settopbox=stb1,
                                     channel=canais[2],
                                     recorder=True)
     response = self.c.get(url_auth + '?api_key=' + api_key)
     self.assertEqual(200, response.status_code)
     self.assertContains(response, '"total_count": 1')
Example #2
def main(request, response):
    import simplejson as json
    f = file('config.json')
    source = f.read()
    s = json.JSONDecoder().decode(source)
    url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
    response.headers.set(
        "Content-Security-Policy",
        "default-src 'self'; script-src 'self' 'unsafe-inline'")
    response.headers.set(
        "X-Content-Security-Policy",
        "default-src 'self'; script-src 'self' 'unsafe-inline'")
    response.headers.set(
        "X-WebKit-CSP",
        "default-src 'self'; script-src 'self' 'unsafe-inline'")
    return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of works must retain the original copyright notice, this list
  of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
  may be used to endorse or promote products derived from this work without
  specific prior written permission.

THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Authors:
        Hao, Yunfei <*****@*****.**>

-->

<html>
  <head>
    <title>CSP Test: csp_default-src_self-style</title>
    <link rel="author" title="Intel" href="http://www.intel.com/"/>
    <link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#default-src"/>
    <meta name="flags" content=""/>
    <meta name="assert" content="default-src 'self' 'unsafe-inline'"/>
    <meta charset="utf-8"/>
    <script src="../resources/testharness.js"></script>
    <script src="../resources/testharnessreport.js"></script>
    <link rel="stylesheet" type="text/css" href='""" + url1 + """/csp/support/w3c/canvas-index.css'/>
    <link rel="stylesheet" type="text/css" href="support/blue-100x100.css"/>
    <style>
      #test-green {
        background-color: green;
      }
    </style>
  </head>
  <body>
    <div id="log"></div>
    <div id="test-blue"></div>
    <div id="test-green"></div>
    <h3>ext-css:""" + url1 + """/tests/csp/support/w3c/canvas-index.css</h3>
Example #3
 def __init__(self, handle, uri, arg_data, block=True):
     self.decoder = json.JSONDecoder()
     self.handle = handle
     self.buf = b""
     self.block = block
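The byte buffer initialized here suggests incremental decoding: data arriving on `handle` is appended to `self.buf` and complete objects are peeled off with raw_decode. A minimal sketch of that draining step (an assumption about the surrounding class, not code from it):

import json

def drain_objects(buf):
    # Split `buf` (bytes) into the complete JSON objects at its front
    # plus the unparsed remainder, using JSONDecoder.raw_decode.
    decoder = json.JSONDecoder()
    objs = []
    text = buf.decode('utf-8')
    idx = 0
    while idx < len(text):
        while idx < len(text) and text[idx].isspace():
            idx += 1
        if idx >= len(text):
            break
        try:
            obj, idx = decoder.raw_decode(text, idx)
        except ValueError:
            break  # trailing partial object stays buffered
        objs.append(obj)
    return objs, text[idx:].encode('utf-8')

# drain_objects(b'{"a": 1}{"b"') -> ([{'a': 1}], b'{"b"')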
Example #4
def baskets(request):
    message = None
    error = None
    if request.method == 'PUT':
        payload = simplejson.JSONDecoder().decode(request.body)
        # adding from basket
        if 'add' in payload:
            payload = payload['add']
            if 'basket_product_id' in payload:
                basket_product_id = payload['basket_product_id']
                product_name = payload['product_name']
                try:
                    basket_product = BasketProduct.objects.get(
                        id=basket_product_id)
                    basket_product.quantity += 1
                    basket_product.save()
                    message = str(
                        basket_product.quantity
                    ) + ' ' + product_name + '\'s are in your basket.'
                except BasketProduct.DoesNotExist:
                    error = 'Something went wrong adding ' + product_name + '.'
            # adding from shop
            elif 'product_id' in payload:
                try:
                    basket = Basket.objects.get(user=request.user)
                    try:
                        product_id = payload['product_id']
                        product = Product.objects.get(id=product_id)
                        try:
                            basket_product = BasketProduct.objects.get(
                                basket=basket, product=product)
                            basket_product.quantity += 1
                            basket_product.save()
                            message = str(
                                basket_product.quantity
                            ) + ' ' + product.name + '\'s are in your basket.'
                        except BasketProduct.DoesNotExist:
                            BasketProduct.objects.create(basket=basket,
                                                         product=product)
                            message = product.name + ' has been added to your basket.'
                    except Product.DoesNotExist:
                        error = 'Product does not exist.'
                except Basket.DoesNotExist:
                    error = 'User does not have a basket.'
            else:
                error = 'Something went wrong.'
        elif 'remove' in payload:
            basket_product_id = payload['remove']['basket_product_id']
            product_name = payload['remove']['product_name']
            try:
                basket_product = BasketProduct.objects.get(
                    id=basket_product_id)
                basket_product.quantity -= 1
                basket_product.save()
                if basket_product.quantity == 0:
                    basket_product.delete()
                    message = product_name + ' has been completely removed from your basket.'
                else:
                    if basket_product.quantity > 1:
                        message = str(
                            basket_product.quantity
                        ) + ' ' + product_name + '\'s are in your basket.'
                    else:
                        message = str(
                            basket_product.quantity
                        ) + ' ' + product_name + ' is in your basket.'
            except BasketProduct.DoesNotExist:
                error = 'User does not have a basket.'
    elif request.method == 'DELETE':
        basket_product_id = request.GET.get('basket_product_id')
        product_name = request.GET.get('product_name')
        try:
            basket_product = BasketProduct.objects.get(id=basket_product_id)
            basket_product.delete()
            message = product_name + ' has been completely removed from your basket.'
        except BasketProduct.DoesNotExist:
            error = 'Product could not be removed from your basket.'
    if error:
        return HttpResponseServerError(error)
    else:
        return JsonResponse({'message': message})
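The view above parses the PUT body with a throwaway JSONDecoder. Under Django on Python 3, request.body is bytes; simplejson.loads accepts that directly, so an equivalent (a sketch, assuming UTF-8 bodies) is simply:

import simplejson

def parse_put_body(request):
    # simplejson.loads handles both str and UTF-8 bytes input
    return simplejson.loads(request.body)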
Example #5
def main(args):
    line_result = ""
    #decode json
    f = file(args[1])
    query_file = args[2]
    write_file = args[3]
    source = f.read()

    json_object = json.JSONDecoder().decode(source)

    #read query_file
    with open(write_file, 'w') as result_file:
        with open(query_file, 'r') as wordbag:
            for line in wordbag:
                line_list = line.strip().decode("utf8").split(' ')
                line_result = ''

                # whether the full-volume (pv_all) flag applies to every line

                if json_object["criteria"]["custom"]["pv_all"] == True:
                    line_result += "全量#".decode("utf8")  # "全量" means "full volume"

                #traverse origin
                origin_length = len(json_object["criteria"]["origin"])

                for pos_idx in range(origin_length):
                    csv_pos_idx = json_object["criteria"]["origin"][pos_idx][
                        "csv_pos_idx"]
                    tag = line_list[int(csv_pos_idx)]
                    col_option = json_object["criteria"]["origin"][pos_idx][
                        "csv_col_option"]

                    if tag in col_option:  #json_object["criteria"]["origin"][pos_idx]["csv_col_option"]

                        line_result += tag + "#"

                #traverse combined

                combined_length = len(json_object["criteria"]["combined"])

                for combined_idx in range(combined_length):
                    option_name = json_object["criteria"]["combined"][
                        combined_idx]["csv_option_name"]
                    options_length = len(json_object["criteria"]["combined"]
                                         [combined_idx]["csv_col_options"])

                    # every column option must match for this combined criterion
                    judge = 1
                    for pos_idx in range(options_length):
                        idx_key = json_object["criteria"]["combined"][
                            combined_idx]["csv_col_options"][pos_idx][
                                "csv_pos_idx"]
                        col_option_name = json_object["criteria"]["combined"][
                            combined_idx]["csv_col_options"][pos_idx][
                                "csv_col_option"]

                        if line_list[int(idx_key)] != col_option_name:
                            judge = 0
                            break

                    if judge != 0:
                        line_result += option_name + "#"

                if line_result != "":
                    line_result = line_list[0] + '\t' + line_result
                    result_file.write(line_result[:-1].encode("utf8"))
                    result_file.write("\n")
Example #6
 def initialize(self):
     self.movie = json.JSONDecoder().decode(self.page)['movie']
Example #7
import os
import sys
import time
import simplejson as json

#ipadd = sys.argv[1]
if len(sys.argv) != 2:
    print "Please enter the ip"
    sys.exit(0)
else:
    try:

        ipadd = sys.argv[1]
        data = os.popen("curl -s http://%s:50060/jmx > ./taskdata.txt" %
                        ipadd).readlines()
        f = file('taskdata.txt')
        source = f.read().strip()
        ddata = json.JSONDecoder().decode(source)
        target = ddata['beans']
        f.close()
    except Exception:
        print "Execution timeout!"
        sys.exit(0)

int_RpcQueueTime_num_ops = target[7]["RpcQueueTime_num_ops"]
float_RpcQueueTime_avg_time = target[7]["RpcQueueTime_avg_time"]
int_RpcProcessingTime_num_ops = target[7]["RpcProcessingTime_num_ops"]
float_RpcProcessingTime_avg_time = target[7]["RpcProcessingTime_avg_time"]
int_getTask_num_ops = target[8]["getTask_num_ops"]
float_getTask_avg_time = target[8]["getTask_avg_time"]
int_getMapCompletionEvents_num_ops = target[8][
    "getMapCompletionEvents_num_ops"]
float_getMapCompletionEvents_avg_time = target[8][
Example #8
def getQuantAlphaData(request):
    samples = Sample.objects.all()
    samples.query = pickle.loads(request.session['selected_samples'])
    selected = samples.values_list('sampleid')
    qs1 = Sample.objects.all().filter(sampleid__in=selected)

    if request.is_ajax():
        allJson = request.GET["all"]
        all = simplejson.loads(allJson)

        button = int(all["button"])
        sig_only = int(all["sig_only"])
        norm = int(all["normalize"])

        taxaString = all["taxa"]
        taxaDict = simplejson.JSONDecoder(
            object_pairs_hook=multidict).decode(taxaString)

        metaString = all["meta"]
        metaDict = simplejson.JSONDecoder(
            object_pairs_hook=multidict).decode(metaString)
        metaDF = quantAlphaMetaDF(qs1, metaDict)

        myList = metaDF['sampleid'].tolist()
        mySet = list(set(myList))
        taxaDF = taxaProfileDF(mySet)

        factor = 'none'
        if norm == 1:
            factor = 'none'
        elif norm == 2:
            factor = 'min'
        elif norm == 3:
            factor = '10th percentile'
        elif norm == 4:
            factor = '25th percentile'
        elif norm == 5:
            factor = 'median'

        final_fieldList = []
        for key in metaDict:
            final_fieldList.append(metaDict[key])

        normDF = normalizeAlpha(taxaDF, taxaDict, mySet, factor)
        finalDF = metaDF.merge(normDF, on='sampleid', how='outer')
        finalDF[[
            final_fieldList[0], 'count', 'rel_abund', 'rich', 'diversity'
        ]] = finalDF[[
            final_fieldList[0], 'count', 'rel_abund', 'rich', 'diversity'
        ]].astype(float)
        pd.set_option('display.max_rows', finalDF.shape[0],
                      'display.max_columns', finalDF.shape[1], 'display.width',
                      1000)

        finalDict = {}
        seriesList = []
        xAxisDict = {}
        yAxisDict = {}
        grouped1 = finalDF.groupby(['rank', 'taxa_name', 'taxa_id'])
        for name1, group1 in grouped1:
            dataList = []
            x = []
            y = []
            if button == 1:
                dataList = group1[[final_fieldList[0],
                                   'count']].values.tolist()
                x = group1[final_fieldList[0]].values.tolist()
                y = group1['count'].values.tolist()
            elif button == 2:
                dataList = group1[[final_fieldList[0],
                                   'rel_abund']].values.tolist()
                x = group1[final_fieldList[0]].values.tolist()
                y = group1['rel_abund'].values.tolist()
            elif button == 3:
                dataList = group1[[final_fieldList[0], 'rich']].values.tolist()
                x = group1[final_fieldList[0]].values.tolist()
                y = group1['rich'].values.tolist()
            elif button == 4:
                dataList = group1[[final_fieldList[0],
                                   'diversity']].values.tolist()
                x = group1[final_fieldList[0]].values.tolist()
                y = group1['diversity'].values.tolist()

            if max(x) == min(x):
                stop = 0
            else:
                stop = 1
                slope, intercept, r_value, p_value, std_err = stats.linregress(
                    x, y)
                p_value = "%0.3f" % p_value
                r_square = r_value * r_value
                r_square = "%0.4f" % r_square
                min_y = slope * min(x) + intercept
                max_y = slope * max(x) + intercept
                slope = "%.3E" % slope
                intercept = "%.3E" % intercept

                regrList = []
                regrList.append([min(x), min_y])
                regrList.append([max(x), max_y])

            if sig_only == 0:
                seriesDict = {}
                seriesDict['type'] = 'scatter'
                seriesDict['name'] = name1
                seriesDict['data'] = dataList
                seriesList.append(seriesDict)
                if stop == 0:
                    regrDict = {}
                elif stop == 1:
                    regrDict = {}
                    regrDict['type'] = 'line'
                    name2 = list(name1)
                    temp = 'R2: ' + str(r_square) + '; p-value: ' + str(
                        p_value) + '<br>' + '(y = ' + str(
                            slope) + 'x' + ' + ' + str(intercept) + ')'
                    print temp
                    name2.append(temp)
                    print name2
                    regrDict['name'] = name2
                    regrDict['data'] = regrList
                    seriesList.append(regrDict)

            if sig_only == 1 and stop == 1:
                # p_value was formatted as a string above; compare numerically
                if float(p_value) <= 0.05:
                    seriesDict = {}
                    seriesDict['type'] = 'scatter'
                    name2 = list(name1)
                    temp = 'R2: ' + str(r_square) + '; p-value: ' + str(
                        p_value) + '<br>' + '(y = ' + str(
                            slope) + 'x' + ' + ' + str(intercept) + ')'
                    name2.append(temp)
                    seriesDict['name'] = name2
                    seriesDict['data'] = dataList
                    seriesList.append(seriesDict)

                    regrDict = {}
                    regrDict['type'] = 'line'
                    regrDict['name'] = name1
                    regrDict['data'] = regrList
                    seriesList.append(regrDict)

            xTitle = {}
            xTitle['text'] = final_fieldList[0]
            xAxisDict['title'] = xTitle

            yTitle = {}
            if button == 1:
                yTitle['text'] = 'Sequence Reads'
            elif button == 2:
                yTitle['text'] = 'Relative Abundance'
            elif button == 3:
                yTitle['text'] = 'Species Richness'
            elif button == 4:
                yTitle['text'] = 'Shannon Diversity'
            yAxisDict['title'] = yTitle

        finalDict['series'] = seriesList
        finalDict['xAxis'] = xAxisDict
        finalDict['yAxis'] = yAxisDict
        if not seriesList:
            finalDict['empty'] = 0
        else:
            finalDict['empty'] = 1

        finalDF.reset_index(drop=True, inplace=True)
        res_table = finalDF.to_html(classes="table display")
        res_table = res_table.replace('border="1"', 'border="0"')
        finalDict['res_table'] = str(res_table)

        res = simplejson.dumps(finalDict)
        return HttpResponse(res, content_type='application/json')
Example #9
 def __init__(self, url, decoder=None, timeout=60):
     self.decoder = decoder or json.JSONDecoder()
     self.url = url
     self.timeout = timeout
     self.log = self._get_log()
Example #10
 def setUp(self):
     self.decoder = json.JSONDecoder()
     self.encoder = json.JSONEncoderForHTML()
     self.non_ascii_encoder = json.JSONEncoderForHTML(ensure_ascii=False)
Example #11
def getCatAlphaData(request):
    ### get sample list from cookie
    samples = Sample.objects.all()
    samples.query = pickle.loads(request.session['selected_samples'])
    selected = samples.values_list('sampleid')
    qs1 = Sample.objects.all().filter(sampleid__in=selected)

    if request.is_ajax():
        allJson = request.GET["all"]
        all = simplejson.loads(allJson)
        button = int(all["button"])
        sig_only = int(all["sig_only"])
        norm = int(all["normalize"])
        selectAll = int(all["selectAll"])

        metaString = all["meta"]

        ### function to merge values on common keys
        metaDict = simplejson.JSONDecoder(
            object_pairs_hook=multidict).decode(metaString)

        ### function to create a meta variable DataFrame
        metaDF = catAlphaMetaDF(qs1, metaDict)

        myList = metaDF['sampleid'].tolist()
        mySet = list(set(myList))

        ### function to create a taxa DataFrame
        taxaDF = taxaProfileDF(mySet)

        ### function to merge values on common keys
        taxaString = all["taxa"]
        ### this taxaDict is from the dynatree (ajax call)
        taxaDict = simplejson.JSONDecoder(
            object_pairs_hook=multidict).decode(taxaString)

        # change dict if selectAll levels is on (avoids loading entire tree first)
        if selectAll == 1:
            taxaDict = {}
            qs1 = Profile.objects.all().filter(sampleid__in=mySet).values_list(
                'kingdomid', flat=True).distinct()
            taxaDict['Kingdom'] = qs1
        elif selectAll == 2:
            taxaDict = {}
            qs1 = Profile.objects.all().filter(sampleid__in=mySet).values_list(
                'phylaid', flat=True).distinct()
            taxaDict['Phyla'] = qs1
        elif selectAll == 3:
            taxaDict = {}
            qs1 = Profile.objects.all().filter(sampleid__in=mySet).values_list(
                'classid', flat=True).distinct()
            taxaDict['Class'] = qs1
        elif selectAll == 4:
            taxaDict = {}
            qs1 = Profile.objects.all().filter(sampleid__in=mySet).values_list(
                'orderid', flat=True).distinct()
            taxaDict['Order'] = qs1
        elif selectAll == 5:
            taxaDict = {}
            qs1 = Profile.objects.all().filter(sampleid__in=mySet).values_list(
                'familyid', flat=True).distinct()
            taxaDict['Family'] = qs1
        elif selectAll == 6:
            taxaDict = {}
            qs1 = Profile.objects.all().filter(sampleid__in=mySet).values_list(
                'genusid', flat=True).distinct()
            taxaDict['Genus'] = qs1
        elif selectAll == 7:
            taxaDict = {}
            qs1 = Profile.objects.all().filter(sampleid__in=mySet).values_list(
                'speciesid', flat=True).distinct()
            taxaDict['Species'] = qs1

        factor = 'none'
        if norm == 1:
            factor = 'none'
        elif norm == 2:
            factor = 'min'
        elif norm == 3:
            factor = '10th percentile'
        elif norm == 4:
            factor = '25th percentile'
        elif norm == 5:
            factor = 'median'

        ### function to normalize the number of sequence reads per sample
        normDF = normalizeAlpha(taxaDF, taxaDict, mySet, factor)

        finalDF = metaDF.merge(normDF, on='sampleid', how='outer')
        finalDF[['count', 'rel_abund', 'rich', 'diversity'
                 ]] = finalDF[['count', 'rel_abund', 'rich',
                               'diversity']].astype(float)
        pd.set_option('display.max_rows', finalDF.shape[0],
                      'display.max_columns', finalDF.shape[1], 'display.width',
                      1000)

        final_fieldList = []
        for key in metaDict:
            final_fieldList.append(key)

        finalDict = {}
        result = ""
        seriesList = []
        xAxisDict = {}
        yAxisDict = {}

        ### group DataFrame by each taxa level selected
        grouped1 = finalDF.groupby(['rank', 'taxa_name', 'taxa_id'])
        equal_error = 'no'

        ### group DataFrame by each meta variable selected
        for name1, group1 in grouped1:
            trtList = []
            valList = []
            grouped2 = pd.DataFrame()
            if button == 1:
                grouped2 = group1.groupby(final_fieldList)['count']
            elif button == 2:
                grouped2 = group1.groupby(final_fieldList)['rel_abund']
            elif button == 3:
                grouped2 = group1.groupby(final_fieldList)['rich']
                ### for taxa with only 1 species all values will be '1' and cause an anova error
                if group1['rich'].sum() == group1['rich'].count():
                    equal_error = 'yes'
            elif button == 4:
                grouped2 = group1.groupby(final_fieldList)['diversity']

            for name2, group2 in grouped2:
                if isinstance(name2, unicode):
                    trt = name2
                else:
                    trt = ' & '.join(list(name2))
                trtList.append(trt)
                valList.append(list(group2.T))

            ### One-way ANOVA with some error checking
            D = Anova1way()
            if equal_error == 'no':
                try:
                    D.run(valList, conditions_list=trtList)
                    anova_error = 'no'
                except Exception:
                    D['p'] = 1
                    anova_error = 'yes'
            else:
                D['p'] = 1
                anova_error = 'yes'

            ### select only significant ANOVAs for output (graph & text area)
            if sig_only == 1:
                if D['p'] <= 0.05:
                    result = result + '===============================================\n'
                    result = result + 'Taxa level: ' + str(name1[0]) + '\n'
                    result = result + 'Taxa name: ' + str(name1[1]) + '\n'
                    if button == 1:
                        result = result + 'Dependent Variable: Sequence Reads' + '\n'
                    elif button == 2:
                        result = result + 'Dependent Variable: Relative Abundance' + '\n'
                    elif button == 3:
                        result = result + 'Dependent Variable: Species Richness' + '\n'
                    elif button == 4:
                        result = result + 'Dependent Variable: Shannon Diversity' + '\n'

                    indVar = ' x '.join(final_fieldList)
                    result = result + 'Independent Variable: ' + str(
                        indVar) + '\n'

                    if equal_error == 'yes' or anova_error == 'yes':
                        result = result + 'Analysis cannot be performed...' + '\n'
                    else:
                        result = result + str(D) + '\n'
                    result = result + '===============================================\n'
                    result = result + '\n\n\n\n'

                    dataList = []
                    grouped2 = group1.groupby(final_fieldList).mean()

                    if button == 1:
                        dataList.extend(list(grouped2['count'].T))
                    elif button == 2:
                        dataList.extend(list(grouped2['rel_abund'].T))
                    elif button == 3:
                        dataList.extend(list(grouped2['rich'].T))
                    elif button == 4:
                        dataList.extend(list(grouped2['diversity'].T))

                    seriesDict = {}
                    seriesDict['name'] = name1
                    seriesDict['data'] = dataList
                    seriesList.append(seriesDict)

                    xTitle = {}
                    xTitle['text'] = indVar
                    xAxisDict['title'] = xTitle
                    xAxisDict['categories'] = trtList

                    yTitle = {}
                    if button == 1:
                        yTitle['text'] = 'Sequence Reads'
                    elif button == 2:
                        yTitle['text'] = 'Relative Abundance'
                    elif button == 3:
                        yTitle['text'] = 'Species Richness'
                    elif button == 4:
                        yTitle['text'] = 'Shannon Diversity'
                    yAxisDict['title'] = yTitle

            ### select all ANOVAs for output (graph & text area)
            if sig_only == 0:
                result = result + '===============================================\n'
                result = result + 'Taxa level: ' + str(name1[0]) + '\n'
                result = result + 'Taxa name: ' + str(name1[1]) + '\n'
                if button == 1:
                    result = result + 'Dependent Variable: Sequence Reads' + '\n'
                elif button == 2:
                    result = result + 'Dependent Variable: Relative Abundance' + '\n'
                elif button == 3:
                    result = result + 'Dependent Variable: Species Richness' + '\n'
                elif button == 4:
                    result = result + 'Dependent Variable: Shannon Diversity' + '\n'

                indVar = ' x '.join(final_fieldList)
                result = result + 'Independent Variable: ' + str(indVar) + '\n'

                if equal_error == 'yes' or anova_error == 'yes':
                    result = result + 'Analysis cannot be performed...' + '\n'
                else:
                    result = result + str(D) + '\n'
                result = result + '===============================================\n'
                result = result + '\n\n\n\n'

                dataList = []
                grouped2 = group1.groupby(final_fieldList).mean()
                if button == 1:
                    dataList.extend(list(grouped2['count'].T))
                elif button == 2:
                    dataList.extend(list(grouped2['rel_abund'].T))
                elif button == 3:
                    dataList.extend(list(grouped2['rich'].T))
                elif button == 4:
                    dataList.extend(list(grouped2['diversity'].T))

                seriesDict = {}
                seriesDict['name'] = name1
                seriesDict['data'] = dataList
                seriesList.append(seriesDict)

                xTitle = {}
                xTitle['text'] = indVar
                xAxisDict['title'] = xTitle
                xAxisDict['categories'] = trtList

                yTitle = {}
                if button == 1:
                    yTitle['text'] = 'Sequence Reads'
                elif button == 2:
                    yTitle['text'] = 'Relative Abundance'
                elif button == 3:
                    yTitle['text'] = 'Species Richness'
                elif button == 4:
                    yTitle['text'] = 'Shannon Diversity'
                yAxisDict['title'] = yTitle

        finalDict['series'] = seriesList
        finalDict['xAxis'] = xAxisDict
        finalDict['yAxis'] = yAxisDict
        finalDict['text'] = result
        if not seriesList:
            finalDict['empty'] = 0
        else:
            finalDict['empty'] = 1

        finalDF.reset_index(drop=True, inplace=True)
        res_table = finalDF.to_html(classes="table display")
        res_table = res_table.replace('border="1"', 'border="0"')
        finalDict['res_table'] = str(res_table)

        res = simplejson.dumps(finalDict)
        return HttpResponse(res, content_type='application/json')
Example #12
def grabJSON(s):
    decoder = simplejson.JSONDecoder()
    obj, end = decoder.raw_decode(s)
    end = WHITESPACE.match(s, end).end()
    return obj, s[end:]
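grabJSON depends on a module-level WHITESPACE regex that the excerpt omits. A self-contained equivalent, with the regex assumed to match simplejson's own definition:

import re
import simplejson

WHITESPACE = re.compile(r'[ \t\n\r]*')  # assumed, as in simplejson.decoder

def grab_json(s):
    # raw_decode returns the first object plus the index just past it;
    # skipping trailing whitespace leaves the rest of the stream intact.
    obj, end = simplejson.JSONDecoder().raw_decode(s)
    end = WHITESPACE.match(s, end).end()
    return obj, s[end:]

obj, rest = grab_json('{"a": 1} {"b": 2}')
print(obj)   # {'a': 1}
print(rest)  # {"b": 2}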
Example #13
#  gets more complex data,
# creates a sparse array (implemented as a dictionary of dictionaries) representing
# how many times user in row i has @ mentioned user in col j in all tweets
# note, use mentionsArrayFormat.py to read the output pickle from this code and write
# to a CSV for MATLAB.  File will be large... O_o
import re, cPickle
import urllib2
import simplejson as json

# infochimps stuff
API_KEY = 'akilzer-UIqIhWBpM0zto8Uw9QW0uaM7569'
CMDSTR = r'http://api.infochimps.com/social/network/tw/util/map_id?apikey=%s&screen_name=%s'
json_d = json.JSONDecoder()

# parsing stuff
pattern = re.compile(
    r'(\d+)\s(\d+)\s(.*)(20\d\d-\d\d-\d\d\s\d\d:\d\d:\d\d)\s*')
retweetPattern = re.compile(r'RT\s@(\w+)(.*)')
mentionPattern = re.compile(r'@(\w+).*')

# open the uid mapping
mf = open('mapping.pickle', 'rb')
mapping = cPickle.load(mf)
mf.close()

#f = open('training_tweets_time.txt', 'r+')
f = open('test_tweets_time.txt', 'r+')

mentionArray = {}

for line in f.readlines():
Example #14
import simplejson as json
f = open('api.json', 'r')
source = f.read()
target = json.JSONDecoder().decode(source)
print(target)
Example #15
def main(request, response):
    import simplejson as json
    f = file('config.json')
    source = f.read()
    s = json.JSONDecoder().decode(source)
    url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
    url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
    response.headers.set("Content-Security-Policy",
                         "media-src *; script-src 'self' 'unsafe-inline'")
    response.headers.set("X-Content-Security-Policy",
                         "media-src *; script-src 'self' 'unsafe-inline'")
    response.headers.set("X-WebKit-CSP",
                         "media-src *; script-src 'self' 'unsafe-inline'")
    return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of works must retain the original copyright notice, this list
  of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
  may be used to endorse or promote products derived from this work without
  specific prior written permission.

THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Authors:
        Zhang, Zhiqiang <*****@*****.**>

-->

<html>
  <head>
    <title>CSP Test: csp_media-src_asterisk_audio_allowed_ext</title>
    <link rel="author" title="Intel" href="http://www.intel.com"/>
    <link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#media-src"/>
    <meta name="flags" content=""/>
    <meta name="assert" content="media-src *; script-src 'self' 'unsafe-inline'"/>
    <meta charset="utf-8"/>
    <script src="../resources/testharness.js"></script>
    <script src="../resources/testharnessreport.js"></script>
  </head>
  <body>
    <div id="log"></div>
    <audio id="m"></audio>
    <script>
        var t = async_test(document.title);
        var m = document.getElementById("m");
        m.src = '""" + url1 + """/csp/support/khronos/red-green.theora.ogv';
Example #16
import sys
import os
import signal

import boto3
import botocore.exceptions  # $#!+, not easily accessible via boto3

import web
import web.httpserver

try:
    import simplejson as json
except ImportError:
    import json
_json_dec = json.JSONDecoder().decode

# === boto client setup
sessions = {}


def clt(n, region_name):
    s = sessions.get(region_name)
    if not s:
        s = boto3.session.Session(region_name=region_name)
        sessions[region_name] = s
    return s.client(n)


# === signal handlers
def sigchld(s, f):
Example #17
 def load_json(cls, text):
     return json.JSONDecoder(object_pairs_hook=multidict).decode(text)
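The multidict hook used here (and in Examples #8, #11 and #22) is not shown in any of the excerpts. A plausible implementation that merges duplicate keys into lists, which is the usual reason to pass object_pairs_hook, might look like this sketch:

import simplejson as json

def multidict(ordered_pairs):
    # Unlike dict(), which silently keeps only the last value for a
    # duplicated key, collect all values for that key into a list.
    d = {}
    for key, value in ordered_pairs:
        if key in d:
            if isinstance(d[key], list):
                d[key].append(value)
            else:
                d[key] = [d[key], value]
        else:
            d[key] = value
    return d

print(json.JSONDecoder(object_pairs_hook=multidict).decode('{"a": 1, "a": 2}'))
# {'a': [1, 2]}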
Example #18
def read_file(filename):
    with open(filename, 'r') as fp:
        content = fp.read()
    d = json.JSONDecoder().decode(content)
    return d
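For a one-shot read like this, JSONDecoder().decode(content) is interchangeable with json.load(fp); constructing a decoder object only pays off when it is configured once (hooks, strict=False) and reused. The shorter form:

import json

def read_file(filename):
    with open(filename, 'r') as fp:
        return json.load(fp)  # same result as JSONDecoder().decode(fp.read())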
Example #19
    url_error = False
    response = ""
    try:
        response = urllib.urlopen(url)

    except IOError, i:
        print "ERROR in executeSagexAPIJSONCall: Unable to connect to SageTV server"
        xbmc.executebuiltin('WakeOnLan(%s)' % sage_mac)
        xbmc.sleep(15000)
        url_error = True

    if url_error:
        response = urllib.urlopen(url)

    fileData = response.read()
    resp = unicodeToStr(json.JSONDecoder().decode(fileData))

    objKeys = resp.keys()
    numKeys = len(objKeys)
    if(numKeys == 1):
        return resp.get(resultToGet)    

    else:
        return None

def unicodeToStr(obj):
    t = type(obj)
    if(t is unicode):
        return obj.encode(DEFAULT_CHARSET)
    elif(t is list):
        for i in range(0, len(obj)):
Example #20
    RESTFULQUERY = url
    res = requests.delete(url, headers=headers, verify=False)
    if (res.text != ""):
        print("E: Deletion failed with:")
        print(res.text.replace('"','').replace('{','').replace('}',''))

if (res.status_code < 200 or res.status_code > 299):
    print ("E: Non 200 status code returned: " + str(res.status_code))

## IF DEBUG IS ON, PRETTY PRINT WHAT WAS SENT AND WHAT WAS RECEIVED
if (DEBUG != ""):
    print ("SENT HEADERS")
    print (pp_json(headers))
    print ("SENT TEXT")
    print (RESTFULQUERY)
    print ("")
    if (CREATEVM != ""):
        print ("EXTENDED JSON SENT FOR ADD")
        print (pp_json(CREATEVM))
    print "RETURNED HEADERS"
    ## COMPLEXITY HERE FIXES SINGLE QUOTES RATHER THAN DOUBLES
    print (pp_json(str(res.headers).replace("'",'"')))
    print "RETURNED TEXT"
    ## COMPLEXITY HERE FIXES u PREPRENDING SO PARSING WORKS
    if (res.text != ""):
        print (pp_json(json.JSONDecoder().decode(res.text)))
    else:
        print ("")
    print "RETURNED STATUS CODE"
    print (res.status_code)
Example #21
 def __init__(self, config):
     """Constructor. Requires json config object."""
     self.config = json.JSONDecoder().decode(config)
     self.stats = {}
     (self.dist, _) = utils.os_release()
Example #22
 def load_json(cls, text):
     data = json.loads(text)
     if "legend" in data:
         # riak cs before v2.1 had duplicate keys
         data = json.JSONDecoder(object_pairs_hook=multidict).decode(text)
     return data
Example #23
    # Python 2
    from urllib import unquote as parse_unquote

    def unquote_qs(atom, encoding, errors='strict'):
        return parse_unquote(atom.replace('+', ' ')).decode(encoding, errors)


# 2012/05 textextractor
__json_indent = None
if hasattr(builtins, 'json_indent'):
    __json_indent = builtins.json_indent

try:
    # Prefer simplejson, which is usually more advanced than the builtin module.
    import simplejson as json
    json_decode = json.JSONDecoder().decode
    json_encode = json.JSONEncoder(indent=__json_indent).iterencode
except ImportError:
    if py3k:
        # Python 3.0: json is part of the standard library,
        # but outputs unicode. We need bytes.
        import json
        json_decode = json.JSONDecoder().decode
        _json_encode = json.JSONEncoder(indent=__json_indent).iterencode

        def json_encode(value):
            for chunk in _json_encode(value):
                yield chunk.encode('utf8')
    elif sys.version_info >= (2, 6):
        # Python 2.6: json is part of the standard library
        import json
Example #24
 def get_json(self, *args, **kwargs):
     response = self.get(*args, **kwargs)
     return simplejson.JSONDecoder().decode(response.content)
Example #25
def json_iter_parse(response_text):
    decoder = json.JSONDecoder(strict=False)
    idx = 0
    while idx < len(response_text):
        obj, idx = decoder.raw_decode(response_text, idx)
        yield obj
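Run against a body that concatenates several JSON documents back to back (as some batched APIs return), the generator above yields each document in turn; strict=False additionally tolerates raw control characters inside strings. For example:

for obj in json_iter_parse('{"id": 1}{"id": 2}{"id": 3}'):
    print(obj)
# {'id': 1}
# {'id': 2}
# {'id': 3}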
Example #26
def decode(url):
    # `curl` is presumably a fetch helper defined elsewhere in the project;
    # the parameter is renamed so it no longer shadows the builtin str.
    return json.JSONDecoder().decode(curl(url))
Example #27
def analyze_capture(file, dfile):
    prev_data = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(file.readline())

    prev_acceleration_data = prev_data
    prev_altitude_data = prev_data
    prev_phi_data = prev_data

    downrange_distance = 0
    phi = pi / 2
    acceleration = 0
    velocity_r = 0
    velocity_x = 0
    F = 0

    stage_sep = False

    global S1_dry_mass
    global S1_prop_mass
    global S2_dry_mass
    global S2_prop_mass
    global MAX_THRUST

    mass = S1_dry_mass + S1_prop_mass + S2_dry_mass + S2_prop_mass

    uid = 0

    for line in file:
        data = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(line)

        if data['time'] - prev_acceleration_data['time'] >= ACCELERATION_INTERVAL:
            acceleration = calc_acceleration(prev_acceleration_data, data)

            if acceleration < -2:
                stage_sep = True

            F = calc_thrust(Fg, Fd, acceleration, phi, mass)

            if not stage_sep:
                S1_prop_mass -= delta_mass(prev_acceleration_data, data, F, 311)
                mass = S1_dry_mass + S1_prop_mass + S2_dry_mass + S2_prop_mass
                MAX_THRUST =  7.607*10**6
            else:
                S2_prop_mass -= delta_mass(prev_acceleration_data, data, F, 348)
                mass = S2_dry_mass + S2_prop_mass
                MAX_THRUST = 9.34*10**5

            prev_acceleration_data = data

        if data['time'] - prev_altitude_data['time'] >= VELOCITY_INTERVAL:
            Vx, Vr = calc_velocity_components(prev_altitude_data, data)

            if Vx is not None:
                velocity_x = Vx
                velocity_r = Vr

            if data['time'] >= 30 and check_angle(prev_phi_data, data, phi, asin(velocity_r / data['velocity'])) and data['time'] > ANGLE_INTERVAL:
                phi = asin(velocity_r / data['velocity'])
                prev_phi_data = data
            else:
                prev_phi_data = data

            downrange_distance += calc_x_distance(prev_altitude_data, data, velocity_x)
            prev_altitude_data = data


        PE = calc_PE(data) - PE_0
        KE_x = velocity_x ** 2 / 2
        KE_r = velocity_r ** 2 / 2

        Fd = calc_drag(data)
        Fg = mass * calc_g(data)

        centripetal_acceleration = calc_centripetal_acceleration(data, velocity_x)
        g_force = acceleration + (Fg/mass)*sin(phi)


        dict_data = OrderedDict([
            ('time', round_to_n_digits(data['time'], 2)),
            ('velocity', round_to_n_digits(data['velocity'], 2)),
            ('altitude', round_to_n_digits(data['altitude'], 2)),
            ('acceleration', round_to_n_digits(acceleration, 2)),
            ('force', 100 * round_to_n_digits(F, 2) / MAX_THRUST),
            ('s1_mass', round_to_n_digits(S1_prop_mass, 2)),
            ('s2_mass', round_to_n_digits(S2_prop_mass, 2)),
            ('vertical_velocity', round_to_n_digits(velocity_r, 2)),
            ('horizontal_velocity', round_to_n_digits(velocity_x, 2)),
            ('angle', round_to_n_digits(degrees(phi), 2)),
            ('downrange_distance', downrange_distance),
            ('gravity', round_to_n_digits(Fg, 2) / mass),
            ('g_force', round_to_n_digits(g_force, 2)),
            ('drag', round_to_n_digits(Fd, 2) / mass),
            ('pe', round_to_n_digits(PE, 2)),
            ('horizontal_ke', round_to_n_digits(KE_x, 2)),
            ('vertical_ke', round_to_n_digits(KE_r, 2)),
            ('ce', round_to_n_digits(centripetal_acceleration, 2)),
        ])

        #print(json.dumps(OrderedDict(
        #    [('uid', uid), ('timestamp', datetime.datetime.utcnow().isoformat() + 'Z'), ('data', dict_data)])))

        dfile.write(json.dumps(dict_data, dfile) + '\n')

        prev_data = data
        uid += 2
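object_pairs_hook=OrderedDict keeps each telemetry record's keys in file order (on Python 3.7+ a plain dict would too, but this code predates that guarantee). A minimal illustration:

import json
from collections import OrderedDict

line = '{"time": 1.0, "velocity": 0.0, "altitude": 0.0}'
record = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(line)
print(list(record))  # ['time', 'velocity', 'altitude'] -- order preserved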
Example #28
 def setUp(self):
     self.decoder = json.JSONDecoder()
     self.encoder = json.JSONEncoderForHTML()
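JSONEncoderForHTML is specific to simplejson: it escapes &, < and > (and U+2028/U+2029) so the encoded output can be embedded directly in HTML or inline scripts. A quick check:

import simplejson as json

enc = json.JSONEncoderForHTML()
print(enc.encode({"tag": "</script>"}))
# {"tag": "\u003c/script\u003e"}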
Example #29
import uuid
import binascii
from galaxy.util.bunch import Bunch
from galaxy.util.aliaspickler import AliasPickleModule

# For monkeypatching BIGINT
import sqlalchemy.dialects.sqlite
import sqlalchemy.dialects.postgresql
import sqlalchemy.dialects.mysql

import logging
log = logging.getLogger( __name__ )

# Default JSON encoder and decoder
json_encoder = simplejson.JSONEncoder( sort_keys=True )
json_decoder = simplejson.JSONDecoder( )

def _sniffnfix_pg9_hex(value):
    """
    Sniff for and fix postgres 9 hex decoding issue
    """
    try:
        if value[0] == 'x':
            return binascii.unhexlify(value[1:])
        else:
            return value
    except Exception, ex:
        return value

class JSONType( TypeDecorator ):
    """
Example #30
#
# Full docs at: https://docs.coinkite.com/
# 
# Copyright (C) 2014 Coinkite Inc. (https://coinkite.com) ... See LICENSE.md
# 
#
import logging
from objs import make_db_object
from decimal import Decimal

try:
    # We prefer simple json.
    import simplejson

    json_encoder = simplejson.JSONEncoder(use_decimal=True, for_json=True)
    json_decoder = simplejson.JSONDecoder(object_hook=make_db_object, parse_float=Decimal)

except ImportError:
    # We need Decimal to be encoded corrected both for read and write! Not simple.
    import json
    json_decoder = json.JSONDecoder(object_hook=make_db_object, parse_float=Decimal)

    # Lessons learned from http://stackoverflow.com/questions/1960516
    # - the provided API is not suffient to do Decimal in place of float
    # - monkey patching is too version sensitive
    # - the code in json.encoding is too heavy-duty to patch
    # - it can't be done, just use SimpleJSON
    #
    class DecimalEncoder(json.JSONEncoder):

        def default(self, o):
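The excerpt cuts off inside default(); a common way to finish such a hook (a sketch, not necessarily Coinkite's actual code) is to emit Decimal values as strings so no precision is lost on the write path:

import json
from decimal import Decimal

class DecimalEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, Decimal):
            # Emit a string to avoid float rounding; simplejson's
            # use_decimal=True (above) emits a bare number instead.
            return str(o)
        return super(DecimalEncoder, self).default(o)

print(json.dumps({"amount": Decimal("0.10000000")}, cls=DecimalEncoder))
# {"amount": "0.10000000"}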