Example #1
import logging
from subprocess import call

from flask import request


def JsonHandler():
    if request.is_json:
        content = request.get_json()
        logging.info(content['event_name'])
        # Handle project changes
        if content['event_name'] == "project_create":
            logging.info('Project created. ' + content['name'])
            your_call = call("sh ./sleep.sh", shell=True)
            logging.info('Test: ' + str(your_call))
            return "OK"
        elif content['event_name'] == "push":
            logging.info('Project push. ' + content['name'])
            return "OK"
        elif content['event_name'] == "project_destroy":
            logging.info('Project destroyed. ' + content['name'])
            return "OK"
        elif content['event_name'] == "project_rename":
            logging.info('Project renamed from ' +
                         content['old_path_with_namespace'] + ' to ' +
                         content['name'])
            return "OK"
        elif content['event_name'] == "project_transfer":
            logging.info('Project {0} transferred'.format(content['name']))
            return "OK"
        else:
            logging.info(content)
            return "OK"
    else:
        logging.info("Request is not JSON")
        return "JSON Only"
Example #2
async def select(sql, args, size=None):
    log(sql, args)
    global __pool
    async with __pool.get() as conn:
        async with conn.cursor(aiomysql.DictCursor) as cur:
            await cur.execute(sql.replace('?', '%s'), args or ())
            if size:
                rs = await cur.fetchmany(size)
            else:
                rs = await cur.fetchall()
        logging.info('rows returned: %s' % len(rs))
        return rs
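A minimal usage sketch, assuming __pool was created elsewhere with aiomysql.create_pool() and that log() is the module's own logging helper (both live outside this snippet):

async def find_by_email(email):
    # '?' placeholders are rewritten to '%s' by select() before execution
    rs = await select('select * from users where email=?', [email], size=1)
    return rs[0] if rs else None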
Example #4
def create_tables():
    """ Create all four tables for the Content Safety demonstration.
        This is the schema for all the tables.
    """
    inf("Creating tables")
    
    pinners = Table('pinners', metadata,
        Column('pinner_id', Integer, primary_key=True),
        Column('name', String(40)),
        Column('email', String(40))
    )
    pinners.create()
    
    contents = Table('contents', metadata,
        Column('content_id', Integer, primary_key=True),
        Column('url', String(80)),
        Column('display_status', String(20)), # good, objectionable, copyright
        Column('pinner_id', Integer, ForeignKey('pinners.pinner_id'))
    )
    contents.create()

    reviewers = Table('reviewers', metadata,
        Column('reviewer_id', Integer, primary_key=True),
        Column('name', String(40)),
        Column('email', String(40))
    )
    reviewers.create()

    complaints = Table('complaints', metadata,
        Column('complaint_id', Integer, primary_key=True),
        Column('complaint_timestamp', DateTime), # when the complaint was filed
        Column('complaint_type', String(80)), # objectionable, copyright
        Column('process_status', String(20)), # complaint, review, done
        Column('display_status', String(20)), # good, objectionable, copyright
        Column('review_timestamp', DateTime), # when the complaint was resolved
        Column('pinner_id', Integer, ForeignKey('pinners.pinner_id')),
        Column('reviewer_id', Integer, ForeignKey('reviewers.reviewer_id')),
        Column('content_id', Integer, ForeignKey('contents.content_id'))
    )
    complaints.create()
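A sketch of the setup these definitions assume: a MetaData object bound to an engine in SQLAlchemy 1.x style, which is what lets table.create() run with no arguments (the engine URL here is an assumption):

from sqlalchemy import (MetaData, Table, Column, Integer, String, DateTime,
                        ForeignKey, create_engine)

engine = create_engine('sqlite:///content_safety.db')  # URL is an assumption
metadata = MetaData(bind=engine)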
Example #5
def load_tables():
    """ Load the content and pinners tables
    """
    inf("Loading tables")
    
    m_url = 'https://s3-us-west-2.amazonaws.com/boliek-public/animals/'
    
    pinners = Table('pinners', metadata, autoload=True)
    i = pinners.insert()
    i.execute({'name': 'Mary', 'email': '*****@*****.**'},
              {'name': 'John', 'email': '*****@*****.**'},
              {'name': 'Susan', 'email': '*****@*****.**'},
              {'name': 'Carl', 'email': '*****@*****.**'}
    )

    contents = Table('contents', metadata, autoload=True)
    i = contents.insert()
    i.execute({'url': m_url + 'cat0.jpg', 'display_status': 'good', 'pinner_id': 4},
              {'url': m_url + 'cat1.jpg', 'display_status': 'good', 'pinner_id': 4},
              {'url': m_url + 'cat2.jpg', 'display_status': 'good', 'pinner_id': 1},
              {'url': m_url + 'cat3.jpg', 'display_status': 'good', 'pinner_id': 4},
              {'url': m_url + 'dog0.jpg', 'display_status': 'good', 'pinner_id': 1},
              {'url': m_url + 'dog1.jpg', 'display_status': 'good', 'pinner_id': 1},
              {'url': m_url + 'dog2.jpg', 'display_status': 'good', 'pinner_id': 2},
              {'url': m_url + 'dog3.jpg', 'display_status': 'good', 'pinner_id': 2},
              {'url': m_url + 'reptile0.jpg', 'display_status': 'good', 'pinner_id': 1},
              {'url': m_url + 'reptile1.jpg', 'display_status': 'good', 'pinner_id': 2},
              {'url': m_url + 'reptile2.jpg', 'display_status': 'good', 'pinner_id': 3},
              {'url': m_url + 'reptile3.jpg', 'display_status': 'good', 'pinner_id': 3}
    )

    reviewers = Table('reviewers', metadata, autoload=True)
    i = reviewers.insert()
    i.execute({'name': 'Alice', 'email': '*****@*****.**'},
              {'name': 'Bob', 'email': '*****@*****.**'},
              {'name': 'Carol', 'email': '*****@*****.**'}
    )
Example #6
# __new__ of a model metaclass; see the sketch below for the surrounding classes
def __new__(cls, name, bases, attrs):
    if name == 'Model':
        return type.__new__(cls, name, bases, attrs)
    tableName = attrs.get('__table__', None) or name
    logging.info('found model: %s (table: %s)' % (name, tableName))
    mappings = dict()
    fields = []
    primaryKey = None
    for k, v in attrs.items():
        if isinstance(v, Field):
            logging.info('  found mapping: %s ==> %s' % (k, v))
            mappings[k] = v
            if v.primary_key:
                if primaryKey:
                    # StandardError no longer exists in Python 3
                    raise RuntimeError('Duplicate primary key for field: %s' % k)
                primaryKey = k
            else:
                fields.append(k)
    if not primaryKey:
        raise RuntimeError('Primary key not found.')
    for k in mappings.keys():
        attrs.pop(k)
    escaped_fields = list(map(lambda f: '`%s`' % f, fields))
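A minimal sketch of the classes this __new__ typically lives in, following the common async-ORM tutorial pattern; the Field stub below is a stand-in, not the original class:

class Field(object):
    # Stub standing in for the tutorial's Field hierarchy
    def __init__(self, column_type=None, primary_key=False, default=None):
        self.column_type = column_type
        self.primary_key = primary_key
        self.default = default


class ModelMetaclass(type):
    def __new__(cls, name, bases, attrs):
        # ... the __new__ body shown above goes here ...
        return type.__new__(cls, name, bases, attrs)


class Model(dict, metaclass=ModelMetaclass):
    pass


class User(Model):
    __table__ = 'users'
    id = Field('bigint', primary_key=True)
    email = Field('varchar(50)')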
Example #7
def unbalance_emai(y,
                   xmat,
                   zmat,
                   kin,
                   init=None,
                   maxiter=30,
                   cc_par=1.0e-8,
                   cc_gra=1.0e-6,
                   em_weight_step=0.001):
    num_var_pos = 1
    for i in range(len(zmat)):
        num_var_pos += len(zmat[i])
    y_var = np.var(y) / num_var_pos
    num_record = y.shape[0]
    var_com = []
    eff_ind = [[0, xmat.shape[1]]]  # the index for all effects [start end]
    zmat_con_lst = []  # combined random matrix
    cov_dim = []  # the dim for covariance matrix
    vari = []
    varij = []
    varik = []
    i = 0  # keeps i defined for the bookkeeping below when zmat is empty
    for i in range(len(zmat)):
        temp = [eff_ind[i][-1]]
        zmat_con_lst.append(hstack(zmat[i]))
        cov_dim.append(len(zmat[i]))
        for j in range(len(zmat[i])):
            temp.append(temp[-1] + zmat[i][j].shape[1])
            for k in range(j + 1):
                vari.append(i + 1)
                varij.append(j + 1)
                varik.append(k + 1)
                if j == k:
                    var_com.append(y_var)
                else:
                    var_com.append(0.0)
        eff_ind.append(temp)
    var_com.append(y_var)
    vari.append(i + 2)
    varij.append(1)
    varik.append(1)
    if init is None:
        var_com = np.array(var_com)
    else:
        if len(var_com) != len(init):
            logging.info('ERROR: The length of initial variances should be ' +
                         str(len(var_com)))
            exit()
        else:
            var_com = np.array(init)
    var_com_update = np.array(var_com)
    logging.info('***Prepare the MME***')
    zmat_con = hstack(zmat_con_lst)  # design matrix for random effects
    wmat = hstack([xmat, zmat_con])  # merged design matrix
    cmat_pure = np.dot(wmat.T, wmat)  # C matrix
    rhs_pure = wmat.T.dot(y)  # right hand
    # em weight vector
    if em_weight_step <= 0.0 or em_weight_step > 1.0:
        logging.info(
            'ERROR: The em weight step should be between 0 (exclusive) and 1 (inclusive)'
        )
        exit()
    iter_count = 0
    cc_par_val = 1000.0
    cc_gra_val = 1000.0
    delta = 1000.0
    logging.info("initial variances: " +
                 ' '.join(np.array(var_com, dtype=str)))
    covi_mat = pre_covi_mat(cov_dim, var_com)
    if covi_mat is None:
        logging.info(
            "ERROR: Initial variances are not positive definite, please check!")
        exit()
    while iter_count < maxiter:
        iter_count += 1
        logging.info('***Start the iteration: ' + str(iter_count) + ' ***')
        logging.info("Prepare the coefficient matrix")
        cmat = (cmat_pure.multiply(1.0 / var_com[-1])).toarray()
        for i in range(len(cov_dim)):
            if isspmatrix(kin[i]):
                temp = sparse.kron(covi_mat[i], kin[i])
                temp = temp.toarray()
            else:
                temp = linalg.kron(covi_mat[i], kin[i])
            block = slice(eff_ind[i + 1][0], eff_ind[i + 1][-1])
            cmat[block, block] = np.add(cmat[block, block], temp)
        rhs_mat = np.divide(rhs_pure, var_com[-1])
        cmati = linalg.inv(cmat)
        eff = np.dot(cmati, rhs_mat)
        e = y - xmat.dot(eff[:eff_ind[0][1], :]) - zmat_con.dot(
            eff[eff_ind[0][1]:, :])
        # first-order derivative
        fd_mat = pre_fd_mat_x(cmati, kin, covi_mat, eff, eff_ind, e, cov_dim,
                              zmat_con_lst, wmat, num_record, var_com)
        # AI matrix
        ai_mat = pre_ai_mat(cmati, covi_mat, eff, eff_ind, e, cov_dim,
                            zmat_con_lst, wmat, var_com)
        # EM matrix
        em_mat = pre_em_mat(cov_dim, zmat_con_lst, num_record, var_com)
        # Increase em weight to guarantee variances positive
        gamma = -em_weight_step
        while gamma < 1.0:
            gamma = gamma + em_weight_step
            if gamma >= 1.0:
                gamma = 1.0
            wemai_mat = (1 - gamma) * ai_mat + gamma * em_mat
            delta = np.dot(linalg.inv(wemai_mat), fd_mat)
            var_com_update = var_com + delta
            covi_mat = pre_covi_mat(cov_dim, var_com_update)
            if covi_mat is not None:
                logging.info('EM weight value: ' + str(gamma))
                break
        logging.info('Updated variances: ' +
                     ' '.join(np.array(var_com_update, dtype=str)))
        if covi_mat is None:
            logging.info("ERROR: Updated variances is not positive define!")
            exit()
        # Convergence criteria
        cc_par_val = np.sum(pow(delta, 2)) / np.sum(pow(var_com_update, 2))
        cc_par_val = np.sqrt(cc_par_val)
        cc_gra_val = np.sqrt(np.sum(pow(fd_mat, 2))) / len(var_com)
        var_com = var_com_update.copy()
        logging.info("Change in parameters, Norm of gradient vector: " +
                     str(cc_par_val) + ', ' + str(cc_gra_val))
        if cc_par_val < cc_par and cc_gra_val < cc_gra:
            break
    if cc_par_val < cc_par and cc_gra_val < cc_gra:
        logging.info("Variances converged")
    else:
        logging.info("Variances did not converge")
    var_pd = {'vari': vari, "varij": varij, "varik": varik, "var_val": var_com}
    var_pd = pd.DataFrame(var_pd,
                          columns=['vari', "varij", "varik", "var_val"])
    return var_pd
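The numerical core of the loop above is an AI update damped toward EM by the weight gamma; in isolation, with toy 2x2 matrices standing in for the real AI/EM matrices computed by the helper functions:

import numpy as np
from numpy import linalg

ai_mat = np.array([[2.0, 0.1], [0.1, 1.5]])  # toy average-information matrix
em_mat = np.diag([1.0, 1.0])                 # toy EM matrix
fd_mat = np.array([0.05, -0.02])             # toy first-order derivatives
gamma = 0.1                                  # EM weight

wemai_mat = (1 - gamma) * ai_mat + gamma * em_mat
delta = np.dot(linalg.inv(wemai_mat), fd_mat)  # variance-component update step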
Example #8
    print("complaint_id {}".format(complaint['complaint_id']))

    # get the value from the db
    rs = complaints.select().where(complaints.c.complaint_id == complaint['complaint_id'])
    rrs = rs.execute()
    for r in rrs:
        rcomplaint = dict(r)
    print("length {}, value {}".format(len(rcomplaint), rcomplaint))
    
    # now update the value
    rs = complaints.update().where(complaints.c.complaint_id == complaint['complaint_id']).\
                    values(reviewer_id=2, process_status='done')
    rs.execute()
    
    rs = complaints.select()
    run(rs, text='complaints')
    
    # now delete all entries
    rs = complaints.delete()
    rs.execute()
    
    rs = complaints.select()
    run(rs, text='complaints')
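run() is defined elsewhere in the original db.py; a hypothetical stand-in, assuming it simply executes a selectable and prints the rows (SQLAlchemy 1.x style):

def run(stmt, text=''):
    # Hypothetical helper: execute a selectable and print each row
    print('-- {} --'.format(text))
    for row in stmt.execute():
        print(dict(row))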

    
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    inf("-- db.py --")
    test_db()
    add_complaint()
    
Example #9
    if waterData:
        Oxgen = {"Ox": waterData['Ox']}
        Watertem = {"watertem": waterData['watertem']}
        ph = {"PH": waterData['PH']}
        nh4 = round(random.uniform(0, 0.6), 1)
        Nh4 = {"NH4": nh4}

        send_date(SEND_TOKEN['Oxgen'], Oxgen)
        send_date(SEND_TOKEN['WaterTemperature'], Watertem)
        send_date(SEND_TOKEN['PH'], ph)
        send_date(SEND_TOKEN['NH4'], Nh4)
    else:
        logging.warning("Water sensor error!")


if __name__ == "__main__":

    time.sleep(3)
    try:
        while True:
            data()
            logging.info("send success sensor data.")
            print(
                '--------------------------------------------------------------'
            )
            time.sleep(15)

    except KeyboardInterrupt as e:
        logging.inf("over")