Code example #1
File: __main__.py Project: dsw/proquint
def main():
    desc = 'Convert between [integer, hexadecimal, IPv4 address] <-> proquint representations. '\
        'See https://arxiv.org/html/0901.4016'

    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('-n', '--uint', action='store_true',
                        help='convert from proquint to 32-bit integer', required=False)
    parser.add_argument('-x', '--hex', action='store_true',
                        help='convert from proquint to hexadecimal', required=False)
    parser.add_argument('-i', '--ip', action='store_true',
                        help='convert from proquint to IPv4', required=False)
    parser.add_argument('val', nargs='?', type=str, default=None,
                        help='value to convert (if not specified, ' \
                        'IP address of the current host is printed)')

    args = parser.parse_args()

    target = None
    if args.uint:
        target = 'uint'
    elif args.hex:
        target = 'hex'
    elif args.ip:
        target = 'ip'

    try:
        if args.val is None:
            res = convert(get_my_ip())
        else:
            res = convert(args.val, target)
    except ValueError as e:
        print('{}'.format(str(e)))
    else:
        print('{}'.format(res))
Code example #2
File: celery_task.py Project: rtx3/deyun.io
def db_statistics_sync():
    result = []
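    # convert() is assumed here to decode the byte keys/values returned by redis into plain str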
    data = convert(redisapi.hgetall(name='sitestatus'))
    if not data:
        logger.warning('no site status data in redis cache')
        return {'failed': 'no site status data in redis cache'}
    try:
        state = Statistics(
            system_capacity=data['system_capacity'],
            managed_nodes=data['managed_nodes'],
            system_utilization=convert(redisapi.hgetall(
                name='sitestatus')).get('system_utilization', ''),
            user_count=data['user_count'],
            registered_master=data['registered_master'],
            total_task=data['total_task'],
            service_level=convert(redisapi.hgetall(
                name='sitestatus')).get('service_level', ''),
            uptime=data['uptime'],
            page_visit_count=data.get('page_visit_count',0),
            api_visit_count=data.get('api_visit_count',0)
        )
        db.session.add(state)
        db.session.commit()
        result.append(state)
    except Exception as e:
        logger.warning('error in creating data in statistics: %s', e)
        logger.exception(e)
        return {'failed': e}
    logger.info('Completed writing data to statistics: ' + str(result))
    return {'successed': 'db_statistics_sync'}
Code example #3
File: celery_task.py Project: rtx3/Salt-MWDS
def statistics_sync():
    result = []
    data = convert(redisapi.hgetall(name='sitestatus'))
    if not data:
        logger.warning('no site status data in redis cache')
        return {'failed': 'no site status data in redis cache'}
    try:
        state = Statistics(
            system_capacity = data['system_capacity'],
            managed_nodes = data['managed_nodes'],
            system_utilization = convert(redisapi.hgetall(name='sitestatus')).get('system_utilization',''),
            user_count = data['user_count'],
            registered_master = data['registered_master'],
            total_task = data['total_task'],
            service_level = convert(redisapi.hgetall(name='sitestatus')).get('service_level',''),
            uptime = data['uptime'],
            page_visit_count = data['page_visit_count'],
            api_visit_count = data['api_visit_count']
            )
        session.add(state)
        session.commit()
        result.append(state)
    except Exception as e:
        logger.warning('error in creating data in statistics')
        return {'failed': e}
    logger.info('Completed writing data to statistics: ' + str(result))
    return {'successed': result}
Code example #4
def exp6():
	'''
	stream.sh generates streams for a set of episodes.
	For each ep in eval03:
	1. rescore wrt the interpolated LM
	2. convert the 1-best hypothesis (.mlf) to a .dat file -- DIY
	3. get perplexities for all 5 LM{1..5} -- used by interpolated_LM (EM) to compute new interpolation weights
	4. rescore the lattice -- to get the 1-best hypothesis
	'''
	#dev03
	#get_plex('lm_int_dev03','stream_lm_intdev03', 'lib/texts/dev03.dat')
	episode_list = 'eval03'
	episodes = open('lib/testlists/'+ episode_list+'.lst', 'r')
	episodes = episodes.readlines()
	episodes = [e.strip() for e in episodes]

	save_dir = 'plp/plp-ilm'

	for e in episodes:
		#step 1
		htk.lm_rescore(e, 'lm_int_dev03', save_dir)	
		#step 2
		dat_file = 'dat_files/sentences.dat'
		mlf_file = save_dir + '/'+ e + '/rescore/rescore.mlf' #plp-ilm/eval03_DEV011-20010206-XX1830/rescore
		utl.convert(mlf_file, dat_file)
		#step 3
		lm_dir = 'interpolated_lms/lm_' + episode_list
		utl.LM_interpolation(dat_file, lm_dir)
		#step 4
		output = 'lm_rescore/rescore_' + episode_list
		htk.lm_rescore(e, lm_dir, output)
	
	#get WER
	htk.score(output, episode_list, 'rescore')
Code example #5
def converter(amount, input_currency, output_currency):
    if output_currency is not None:
        results = convert(amount, input_currency, output_currency)
    else:
        results = convert(amount, input_currency)

    click.echo(json.dumps(results, indent=3))
Code example #6
    def convert(self, file, ext="mp3"):
        file_ext = file.split('.')[-1]
        saveas = (file.replace(file_ext, ext)).replace(self.remote, self.local)
        local_name = self.to_local(file)
        file_name = file.replace(self.remote, '')
        status = False
        start_time = timestamp()

        # if the local copy already exists, check whether the earlier conversion was left incomplete
        if os.path.exists(file_name):
            orig_size, comp_size, status = is_incomplete(file_name, file, 8)
            if status:  # incomplete: report sizes and remove the partial local file
                print(
                    f'AC/CS {orig_size//1024**2}MB/{comp_size//1024**2}MB -> ',
                    self.local)
                os.unlink(local_name)  # original referenced an undefined 'local_file'; local_name assumed

        if self.count:
            self.counter()

        # print('converting', self.shorten_name(file))
        convert(file_name=file, saveas=saveas, to=ext)
        end_time = timestamp()

        # increase compressed files value
        with self.value.get_lock():
            self.value.value += 1

        print(self._time_taken(start_time, end_time), 'Converted\t',
              file_name.replace(self.remote, ''))
Code example #7
def converter(args):

    amount = args["amount"]
    input_currency = args["input_currency"]
    if args.get("output_currency"):
        output_currency = args["output_currency"]
        results = convert(amount, input_currency, output_currency)
    else:
        results = convert(amount, input_currency)

    return jsonify(results)
Code example #8
File: celery_task.py Project: rtx3/deyun.io
def salttoken():
    try:
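        # reuse the cached token only while its stored 'expire' timestamp is still in the future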
        if redisapi.hexists(name='salt', key='token'):
            if (time.time() - float(bytes.decode(redisapi.hget(name='salt', key='expire')))) < 0.0:
                ret = convert(redisapi.hget(name='salt', key='token'))
                return convert(ret)
            else:
                return saltlogin(saltapi.login(user, pawd, 'pam'))
        else:
            return saltlogin(saltapi.login(user, pawd, 'pam'))
    except Exception as e:
        return {'failed': e}
Code example #9
File: youtube.py Project: aljeshishe/youtube_joblib
    def _process(self, url):
        log.info(f'Processing {url}')
        episode = Episode.cached(url)
        if not episode:
            return []
        tmp_audio_path = utils.convert(file_path=episode.audio_path,
                                       extension='.flac')

        subtitles = Subtitles.from_srt(episode.captions_path)

        results = []
        start = None
        accum_text = []
        for line, next_line in pairwise(subtitles.lines):
            # log.info(f'Processing {line}')
            if start is None:
                start = line.start
            accum_text.append(line.text)
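            # flush roughly every 10 seconds of subtitle time (timestamps assumed to be in milliseconds)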
            if line.end - start > 10 * 1000:
                self._save_part(start, line.end, tmp_audio_path,
                                ' '.join(accum_text), results)
                accum_text = []
                start = next_line.start

        log.info(f'Processed {url}')
        return results
Code example #10
File: main.py Project: yunhengzi/videopose
def generate_kpts(video_name):
    kpt_results = []

    cap = cv2.VideoCapture(video_name)
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    opWrapper = load_model()
    for i in tqdm(range(length)):

        try:
            datum = op.Datum()
            _, imageToProcess = cap.read()
            datum.cvInputData = imageToProcess
            opWrapper.emplaceAndPop([datum])
            results = datum.poseKeypoints

            #25 to 17
            assert len(
                results
            ) == 1, 'videopose3D only supports one-person reconstruction'
            kpts = convert(results[0])
            kpt_results.append(kpts)
        except Exception as e:
            print(e)

    # pose processes
    result = np.array(kpt_results)

    # save
    name = '/home/xyliu/experiments/VideoPose3D/data/tmp.npz'
    kpts = result.astype(np.float32)
    print('kpts npz save in ', name)
    np.savez_compressed(name, kpts=kpts)
    return kpts
Code example #11
def run_test(args):
    it_network = ImageTransformNet(
        input_shape=hparams['test_size'],
        residual_layers=hparams['residual_layers'],
        residual_filters=hparams['residual_filters'])
    ckpt_dir = os.path.join(args.name, 'pretrained')
    ckpt = tf.train.Checkpoint(network=it_network, step=tf.Variable(0))
    ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial()
    print('\n###################################################')
    print('Perceptual Losses for Real-Time Style Transfer Test')
    print('###################################################\n')
    print('Restored {} step: {}\n'.format(args.name, str(ckpt.step.numpy())))

    dir_size = 'step_{}_{}x{}'.format(str(ckpt.step.numpy()),
                                      str(hparams['test_size'][0]),
                                      str(hparams['test_size'][1]))
    dir_model = 'output_img_{}'.format(args.name)
    out_dir = os.path.join(args.output_path, dir_model, dir_size)

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    content_img_list = os.listdir(args.test_content_img)

    for c_file in content_img_list:
        content = convert(os.path.join(args.test_content_img, c_file),
                          hparams['test_size'][:2])[tf.newaxis, :]
        output = it_network(content, training=False)
        tensor = tensor_to_image(output)
        c_name = '{}_{}'.format(args.name, os.path.splitext(c_file)[0])
        save_path = os.path.join(out_dir, c_name)
        tensor.save(save_path + '.jpeg')
        print('Image: {}.jpeg saved'.format(save_path))
Code example #12
def results():
    """
    This route is used to save the file, convert the audio to 16000hz monochannel,
    and predict the emotion using the saved binary model
    """
    if not os.path.isdir("./audio"):
        os.mkdir("audio")
    if request.method == 'POST':
        try:
            f = request.files['file']
            filename = secure_filename(f.filename)
            f.save(os.path.join(app.config["UPLOAD_FOLDER"], filename))
        except:
            return render_template('main.html', value="")

    wav_file_pre  = os.listdir("./audio")[0]
    wav_file_pre = f"{os.getcwd()}/audio/{wav_file_pre}"
    wav_file = convert(wav_file_pre)
    os.remove(wav_file_pre)
    model = pickle.load(open(f"{os.getcwd()}/model.model", "rb"))
    x_test = extract_feature(wav_file)
    y_pred = model.predict(np.array([x_test]))
    os.remove(wav_file)
    print(y_pred)
    return render_template('main.html', value=y_pred[0])
Code example #13
File: feedback.py Project: shmpwk/snippets
    def cb_fridge_pose(msg):
        tf_handle_to_map = convert_posemsg_to_transform(msg)
        tf_map_to_base = listener.lookupTransform("/base_link", "/map", rospy.Time(0))
        tf_handle_to_base = convert(tf_handle_to_map, tf_map_to_base)

        trans, quat = tf_handle_to_base
        rpy = tf.transformations.euler_from_quaternion(quat)
        co = Coordinate(trans, rpy)
        co.trans_local([-0.02, 0, 0])
        group = ('larm')
        av_pre = robot.get_angle_vector(group)
        av_post, success = robot.solve_ik(co.trans, rpy, group_names=group, overwrite=True)

        """
        av_diff = av_post - av_pre
        av_com = av_pre + av_diff * 2
        robot.set_angle_vector(av_com, group)
        av_com_full = robot.get_angle_vector()
        """

        #av_post = robot.get_angle_vector()

        print("ik success: {0}".format(success))
        #ri.angle_vector(av_post, time=2.0)
        ri.angle_vector(robot.get_angle_vector(), time=0.2)
Code example #14
def generate_kpts(video_name):
    kpt_results = []

    cap = cv2.VideoCapture(video_name)
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    opWrapper = load_model()
    for i in tqdm(range(length)):

        try:
            datum = op.Datum()
            _, imageToProcess = cap.read()
            datum.cvInputData = imageToProcess
            opWrapper.emplaceAndPop([datum])
            results = datum.poseKeypoints

            #25 to 17
            assert len(
                results
            ) == 1, 'videopose3D only supports one-person reconstruction'
            kpts = convert(results[0])
            kpt_results.append(kpts)
        except Exception as e:
            print(e)

    # pose processes
    result = np.array(kpt_results)
    return result
Code example #15
File: main2.py Project: changwang/Union-Find
def test(alg_num, data_sets):
    if alg_num == 1:
        uf = unionfind1.UnionFind()
    elif alg_num == 2:
        uf = unionfind2.UnionFind()
    elif alg_num == 3:
        uf = unionfind3.UnionFind()
    elif alg_num == 4:
        uf = unionfind4.UnionFind()
    elif alg_num == 5:
        uf = unionfind5.UnionFind()
    elif alg_num == 6:
        uf = unionfind5.UnionFind()

    time_set = []
    data_sets = data_sets * 100
    for data_set in data_sets:
        starttime = time.time()
        for item in data_set:
            result = convert(item)
            if result[0] == "find":
                uf.find(result[1], True)
            elif result[0] == "union":
                uf.union(result[1][0], result[1][1])

        endtime = time.time()

        time_set.append(endtime - starttime)

    del uf

    return time_set
Code example #16
def redirect_url(code):
    #  Takes the code from the url, attempts to decode it, and checks the DB for a match.  If a match is found, or the
    #  original is an un-encoded url, it redirects.  Otherwise it returns an error.
    if utils.check_url(utils.convert(code)):
        return redirect(utils.convert(code), code=302)
    else:
        try:
            result = dal.lookup_url(code)
            if result:
                return redirect(utils.convert(result), code=302)
            else:
                return 'Malformed URL: {}{}'.format(
                    HOST, code), status.HTTP_400_BAD_REQUEST
        except:
            return 'Malformed URL: {}{}'.format(
                HOST, code), status.HTTP_400_BAD_REQUEST
Code example #17
def main():
  dbFile = '/home/opit/Desktop/PhD/CALIFA/data/CALIFA.sqlite'
  iso25D = 40 / 0.396
  listFile = '../data/SDSS_photo_match.csv'
  fitsDir = '../data/SDSS/'
  dataDir = '../data'
  outputFile = '../data/growthCurvePhotometry.csv'
  imgDir = 'img/'
  simpleFile = '../data/CALIFA_mother_simple.csv'
  maskFile = '../data/maskFilenames.csv'
  noOfGalaxies = 939
  i = 0

  
  
  ra = convert(db.dbUtils.getFromDB('ra', dbFile, 'mothersample', ' where CALIFA_id = 1'))
  dec = convert(db.dbUtils.getFromDB('dec', dbFile, 'mothersample', ' where CALIFA_id = 1'))
  
  print ra, dec
  #exit()
  x0 = Astrometry.getPixelCoords(listFile, 0, dataDir)[1]
  y0 = Astrometry.getPixelCoords(listFile, 0, dataDir)[0]
  print 'pixel coords', y0, x0
  sdssPA = convert(db.dbUtils.getFromDB('isoPhi_r', dbFile, 'mothersample', ' where CALIFA_id = 1'))
  sdss_ba = convert(db.dbUtils.getFromDB('isoB_r', dbFile, 'mothersample', ' where CALIFA_id = 1'))/convert(db.dbUtils.getFromDB('isoA_r', dbFile, 'mothersample', ' where CALIFA_id = 1'))
  sdss_isoA = convert(db.dbUtils.getFromDB('isoA_r', dbFile, 'mothersample', ' where CALIFA_id = 1'))
  
  nadinePA = convert(db.dbUtils.getFromDB('PA', dbFile, 'nadine', ' where CALIFA_id = 1'))
  print nadinePA, 'pa'
  nadine_ba = convert(db.dbUtils.getFromDB('ba', dbFile, 'nadine', ' where CALIFA_id = 1'))
  nadineR_90 = convert(db.dbUtils.getFromDB('R90', dbFile, 'nadine', ' where CALIFA_id = 1'))/0.396
  print nadine_ba
  nPoints = 3000000
  
  #Photometry.createDistanceArray(listFile, i, dataDir, center)
  #Photometry.ellipseContours(x0, y0, angle, isoA, axisRatio, nPoints)
  inputFileName = '../data/filled/fpC-004152-r6-0064.fits'
  inputFile = pyfits.open(inputFileName)
  inputImage = inputFile[0].data
  inputImage[:] = 1000
  #inputImage = np.zeros((50, 50))

  print y0, x0
  ellipseCoords = np.round(Photometry.ellipseContours(y0, x0, nadinePA, nadineR_90, nadine_ba), 0).astype('int16')
  print ellipseCoords[0], ellipseCoords[1], 'ellipseCoords'
 
  inputImage[ellipseCoords[0], ellipseCoords[1]] = 100000

  #distances = Astrometry.makeDistanceArray(inputImage, (y0, x0))

  #inputImage[np.where(distances < iso25D)] = 10000000
  #inputImage[np.where(distances == iso25D+1)] = 10000000
  #inputImage[np.where(distances == iso25D-1)] = 10000000
  #inputImage[np.where(distances == iso25D-2)] = 10000000
  head = inputFile[0].header
  hdu = pyfits.PrimaryHDU(inputImage, header = head)
  hdu.writeto('ellipse.fits')
Code example #18
File: bb9_course.py Project: developerck/bb2moodle
def create_moodle_zip(blackboard_zip_fname, out_name):
    try:
        shutil.rmtree('elixer_tmp')
        shutil.rmtree('course_files')
    except OSError:
        pass

    course = Course(blackboard_zip_fname)

    moodle_zip = zipfile.ZipFile(out_name, 'w')

    moodle_xml_str = utils.convert(course).encode('utf-8')

    moodle_zip.writestr('moodle.xml', moodle_xml_str)

    err_fh = open(os.path.devnull, 'w')

    command = ('unzip %s -d elixer_tmp' % blackboard_zip_fname).split(' ')
    subprocess.Popen(command, stdout=err_fh, stderr=err_fh).communicate()

    skip_parent = False

    for root, dirs, files in os.walk('elixer_tmp'):
        if not skip_parent:
            skip_parent = True
            continue

        for bb_fname in files:
            moodle_fname = bb_fname
            if moodle_fname.endswith('.xml'):
                continue

            if bb_fname.startswith('!'):
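                # the stem of '!'-prefixed filenames is base16-encoded; decode it and keep the original extension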
                if '.' in bb_fname:
                    ext, fname = [
                        s[::-1] for s in bb_fname[1:][::-1].split('.', 1)
                    ]
                    moodle_fname = (base64.b16decode(fname.upper()) + '.' +
                                    ext)
                else:
                    ext, fname = '', bb_fname[1:]
                    moodle_fname = (base64.b16decode(fname.upper()))

                moodle_fname = urllib2.unquote(moodle_fname)

            res_num = root.split(os.sep,
                                 1)[1].split(os.sep)[0].replace('res', '')

            fixed_filename = utils.fix_filename(moodle_fname, res_num)

            bb_fname = os.path.join(root, bb_fname)

            moodle_fname = os.path.join('course_files', fixed_filename)

            moodle_zip.write(bb_fname, moodle_fname)

    shutil.rmtree('elixer_tmp')

    moodle_zip.close()
Code example #19
File: celery_task.py Project: rtx3/deyun.io
def db_salt_nodes_sync():
    result = []
    count = 0
    data = redisapi.hgetall(name='status')
    if not data:
        return {'failed': 'no status data in redis cache '}
    try:
        for (k, v) in convert(data).items():
            if v == 'down':
                salt_mark_status(k, v)
                continue
            target_node = db.session.query(
                Nodedb).filter_by(node_name=k).first()
            node_data = salt_minion(k)
            db_data = node_data['return'][0][k]
            master = ret_master()
            # TODO
            try:
                if target_node:
                    target_node.minion_data = db_data
                    target_node.node_ip = db_data.get('ipv4', '1.1.1.1')
                    target_node.os = db_data.get('lsb_distrib_description') or (
                        db_data.get('lsb_distrib_id') + db_data.get('lsb_distrib_release')) or (db_data.get('osfullname') + db_data.get('osrelease'))
                    target_node.cpu = str(db_data[
                        'num_cpus']) + ' * ' + str(db_data['cpu_model'])
                    target_node.kenel = db_data['kernelrelease']
                    target_node.core = int(db_data['num_cpus'])
                    target_node.mem = db_data['mem_total']
                    target_node.host = db_data['host']
                    target_node.status = v
                    target_node.master = master
                else:
                    target_node = Nodedb(
                        id=uuid.uuid4(),
                        node_name=db_data['id'],
                        node_ip=db_data.get('ipv4', '1.1.1.1'),
                        minion_data=db_data,
                        os=db_data.get('lsb_distrib_description') or (
                            db_data.get('lsb_distrib_id') + db_data.get('lsb_distrib_release')) or (db_data.get('osfullname') + db_data.get('osrelease')),
                        cpu=str(db_data['num_cpus']) + ' * ' +
                        str(db_data['cpu_model']),
                        kenel=db_data['kernelrelease'],
                        core=int(db_data['num_cpus']),
                        mem=db_data['mem_total'],
                        host=db_data['host'],
                        master=master,
                        status=v
                    )
            except KeyError as e:
                logger.warning('updating ' + k + ' with error:' + str(e.args))
                continue
            result.append(target_node)
            db.session.add(target_node)
    except Exception as e:
        logger.warning('Error while updating ' + str((k, v)) + str(e.args))
        logger.exception(e)
    db.session.commit()

    return {'ok': 'db_salt_nodes_sync' + ' updated with redis return: ' + str(count)}
Code example #20
    def foo():
        x1, y1, x2, y2 = utils.convert(portableLoc.getLoc())
        mouse.move(x1 // 4,
                   y1 // 4,
                   min((x2 - x1) // 4, (y2 - y1) // 4),
                   teleportRandom=True)
        pyautogui.click(button="right")
        sleep(1)

        loc = pyautogui.locateOnScreen(optionPath)
        x1, y1, x2, y2 = utils.convert(loc)
        mouse.move(x1 // 4,
                   y1 // 4,
                   min((x2 - x1) // 4, (y2 - y1) // 4),
                   teleportRandom=False)
        sleep(0.2)
        pyautogui.click()
Code example #21
File: celery_task_socket.py Project: rtx3/deyun.io
def emit_salt_task_list(room=None):
    try:
        data = {}
        data['el'] = convert(redisapi.hgetall('salt_exec_list'))
        data['tl'] = convert(redisapi.hgetall('salt_task_list'))
    except Exception as e:
        logger.warning('error in loading salt_task_list %s: %s', data, e)
        logger.exception(e)
        return {'failed': e}
    meta = json.dumps(data)
    if room:
        socket_emit(meta=meta, event='salt_task_list', room=room)
        logger.info({'ok': 'emit_salt_task_list ' + str(room)})
    else:
        socket_emit(meta=meta, event='salt_task_list')
        logger.info({'ok': 'emit_salt_task_list to all'})
    return {'ok': 'emit_salt_task_list'}
Code example #22
def main():
    result = scrape(
        'https://scholar.google.com/scholar?hl=en&as_sdt=0,6&q=%22responsible+ai%22&scisbd=1')
    sleep(randint(2,10))
    result += scrape(
        'https://scholar.google.com/scholar?hl=en&as_sdt=0,6&q=harmful+ai&scisbd=1'
    )
    sleep(randint(2, 10))
    result += scrape(
        'https://scholar.google.com/scholar?hl=en&as_sdt=0,6&q=ethics+ai&scisbd=1'
    )
    sleep(randint(2, 10))
    result += scrape(
        'https://scholar.google.com/scholar?hl=en&as_sdt=0,6&q=%22ai+ethics%22&scisbd=1'
    )
    convert(result)
    upload()
Code example #23
File: celery_task_socket.py Project: rtx3/Salt-MWDS
def salt_nodes_sync():
    result = []
    count = 0
    data = redisapi.hgetall(name='status')
    if not data:
        return {'failed': 'no status data in redis cache '}
    try:
        for (k, v) in convert(data).items():
            if v == 'down':
                salt_mark_status(k, v)
                continue
            target_node = db.session.query(
                Nodedb).filter_by(node_name=k).first()
            node_data = salt_minion(k)
            db_data = node_data['return'][0][k]
            master = ret_master()
            #TODO
            try:
                if target_node:
                    target_node.minion_data = db_data
                    target_node.node_ip = db_data.get('ipv4', '1.1.1.1')
                    target_node.os = db_data.get('lsb_distrib_description') or (
                        db_data.get('lsb_distrib_id') + db_data.get('lsb_distrib_release')) or (db_data.get('osfullname') + db_data.get('osrelease'))
                    target_node.cpu = str(db_data[
                        'num_cpus']) + ' * ' + str(db_data['cpu_model'])
                    target_node.kenel = db_data['kernelrelease']
                    target_node.core = int(db_data['num_cpus'])
                    target_node.mem = db_data['mem_total']
                    target_node.host = db_data['host']
                    target_node.status = v
                    target_node.master = master
                else:
                    target_node = Nodedb(
                        id=uuid.uuid4(),
                        node_name=db_data['id'],
                        node_ip=db_data.get('ipv4','1.1.1.1'),
                        minion_data=db_data,
                        os=db_data.get('lsb_distrib_description') or (
                        db_data.get('lsb_distrib_id') + db_data.get('lsb_distrib_release')) or (db_data.get('osfullname') + db_data.get('osrelease')),
                        cpu=str(db_data['num_cpus']) + ' * ' +
                        str(db_data['cpu_model']),
                        kenel=db_data['kernelrelease'],
                        core=int(db_data['num_cpus']),
                        mem=db_data['mem_total'],
                        host=db_data['host'],
                        master=master,
                        status=v
                    )
            except KeyError as e:
                logger.warning('updating ' + k + ' with error:' + str(e.args))
                continue
            result.append(target_node)
            db.session.add(target_node)
    except Exception as e:
        logger.warning('Error while updating ' + str((k, v)) + str(e.args))
    db.session.commit()

    return {'ok': str(result) + ' updated with redis return: ' + str(count)}
Code example #24
File: views.py Project: makeartweb/makeartweb2
def draw_text(context, request):
    text = request.GET['text']
    size = int(request.GET['size'] or 12)
    color = convert('#' + request.GET.get('color', '000000'))
    out = cStringIO.StringIO()
    #file_font = context.file.retrieve()
    context.create_image(text, out, size, color, 'png')
    out.seek(0)
    return Response(out.read(), content_type="image/png")
Code example #25
def create_test_batch(args):
    # Tensorboard defalut test images
    test_content_img = ['chameleon.jpg', 'islas.jpeg', 'face.jpg']
    test_content_batch = tf.concat([
        convert(os.path.join(args.test_img, img))[tf.newaxis, :]
        for img in test_content_img
    ],
                                   axis=0)
    return test_content_batch
Code example #26
def main():
  iso25D = 40 / 0.396
  #dataDir = '/media/46F4A27FF4A2713B_/work2/data/'
  dataDir = '../data'
  band = setBand()
  #outputFile = dataDir+'/growthCurvePhotometry.csv'
  listFile = dataDir+'/SDSS_photo_match.csv'
  fitsDir = dataDir+'/SDSS/'
  #filledDir = 'filled_'+band+'/'
  filledDir = 'filled2/'
  imgDir = 'img/'+band
  simpleFile = dataDir+'/CALIFA_mother_simple.csv'
  maskFile = dataDir+'/maskFilenames.csv'  
  dataFile = 'list.txt'
  
  for ID in range(1, 940):
    print 'id,', int(ID)
    ra = GalaxyParameters.SDSS(listFile, int(ID)).ra
    dec = GalaxyParameters.SDSS(listFile, int(ID)).dec
    run = GalaxyParameters.SDSS(listFile, int(ID)).run
    rerun = GalaxyParameters.SDSS(listFile, int(ID)).rerun
    camcol = GalaxyParameters.SDSS(listFile, int(ID)).camcol
    field = GalaxyParameters.SDSS(listFile, int(ID)).field
    runstr = GalaxyParameters.SDSS(listFile, int(ID)).runstr
    field_str = sdss.field2string(field)

    print 'getting header info...'
    rFile = fitsDir+'r/fpC-'+runstr+'-r'+camcol+'-'+field_str+'.fit.gz'

    WCS = astWCS.WCS(rFile)
    pa = utils.convert(db.dbUtils.getFromDB('pa', dbDir+'CALIFA.sqlite', 'nadine', ' where califa_id = '+str(ID)))[0][0]
    pa_align = utils.convert(db.dbUtils.getFromDB('pa_align', dbDir+'CALIFA.sqlite', 'nadine', ' where califa_id = '+str(ID)))[0][0]
    ang = WCS.getRotationDeg()
    print ang

    pa_align2 = (360 - ang) - 90 + pa
    #if pa_align2 >= 180:
    #    pa_align2 = pa_align2 - 180
    #if pa_align2 <= 0:
    #    pa_align2 = pa_align2 + 180
    out = int(ID), pa, pa_align2
    print out
    utils.writeOut(out, "align_pa.csv")
Code example #27
def ocr_text(text, fontname, angle=0, output_filename=None, peek=False):
    """
        Wrapper over ocr which takes plain text.
    """

    img = utils.convert(text, fontname, output_filename, angle)
    matched_text, taken = ocr(img.copy(), fontname, peek=peek)
    accuracy = utils.measure_correctness_lcs(matched_text, text)

    return img, taken, matched_text, accuracy
Code example #28
File: master.py Project: VictorD/websync
def update_nodes():
   global NODE_LIST
   if is_online():
      try:
         r = requests.get(URL, timeout=30)
         r_json = convert(r.json())
         NODE_LIST = r_json['Nodes']
         logging.info("Updated Node list. Found " + str(len(NODE_LIST)) + " nodes")
      except (requests.Timeout, requests.ConnectionError):
         logging.error("Failed to retrieve Node list from Master Node")
Code example #29
File: dream.py Project: bennyqp/Pytorch-HeadTrip
    def dream_seq(self):
        """Dreams a sequence with optical flow"""

        for i, path in enumerate(self.img_list[self.config["start_position"] :]):

            if i == 0:
                img1 = Image.open(path)
                d_img = self.deep_dream(
                    self.transform(img1), self.model, i, seq="first"
                )

                self.save_img(d_img, "", i)
                d_img = convert(d_img)
                flow_iter = 0

                # the iterations needs to be reduced
                self.config["num_iterations"] -= 5

            if i > 0:
                img2 = Image.open(path)
                feature_img, background_masked = self.get_opflow_image(
                    img1, d_img, img2
                )

                feature_img = np.clip(feature_img, 0, 255)

                background_masked[background_masked > 0] = 1 - (flow_iter * 0.1)  # 0.5
                background_masked[background_masked == 0] = flow_iter * 0.1

                d_img = self.deep_dream(
                    self.transform(np.uint8(feature_img)),
                    self.model,
                    i,
                    seq="first",
                    mask=background_masked,
                )

                # change position
                img1 = img2
                self.save_img(d_img, "", i)
                d_img = convert(d_img)
                flow_iter += 1
                flow_iter = 0 if flow_iter > 5 else flow_iter
Code example #30
def shorten_url():
    #  Takes a url encoded in json from the request body and returns a shortened url if it is valid, an error otherwise
    if request.is_json:
        url = request.get_json()["url"]
        if utils.check_url(utils.convert(url)):
            return jsonify({
                "shortened_url":
                '{}{}'.format(HOST, dal.add_url(url))
            }), status.HTTP_201_CREATED
        return 'Malformed URL: {}'.format(url), status.HTTP_400_BAD_REQUEST
    return 'Malformed request: JSON body expected', status.HTTP_400_BAD_REQUEST
Code example #31
File: __main__.py Project: michaelglass/proquint-1
def main():
    desc = 'Convert between [integer, hexadecimal, IPv4 address] <-> proquint representations. '\
        'See https://arxiv.org/html/0901.4016'

    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('-n',
                        '--uint',
                        action='store_true',
                        help='convert from proquint to 32-bit integer',
                        required=False)
    parser.add_argument('-x',
                        '--hex',
                        action='store_true',
                        help='convert from proquint to hexadecimal',
                        required=False)
    parser.add_argument('-i',
                        '--ip',
                        action='store_true',
                        help='convert from proquint to IPv4',
                        required=False)
    parser.add_argument('val', nargs='?', type=str, default=None,
                        help='value to convert (if not specified, ' \
                        'IP address of the current host is printed)')

    args = parser.parse_args()

    target = None
    if args.uint:
        target = 'uint'
    elif args.hex:
        target = 'hex'
    elif args.ip:
        target = 'ip'

    try:
        if args.val is None:
            res = convert(get_my_ip())
        else:
            res = convert(args.val, target)
    except ValueError as e:
        print('{}'.format(str(e)))
    else:
        print('{}'.format(res))
Code example #32
File: celery_task_socket.py Project: rtx3/deyun.io
def ret_socket_sitestatus():
    d = convert(redisapi.hgetall('sitestatus'))
    d['service_level'] = str(100.0 - float(mean_status(d['service_level'])))
    d['system_utilization'] = str(mean_status(d['system_utilization']))
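    # chart data: the 8 most recent managed-node and registered-master counts, newest first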
    a = db.session.query(Statistics.managed_nodes).order_by(
        desc(Statistics.update_at)).limit(8).all()
    d['n'] = [r for r, in a]
    b = db.session.query(Statistics.registered_master).order_by(
        desc(Statistics.update_at)).limit(8).all()
    d['m'] = [r for r, in b]
    return d
Code example #33
File: cook.py Project: Alexithym/Bot
def extendPortable(portableKeyCoordinates, extendInterfaceLocation, tries = 0):
	(x, y, xT, yT) = portableKeyCoordinates
	(x, y, xT, yT) = (5 + x//2, 5 + y//2, -5 + xT//2, -5 + yT//2)

	if tries == 10:
		print("Failed to extend portable")
		return False
	
	def moveToPortableKey():
		mouse.moveRect(x, y, xT, yT)

		sleepDuration = -1
		while sleepDuration < 0:
			sleepDuration = np.random.normal(0.2, 0.01)
		sleep(sleepDuration)

		pyautogui.click(button='right')

	portableClickLocation = utils.Location()

	succ = utils.performAction(moveToPortableKey, 
		utils.makeSuccess(portableClickLocation, portableClickInterfacePath, False))
	if not succ:
		print("Failed to right click portable key, retrying")
		return extendPortable(portableKeyCoordinates, extendInterfaceLocation, tries = tries + 1)
		

	sleep(3)

	(x, y, xT, yT) = utils.convert(portableClickLocation.getLoc())
	(x, y, xT, yT) = (x//2, y//2, xT//2, yT//2)
	mouse.moveRect(x, y, xT, yT, teleportRandom=False)
	pyautogui.click()
	
	sleep(3)

	if not utils.checkLocation(extendInterfaceLocation, extendInterfacePath):
		print("Failed to open extend interface, retrying")
		return extendPortable(portableKeyCoordinates, extendInterfaceLocation, tries = tries + 1)

	sleepDuration = -1
	while sleepDuration < 0:
		sleepDuration = np.random.normal(0.2, 0.2)
	utils.spamPress('1')
	sleep(sleepDuration)
	utils.spamPress('enter')

	sleep(3)

	if utils.checkLocation(extendInterfaceLocation, extendInterfacePath):
		print("Failed to extend portable, retrying")
		return extendPortable(portableKeyCoordinates, extendInterfaceLocation, tries = tries + 1)

	return True
Code example #34
File: views.py Project: VictorD/websync
def get_next_global_id():
   gid = None
   if master.is_online():   
      try:
         r = requests.get(master.URL + "next/", timeout=30)
         r_json = convert(r.json())
         gid = int(r_json['nextID'])
         logging.info('Global id received: ' + str(gid))
      except (requests.Timeout, requests.ConnectionError):
         logging.error('Global ID request timeout. Using default value')
   return gid
Code example #35
    def getShadow(self, frame, width=256, height=256):
        outim = self.model.test(convert(frame))
        im_out = outim[0].cpu().float().numpy()
        im_out = np.transpose(im_out, (1, 2, 0))
        im_out = (im_out + 1) / 2 * 255
        im_out = im_out.astype('uint8')

        gray = Image.fromarray(np.squeeze(im_out, axis=2)).resize(
            (int(width), int(height)))
        shadowFrame = np.array(gray)
        return shadowFrame
Code example #36
def read_contract(filename, stem=True):
    file = open(filename, "rb")
    read_file = PyPDF2.PdfFileReader(file)
    num_pages = read_file.getNumPages()
    final_string = ""
    for i in range(num_pages):
        text = convert(filename, pages=[i])
        cur_string = preprocess_text(text, stem=stem)
        final_string += cur_string + " "
    # final = " ".join(final_string)
    return final_string
Code example #37
File: views.py Project: makeartweb/makeartweb2
def raster(context, request):
    text = request.GET['text']
    filename = uuid.uuid4()
    color = convert('#' + request.GET.get('color', '000000'))
    pathname_bmp = './makeart/tmp/%s.bmp' % filename
    pathname_svg = './makeart/tmp/%s.svg' % filename
    out = cStringIO.StringIO()
    context.create_image(text, out, 100, color, 'png')
    #context.create_image(text, pathname_bmp, 100, (0, 0, 0), 'bmp')
    out.seek(0)
    return Response(out.read(), content_type="image/png")
Code example #38
def generate_frame_kpt(frame, opWrapper):
    '''
    Takes a frame and the model (opWrapper)
    '''
    datum = op.Datum()
    datum.cvInputData = frame
    opWrapper.emplaceAndPop([datum])
    re = datum.poseKeypoints
    assert len(re) == 1, 'videopose3D only supports one-person reconstruction'
    kpt = convert(re[0])

    return kpt
Code example #39
File: celery_task.py Project: rtx3/Salt-MWDS
def salttoken():
    try:
        if redisapi.hexists(name='salt', key='token'):
            if (time.time() - float(bytes.decode(redisapi.hget(name='salt', key='expire')))) < 0.0:
                ret = redisapi.hget(name='salt', key='token')
                return convert(ret)
            else:
                return saltlogin(saltapi.login(user, pawd, 'pam'))
        else:
            return saltlogin(saltapi.login(user, pawd, 'pam'))
    except Exception as e:
        return {'failed': e}
Code example #40
File: train.py Project: EnyanDai/Master-thesis
def test(model):
    model = model.to(device)
    number = 0
    running_loss = 0.0
    acc = 0.0
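    # H: gold spans, S: predicted spans, common: matching spans (used below for precision/recall/F1)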
    H = 0
    S = 0
    common = 0
    
    containLink = 0
    linkNumber = 0
    model.eval()
    for i in range(len(test_list)):
        pred_span=[]
        for j in range(len(test_list[i])):
            model.hidden = model.init_hidden()
            sentence_in = v.prepare_sequence(test_list[i][j]).to(device)
            labels = tag.prepare_sequence(test_labels[i][j],tag_to_indx).to(device)
            n = len(test_list[i][j])
            number += n
            output = model(sentence_in)
            loss = nn.functional.nll_loss(output,labels)
            _,pred = torch.max(output,dim=1)
#                print(pred.data)
            for indexs in convert(pred.data):
                pred_span.append([test_span_list[i][j][indexs[0]][0],test_span_list[i][j][indexs[1]][1]])
            acc += torch.sum(torch.eq(pred,labels).float()).data
            running_loss += n*loss.data
            
        S += len(pred_span)
        H += len(test_labels_span[i])
        common += metrics(pred_span,test_labels_span[i])
        
        tmpContainLink,tmpLinkNumber = linkMetrics(pred_span,test_linkSpans[i])
        containLink += tmpContainLink
        linkNumber +=tmpLinkNumber
    print(S,H,common)
    if(S != 0):
        precision = common/S
    else:
        precision = 0.0
    recall = common/H
    if(common==0):
        F1 = 0.0
    else:
        F1 = 2*recall*precision/float(recall+precision)
        
    print(containLink,linkNumber)
    
    print('loss: %.4f , acc: %.4f , precision: %.4f, recall: %.4f,F1: %.4f,LinkRecall: %.4f Testing'
              %(running_loss/number,acc/number,precision,recall,F1,containLink/linkNumber))
    return running_loss/number,acc/number,precision,recall,F1,containLink/linkNumber
Code example #41
def main():
    # obtain first two results
    result = scrape(
        'https://search.techcrunch.com/search;_ylt=Awr9ImItSrtfWq0AA7ynBWVH;_ylc=X1MDMTE5NzgwMjkxOQRfcgMyBGZyA3RlY2hjcnVuY2gEZ3ByaWQDV1JYVG5TV3JRWHVWXy5tSkNvNzNVQQRuX3JzbHQDMARuX3N1Z2cDMQRvcmlnaW4Dc2VhcmNoLnRlY2hjcnVuY2guY29tBHBvcwMwBHBxc3RyAwRwcXN0cmwDMARxc3RybAM5BHF1ZXJ5A2V0aGljcyUyMGFpBHRfc3RtcAMxNjA2MTA5NzU5?p=ethics+ai&fr2=sb-top&fr=techcrunch'
    )
    sleep(randint(2, 10))
    result += scrape(
        'https://search.techcrunch.com/search;_ylt=Awr9JnE_SrtfNYYAUfynBWVH;_ylu=Y29sbwNncTEEcG9zAzEEdnRpZAMEc2VjA3BhZ2luYXRpb24-?p=ethics+ai&fr=techcrunch&fr2=sb-top&b=11&pz=10&bct=0&xargs=0'
    )

    # scrape others
    current = 21
    base = 'https://search.techcrunch.com/search;_ylt=Awr9CKpUTrtfRGMAlx2nBWVH;_ylu=Y29sbwNncTEEcG9zAzEEdnRpZAMEc2VjA3BhZ2luYXRpb24-?p=ethics+ai&pz=10&fr=techcrunch&fr2=sb-top&bct=0&b='
    end = '&pz=10&bct=0&xargs=0'
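    # base/end pieces of the paginated search URL; the loop below walks offsets 21..121 ten at a time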

    while current <= 121:
        sleep(randint(2, 10))
        result += scrape(base + str(current) + end)
        current += 10

    convert(result)
    upload()
Code example #42
File: celery_task_socket.py Project: rtx3/deyun.io
def emit_salt_ping(room, tgt, func, info):
    try:
        if convert(redisapi.hget('salt_task_lock', room + tgt)) == func:
            meta = json.dumps({'msg': 'Task executing. Waiting for result.',
                               'type': 'warning', 'tgt': tgt, 'func': func, 'jid': 'Job Waiting', 'info': info})
            socket_emit(meta=meta, event='salt_task_warn', room=room)
            return 1
        else:
            redisapi.hset('salt_task_lock', room + tgt, func)
            logger.info({'task': 'emit_salt_ping', 'room': room, 'tgt': tgt})
            salt_exec_func(tgt=tgt, func='test.ping', room=room, info=info)
            redisapi.hdel('salt_task_lock', room + tgt)
            return 0
    # the listing stops before the exception handler; the clause below is assumed, mirroring emit_salt_task_list above
    except Exception as e:
        logger.exception(e)
        return {'failed': e}
Code example #43
File: celery_task.py Project: rtx3/Salt-MWDS
def statistics_api_visit():
    try:
        data = convert(redisapi.hgetall(name='sitestatus'))
        if not data:
            logger.warning('no site status data in redis cache')
            return {'failed': 'no site status data in redis cache'} 
        if data.get('api_visit_count', None):
            api_visit_count = int(data['api_visit_count'])
        else:
            api_visit_count = 0
        redisapi.hset('sitestatus', 'api_visit_count', api_visit_count + 1)
    except Exception as e:
        return {'failed': e} 
    return {'successed': 'api visit updated'}
Code example #44
File: main.py Project: changwang/Union-Find
def test(alg_num, data_set):
    if alg_num == 1:
        uf = unionfind1.UnionFind()
    elif alg_num == 2:
        uf = unionfind2.UnionFind()
    elif alg_num == 3:
        uf = unionfind3.UnionFind()

    starttime = time.time()
    for item in data_set:
        result = convert(item)
        if result[0] == 'find':
            uf.find(result[1], True)
        elif result[0] == 'union':
            uf.union(result[1][0], result[1][1])
        
    endtime = time.time()
    
    uf.internalNameOfSet(1)
    
    del uf
    
    return (endtime - starttime)
Code example #45
File: main.py Project: Zerowxm/kdd-cup2009
 num_des=numerical.describe()
 
 
 # comment start
 # f1!!!!!!
 #comment end
 acc_f1=False
 if(acc_f1):
     f1=[]
     shape=[]
     positive=[]
     for col in category:
         col_label=np.append(col,labels)
         df_col=df_all[col_label]
         df_col=df_col.dropna(axis='rows') 
         df_col[col]=u.convert(df_col[col])
         shape.append( df_col.shape[0])
         f1.append(u.selectF(df_col,'churn'))
         positive.append(df_col[df_col[label]==1].shape[0])
     np.savetxt('f1_obj.txt',f1)
     np.savetxt('shape_obj.txt',shape)
     np.savetxt('positive_obj.txt',positive)
     
 acc_f1=False
 if(acc_f1):
     f1=[]
     shape=[]
     positive=[]
     for col in numerical_category:
         col_label=np.append(col,labels)
         df_col=df_all[col_label]
Code example #46
File: celery_task_socket.py Project: rtx3/Salt-MWDS
def ret_socket_sitestatus():
    d = convert(redisapi.hgetall('sitestatus'))
    d['service_level'] = str(100.0 - float(mean_status(d['service_level'])))
    d['system_utilization'] = str(mean_status(d['system_utilization']))
    return d
Code example #47
File: master_run.py Project: zbarni/triplexator
                      metavar="2:00")
    parser.add_option("--cluster-mem", dest="clusterMemory", help="memory for submitted job (cluster only)",
                      metavar="16000")
    parser.add_option("--valgrind", dest="valgrind", default=False, action="store_true", help="profile with valgrind",
                      metavar="TTS")
    return parser


if __name__ == "__main__":
    # import required stuff which wasn't possible at the beginning
    utils.lazy_imports()
    # some initial checking
    check_setup()

    m_parser = create_parser()
    (m_options, args) = m_parser.parse_args()

    if m_options.unitTests is not None:
        unit.run_bit_parallel_unit_test(m_options.unitTests)
    elif m_options.convert == "tpx-to-bed":
        utils.convert(m_options)
    elif m_options.dataAnalysis is not None:
        # unit.unit_data_analysis()
        da.main(m_options)
    else:
        if m_options.type == "local":
            m_options = check_input_local(m_options, m_parser)
            run_locally(m_options)
        elif m_options.type == "cluster":
            run_cluster(m_options)
Code example #48
File: dianping.py Project: kailu/GrouponX
def getAllDealCities():
    params = []
    url = "http://api.dianping.com/v1/metadata/get_cities_with_deals"
    
    return _api(url,params)


if __name__ == '__main__':
#    print getCategories()
#    gpons = getGrouponByCity('桂林')
#    print repr(gpons).decode('utf-8')

    # all_cities =  getAllDealCities()
    # for city in all_cities['cities']:
    #     print city.encode('utf8')

    gpons = getGrouponByCity('北京')
    gpons = utils.convert(gpons)
    for deal in gpons['deals']:
        print 'title:',deal['title']
        print 'description:',deal['description']
        print 'image_url:',deal['image_url']
        print 'list_price:',deal['list_price']
        print 'current_price:',deal['current_price']
        print 'purchase_count:',deal['purchase_count']
        print 'deal_url:',deal['deal_url']
        print 'city:',deal['city']
        print 'categories:',','.join(deal['categories'])
        print 
Code example #49
File: run_kcorrect.py Project: astrolitterbox/TBSP
def main():
  filenames = ["data/j8cw52080_drz.fits", "data/j8cw52041_drz.fits", "data/j8cw51011_drz.fits"]
  inputRedshift = 0.022
  for filename in filenames:
    frameData = FrameData()
    data = read_frame(filename)
    frame_params = read_header(filename)
    print frame_params
    print data.shape
    #HST filters and their UBVRI (Cousin) counterparts:
    #U: F336W
    #B: F439W
    #V: F555W
    #R: F675W
    #I: F814W
    frame_filter = getFrameFilter(frame_params['photmode'])
    #due to sky subtraction, I pad non-positive data points w/ 0.001
    data[np.where(data <= 0)] = 0.001
    data = data[2000:2010, 2000:2010]
    frameData.data = data
    magnitudes_array = countsToMag(data, frame_params)
  magnitudes_array = magnitudes_array.flatten()
  print magnitudes_array.shape
  magnitudes = np.hstack(magnitudes_array) 
  print magnitudes.shape
  uncertainties = np.ones((magnitudes.shape))*0.02
  redshift = np.ones((magnitudes.shape))*inputRedshift
  kc = ACSKCorrect(redshift, magnitudes, uncertainties, 0.02, cosmo=(Wm, Wl, H0/100))
  # redshift, stmag, stmag_sigma, extinction,
  lamb, templates = load_vmatrix()
  lamb = lambda_to_centers(lamb)
  print lamb.shape, templates.shape
  useful_wavel = np.where((np.round(lamb, 0) > 3000) & (np.round(lamb,0) < 9000))[0]
  print lamb.shape, 'lambda'
  lamb = lamb[useful_wavel]
  print lamb.shape, 'lambda'
  kcorr = kc.kcorrect()
  coeffs = kc.coeffs#[:, 1:4]
  print coeffs.shape
  coeffs = coeffs
  spec = np.dot(coeffs, templates)
  spec = spec[:, useful_wavel]
  #spec: y pixels, x (10 k) lambda datapoints
  #lamb = lamb[useful_wavel]
  lamb = lamb/10 #convert to Angstroms
  spec = spec*10**17
  print spec.shape, lamb.shape
  #xyz = np.empty((spec.shape[0], 1)) #test
  r = np.empty((spec.shape[0], 1))
  g = r.copy()
  b = r.copy()
  for i in range(0, spec.shape[0]):
  #rgb_vals = ciexyz.xyz_from_spectrum(np.transpose(np.array((lamb, spec[0, :]))))
      print np.array((lamb, spec[i, :])).shape, 'hstack'
      rgb_vals = ciexyz.xyz_from_spectrum(np.transpose(np.array((lamb, spec[i, :]))))

      print rgb_vals, rgb_vals.shape
      r[i], g[i], b[i] = colormodels.irgb_from_xyz(rgb_vals)
    #g = colormodels.irgb_from_xyz(rgb_vals)[1]
    #b = colormodels.irgb_from_xyz(rgb_vals)[2]
    
  
  rgb = np.dstack([r, g, b])
  
  print rgb.shape, 'after dstack'
  rgb = np.reshape(rgb, (10, 10, 3))
  fig = plt.figure()
  plt.imshow(rgb)
  plt.savefig('rgb')
  exit()
  fig = plt.figure()
  plt.plot(lamb[2000:4000], (10**17) * spec[2000:4000])
  plt.savefig('spec.png')
   
  exit()

  exit()

  data = np.empty((939, 16))

  califa_id = db.dbUtils.getFromDB('califa_id', dbDir+'CALIFA.sqlite', 'gc')
  
  u = db.dbUtils.getFromDB('petroMag_u', dbDir+'CALIFA.sqlite', 'mothersample')
  g = db.dbUtils.getFromDB('petroMag_g', dbDir+'CALIFA.sqlite', 'mothersample')
  r = db.dbUtils.getFromDB('petroMag_r', dbDir+'CALIFA.sqlite', 'mothersample')
  i = db.dbUtils.getFromDB('petroMag_i', dbDir+'CALIFA.sqlite', 'mothersample')
  z = db.dbUtils.getFromDB('petroMag_z', dbDir+'CALIFA.sqlite', 'mothersample')


  ext_u = db.dbUtils.getFromDB('extinction_u', dbDir+'CALIFA.sqlite', 'extinction')
  ext_g = db.dbUtils.getFromDB('extinction_g', dbDir+'CALIFA.sqlite', 'extinction')
  ext_r = db.dbUtils.getFromDB('extinction_r', dbDir+'CALIFA.sqlite', 'extinction')
  ext_i = db.dbUtils.getFromDB('extinction_i', dbDir+'CALIFA.sqlite', 'extinction')
  ext_z = db.dbUtils.getFromDB('extinction_z', dbDir+'CALIFA.sqlite', 'extinction')
  
  err_u = db.dbUtils.getFromDB('petroMagErr_u', dbDir+'CALIFA.sqlite', 'extinction')
  err_g = db.dbUtils.getFromDB('petroMagErr_g', dbDir+'CALIFA.sqlite', 'extinction')
  err_r = db.dbUtils.getFromDB('petroMagErr_r', dbDir+'CALIFA.sqlite', 'extinction')
  err_i = db.dbUtils.getFromDB('petroMagErr_i', dbDir+'CALIFA.sqlite', 'extinction')
  err_z = db.dbUtils.getFromDB('petroMagErr_z', dbDir+'CALIFA.sqlite', 'extinction')  
  
  redshift = db.dbUtils.getFromDB('z', dbDir+'CALIFA.sqlite', 'ned_z')  
  data[:, 0] = u[:]
  data[:, 1] = g[:]
  data[:, 2] = r[:]
  data[:, 3] = i[:]
  data[:, 4] = z[:]
  
  data[:, 5] = ext_u[:]
  data[:, 6] = ext_g[:]
  data[:, 7] = ext_r[:]
  data[:, 8] = ext_i[:]
  data[:, 9] = ext_z[:]
  
  data[:, 10] = err_u[:]
  data[:, 11] = err_g[:]
  data[:, 12] = err_r[:]
  data[:, 13] = err_i[:]
  data[:, 14] = err_z[:]
  
  data[:, 15] = redshift[:]
  
  maggies = data[:, 0:5]

  extinction = data[:, 5:10]

  maggies_err = data[:, 10:15] 
  print maggies.shape, extinction.shape, maggies_err.shape
  lamb, templates = load_vmatrix()
  lamb = lambda_to_centers(lamb)
  print lamb.shape, templates.shape

  #outputArray = np.empty((939, 9))
  kc =  SDSSKCorrect(redshift, maggies, maggies_err, extinction, cosmo=(Wm, Wl, H0/100))
  kcorr = kc.kcorrect()

  #absmag = getAbsMag(redshift, maggies[:, 2], extinction[:, 2])#kc.absmag() 
  #outputArray[:,0] = califa_id[:]
  #print kcorr[:, 2][:].shape
  
  #outputArray[:, 1:6] = kc.absmag()  
  coeffs = kc.coeffs#[:, 1:4]
  #print coeffs.shape
  #tmremain = np.array([[0.601525, 0.941511, 0.607033, 0.523732, 0.763937]])
  #ones = np.ones((1, len(redshift)))
  #prod = np.dot(tmremain.T, ones).T 
  print coeffs.shape
  coeffs = coeffs[-1]
  print coeffs
  spec = np.dot(coeffs, templates)
  np.savetxt('spec.txt', spec)
  np.savetxt('lambda.txt', lamb)
  print spec.shape, lamb.shape
  fig = plt.figure()

  plt.plot(lamb[2000:4000], (10**17) * spec[2000:4000])
  plt.savefig('spec.png')
  exit()

  modelMasses = coeffs*prod
  #print modelMasses.shape
  mass = np.sum(modelMasses, axis=1)
  for i in range (0, (len(data))):
    distmod = KC.utils.cosmology.ztodm(redshift[i])
    exp = 10 ** (0.4 * distmod)
    outputArray[i, 6] = mass[i] * exp
    #outputArray[i, 7] = getAbsMag(redshift[i], maggies[i, 2], extinction[i, 2])
    
    outputArray[i, 8] = distmod
  outputArray[:, 7] = kcorr[:, 2]  
  np.savetxt("kcorrect_sdss.csv", outputArray, fmt = '%i, %10.3f, %10.3f, %10.3f, %10.3e, %10.3f, %10.3e, %10.3e, %10.3e')  

  exit()
  data = np.empty((939, 16))

  califa_id = utils.convert(db.dbUtils.getFromDB('califa_id', dbDir+'CALIFA.sqlite', 'gc'))
  
  u = utils.convert(db.dbUtils.getFromDB('el_mag', dbDir+'CALIFA.sqlite', 'u_tot'))
  g = utils.convert(db.dbUtils.getFromDB('el_mag', dbDir+'CALIFA.sqlite', 'g_tot'))
  r = utils.convert(db.dbUtils.getFromDB('el_mag', dbDir+'CALIFA.sqlite', 'r_tot'))
  i = utils.convert(db.dbUtils.getFromDB('el_mag', dbDir+'CALIFA.sqlite', 'i_tot'))
  z = utils.convert(db.dbUtils.getFromDB('el_mag', dbDir+'CALIFA.sqlite', 'z_tot'))


  ext_u = utils.convert(db.dbUtils.getFromDB('extinction_u', dbDir+'CALIFA.sqlite', 'extinction'))
  ext_g = utils.convert(db.dbUtils.getFromDB('extinction_g', dbDir+'CALIFA.sqlite', 'extinction'))
  ext_r = utils.convert(db.dbUtils.getFromDB('extinction_r', dbDir+'CALIFA.sqlite', 'extinction'))
  ext_i = utils.convert(db.dbUtils.getFromDB('extinction_i', dbDir+'CALIFA.sqlite', 'extinction'))
  ext_z = utils.convert(db.dbUtils.getFromDB('extinction_z', dbDir+'CALIFA.sqlite', 'extinction'))
  
  err_u = utils.convert(db.dbUtils.getFromDB('petroMagErr_u', dbDir+'CALIFA.sqlite', 'extinction'))
  err_g = utils.convert(db.dbUtils.getFromDB('petroMagErr_g', dbDir+'CALIFA.sqlite', 'extinction'))
  err_r = utils.convert(db.dbUtils.getFromDB('petroMagErr_r', dbDir+'CALIFA.sqlite', 'extinction'))
  err_i = utils.convert(db.dbUtils.getFromDB('petroMagErr_i', dbDir+'CALIFA.sqlite', 'extinction'))
  err_z = utils.convert(db.dbUtils.getFromDB('petroMagErr_z', dbDir+'CALIFA.sqlite', 'extinction'))  
  
  redshift = utils.convert(db.dbUtils.getFromDB('z', dbDir+'CALIFA.sqlite', 'ned_z'))  
  
  data[:, 0] = u[:, 0]
  data[:, 1] = g[:, 0]
  data[:, 2] = r[:, 0]
  data[:, 3] = i[:, 0]
  data[:, 4] = z[:, 0]
  
  data[:, 5] = ext_u[:, 0]
  data[:, 6] = ext_g[:, 0]
  data[:, 7] = ext_r[:, 0]
  data[:, 8] = ext_i[:, 0]
  data[:, 9] = ext_z[:, 0]
  
  data[:, 10] = err_u[:, 0]
  data[:, 11] = err_g[:, 0]
  data[:, 12] = err_r[:, 0]
  data[:, 13] = err_i[:, 0]
  data[:, 14] = err_z[:, 0]
  
  data[:, 15] = redshift[:, 0]
  
  maggies = data[:, 0:5]

  extinction = data[:, 5:10]

  maggies_err = data[:, 10:15] 
  print maggies.shape, extinction.shape, maggies_err.shape
  
  outputArray = np.empty((939, 9))
  kc =  SDSSKCorrect(redshift, maggies, maggies_err, extinction, cosmo=(Wm, Wl, H0/100))
  kcorr = kc.kcorrect()

  #absmag = getAbsMag(redshift, maggies[:, 2], extinction[:, 2])#kc.absmag() 
  outputArray[:,0] = califa_id[:, 0]
  #print kcorr[:, 2][:].shape
  
  outputArray[:, 1:6] = kc.absmag()  
  coeffs = kc.coeffs#[:, 1:4]
  tmremain = np.array([[0.601525, 0.941511, 0.607033, 0.523732, 0.763937]])
  ones = np.ones((1, len(redshift)))
  prod = np.dot(tmremain.T, ones).T 
  modelMasses = coeffs*prod
  #print modelMasses.shape
  mass = np.sum(modelMasses, axis=1)
  for i in range (0, (len(data))):
    distmod = KC.utils.cosmology.ztodm(redshift[i])
    exp = 10 ** (0.4 * distmod)
    outputArray[i, 6] = mass[i] * exp
    #outputArray[i, 7] = getAbsMag(redshift[i], maggies[i, 2], extinction[i, 2])
    
    outputArray[i, 8] = distmod
  outputArray[:, 7] = kcorr[:, 2]  
  np.savetxt("absmag.csv", outputArray, fmt = '%i, %10.3f, %10.3f, %10.3f, %10.3e, %10.3f, %10.3e, %10.3e, %10.3e')  
Code example #50
# -*- coding: utf-8 -*-
import numpy as np
import db
import utils

dbDir = "../db/"

data = np.empty((939, 14), dtype=float)


califa_id = utils.convert(db.dbUtils.getFromDB("CALIFA_ID", dbDir + "CALIFA.sqlite", "gc_r"))
ra = utils.convert(db.dbUtils.getFromDB("ra", dbDir + "CALIFA.sqlite", "mothersample"))
dec = utils.convert(db.dbUtils.getFromDB("dec", dbDir + "CALIFA.sqlite", "mothersample"))

el_u = utils.convert(db.dbUtils.getFromDB("el_mag", dbDir + "CALIFA.sqlite", "gc_u")) - 0.04  # AB correction
el_g = utils.convert(db.dbUtils.getFromDB("el_mag", dbDir + "CALIFA.sqlite", "gc_g"))
el_r = utils.convert(db.dbUtils.getFromDB("el_mag", dbDir + "CALIFA.sqlite", "gc_r"))
el_i = utils.convert(db.dbUtils.getFromDB("el_mag", dbDir + "CALIFA.sqlite", "gc_i"))
el_z = utils.convert(db.dbUtils.getFromDB("el_mag", dbDir + "CALIFA.sqlite", "gc_z"))

el_hlr = 0.396 * utils.convert(db.dbUtils.getFromDB("el_hlma", dbDir + "CALIFA.sqlite", "gc_r"))  # SDSS pixel scale
print el_hlr

# circ_mag = utils.convert(db.dbUtils.getFromDB('circ_mag', dbDir+'CALIFA.sqlite', 'gc_r'))
# circ_hlr = 0.396*utils.convert(db.dbUtils.getFromDB('circ_hlr', dbDir+'CALIFA.sqlite', 'gc_r'))
r_sky = utils.convert(db.dbUtils.getFromDB("gc_sky", dbDir + "CALIFA.sqlite", "gc_r"))

pa = utils.convert(db.dbUtils.getFromDB("PA", dbDir + "CALIFA.sqlite", "angles"))
pa_align = utils.convert(db.dbUtils.getFromDB("PA_align", dbDir + "CALIFA.sqlite", "angles"))
ba = utils.convert(db.dbUtils.getFromDB("ba", dbDir + "CALIFA.sqlite", "nadine"))
コード例 #51
ファイル: jira2github.py プロジェクト: whybe/jira2github
    for comment in jira.issue(issue.key).fields.comment.comments:
        jira_issue.comments.append(
            JiraComment(
                comment.author.name,
                comment.body,
                comment.created))

    # if len(issue.fields.components) == 0:
    # if len(issue.fields.components) > 0 and issue.fields.components[0].name == 'Backend':
        # print issue.fields.components

    print jira_issue

    github_labels = []

    label = convert(jira_issue.issue_type, JiraAttr.TYPE, GithubAttr.LABEL)
    if label is not None:
        github_labels.append(create_label(label, github_repo))

    label = convert(jira_issue.status, JiraAttr.STATUS, GithubAttr.LABEL)
    if label is not None:
        github_labels.append(create_label(label, github_repo))

    github_issue = github_repo.create_issue(
        jira_issue.summary,
        body=gen_body(jira_issue),
        assignee=convert(jira_issue.assignee, JiraAttr.USER,
                         GithubAttr.LOGIN),
        labels=github_labels)

    github_issue.edit(state=convert(jira_issue.status, JiraAttr.STATUS, GithubAttr.STATE))
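
The convert() calls above translate Jira attributes (issue type, status, assignee) into GitHub labels, states and logins; the mapping itself is not part of this excerpt. The following is only a guessed sketch of the shape such a lookup could take — the JiraAttr/GithubAttr constants, the table contents and the function body are all hypothetical, not the project's actual code:

# Hypothetical sketch of an attribute-mapping convert(); constants and table contents are made up.
class JiraAttr:
    TYPE, STATUS, USER = 'type', 'status', 'user'

class GithubAttr:
    LABEL, STATE, LOGIN = 'label', 'state', 'login'

_MAPPING = {
    (JiraAttr.TYPE, GithubAttr.LABEL): {'Bug': 'bug', 'New Feature': 'enhancement'},
    (JiraAttr.STATUS, GithubAttr.STATE): {'Open': 'open', 'Resolved': 'closed', 'Closed': 'closed'},
    (JiraAttr.STATUS, GithubAttr.LABEL): {'In Progress': 'in progress'},
}

def convert(value, source_attr, target_attr):
    # Return the mapped value, or None when there is no mapping (callers above check for None).
    return _MAPPING.get((source_attr, target_attr), {}).get(value)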
コード例 #52
def main():
  data = np.empty((939, 16))

  califa_id = utils.convert(db.dbUtils.getFromDB('califa_id', dbDir+'CALIFA.sqlite', 'gc'))
  
  u = utils.convert(db.dbUtils.getFromDB('u_mag', dbDir+'CALIFA.sqlite', 'gc'))
  g = utils.convert(db.dbUtils.getFromDB('g_mag', dbDir+'CALIFA.sqlite', 'gc'))
  r = utils.convert(db.dbUtils.getFromDB('r_mag', dbDir+'CALIFA.sqlite', 'gc'))
  i = utils.convert(db.dbUtils.getFromDB('i_mag', dbDir+'CALIFA.sqlite', 'gc'))
  z = utils.convert(db.dbUtils.getFromDB('z_mag', dbDir+'CALIFA.sqlite', 'gc'))


  ext_u = utils.convert(db.dbUtils.getFromDB('extinction_u', dbDir+'CALIFA.sqlite', 'extinction'))
  ext_g = utils.convert(db.dbUtils.getFromDB('extinction_g', dbDir+'CALIFA.sqlite', 'extinction'))
  ext_r = utils.convert(db.dbUtils.getFromDB('extinction_r', dbDir+'CALIFA.sqlite', 'extinction'))
  ext_i = utils.convert(db.dbUtils.getFromDB('extinction_i', dbDir+'CALIFA.sqlite', 'extinction'))
  ext_z = utils.convert(db.dbUtils.getFromDB('extinction_z', dbDir+'CALIFA.sqlite', 'extinction'))
  
  err_u = utils.convert(db.dbUtils.getFromDB('petroMagErr_u', dbDir+'CALIFA.sqlite', 'extinction'))
  err_g = utils.convert(db.dbUtils.getFromDB('petroMagErr_g', dbDir+'CALIFA.sqlite', 'extinction'))
  err_r = utils.convert(db.dbUtils.getFromDB('petroMagErr_r', dbDir+'CALIFA.sqlite', 'extinction'))
  err_i = utils.convert(db.dbUtils.getFromDB('petroMagErr_i', dbDir+'CALIFA.sqlite', 'extinction'))
  err_z = utils.convert(db.dbUtils.getFromDB('petroMagErr_z', dbDir+'CALIFA.sqlite', 'extinction'))  
  
  redshift = utils.convert(db.dbUtils.getFromDB('z', dbDir+'CALIFA.sqlite', 'ned_z'))  
  
  data[:, 0] = u[:, 0]
  data[:, 1] = g[:, 0]
  data[:, 2] = r[:, 0]
  data[:, 3] = i[:, 0]
  data[:, 4] = z[:, 0]
  
  data[:, 5] = ext_u[:, 0]
  data[:, 6] = ext_g[:, 0]
  data[:, 7] = ext_r[:, 0]
  data[:, 8] = ext_i[:, 0]
  data[:, 9] = ext_z[:, 0]
  
  data[:, 10] = err_u[:, 0]
  data[:, 11] = err_g[:, 0]
  data[:, 12] = err_r[:, 0]
  data[:, 13] = err_i[:, 0]
  data[:, 14] = err_z[:, 0]
  
  data[:, 15] = redshift[:, 0]
  
  maggies = data[:, 0:5]

  extinction = data[:, 5:10]

  maggies_err = data[:, 10:15] 
  print maggies.shape, extinction.shape, maggies_err.shape
  
  outputArray = np.empty((939, 9))
  kc =  SDSSKCorrect(redshift, maggies, maggies_err, extinction, cosmo=(Wm, Wl, H0/100))
  kcorr = kc.kcorrect()

  #absmag = getAbsMag(redshift, maggies[:, 2], extinction[:, 2])#kc.absmag() 
  outputArray[:,0] = califa_id[:, 0]
  #print kcorr[:, 2][:].shape
  
  outputArray[:, 1:6] = kc.absmag()  
  coeffs = kc.coeffs#[:, 1:4]
  tmremain = np.array([[0.601525, 0.941511, 0.607033, 0.523732, 0.763937]])
  ones = np.ones((1, len(redshift)))
  prod = np.dot(tmremain.T, ones).T 
  modelMasses = coeffs*prod
  #print modelMasses.shape
  mass = np.sum(modelMasses, axis=1)
  # Scale the coefficient-weighted template masses by 10^(0.4 * DM) to obtain stellar masses.
  for i in range(len(data)):
    distmod = KC.utils.cosmology.ztodm(redshift[i])
    exp = 10 ** (0.4 * distmod)
    outputArray[i, 6] = mass[i] * exp
    #outputArray[i, 7] = getAbsMag(redshift[i], maggies[i, 2], extinction[i, 2])
    
    outputArray[i, 8] = distmod
  outputArray[:, 7] = kcorr[:, 2]  
  np.savetxt("absmag.csv", outputArray, fmt = '%i, %10.3f, %10.3f, %10.3f, %10.3e, %10.3f, %10.3e, %10.3e, %10.3e')  
コード例 #53
ファイル: data.py プロジェクト: josbouten/bioplot
    def _decodeType3Results(self, res):
        """
        Decoder for cross identification type results file. Example of the format used:

        80374  0000000017133729a 80359 0000000016842970b 2.1088616847991943  FALSE META_VAL1
        148407 0000260007968376b 89823 0000000008087650a 0.33669018745422363 FALSE META_VAL1
        179408 03ea7cce-a192626a 80372 0000000016749939b 1.26323664188385    FALSE META_VAL2
        80344  0000000016888750a 80344 0000000015560933b 4.423274517059326   TRUE  META_VAL2
        etc.

        :param res: list of strings (text lines) of raw data resulting from a series of trials.
        Type 3 data contains 7 fields:
        field 1: string: label identifying a subject (training data)
        field 2: string: name of data file containing biometric features or raw data originating
                         from the subject denoted by field 1 used for training the test model
        field 3: string: label identifying a subject (test data)
        field 4: string: name of data file containing biometric features or raw data originating
                         from the subject denoted by field 3 used for training the reference model
        field 5: string: float value: score of trial
        field 6: boolean: ground truth
        field 7: string: meta data value for the trial
        Field 7 can be used to contrast experiments in the zoo plot.
        So if you run two cross-identification experiments that differ in a single variable,
        the meta value can be used to group each experiment's scores.
        """

        totCnt = 0
        resCnt = 0
        # For type 3 scores we assume that the scores are (Log) Likelihood Ratios ranging between 0 and +infinity.
        onlyOnce = set()
        revRepeatCnt = 0
        selfCnt = 0
        valuesCnt = collections.Counter()
        # Set max and min function for this type.
        self.getMaximum4ThisType = self.config.getMaximum4Type3
        self.getMinimum4ThisType = self.config.getMinimum4Type3
        # Scores are scalar float values.
        self._miAll = self.getMaximum4ThisType()
        self._maAll = self.getMinimum4ThisType()
        for line in res:
            if ',' in line:
                splitChar = ','
            else:
                splitChar = None
            try:
                l1, f1, l2, f2, score, truth, metaValue = line.split(splitChar)
                if splitChar:
                    l1, f1, l2, f2, score, truth, metaValue = self._sanitize(l1, f1, l2, f2, score, truth, metaValue)
            except Exception as e:
                print('Error in', line)
                print('Use either comma or space as separator.')
                print(e)
            else:
                # We want to sort the data when choosing colors.
                # Therefore we convert to numbers if possible
                # otherwise we assume string values.
                metaValue = convert(metaValue)

                # Keep track of distinct meta data values.
                valuesCnt[metaValue] += 1

                if metaValue not in self._minimumScore:
                    self._minimumScore[metaValue] = self.getMaximum4ThisType()
                    self._maximumScore[metaValue] = self.getMinimum4ThisType()
                l1_0 = l1 + '---' + f1
                l2_0 = l2 + '---' + f2
                # If the score is not numerical, then we skip everything.
                try:
                    score = float(score)
                except Exception as e:
                    print('Error in', line)
                    print(e)
                else:
                    self._miAll = min(self._miAll, score)
                    self._maAll = max(self._maAll, score)
                    self._minimumScore[metaValue] = min(self._minimumScore[metaValue], score)
                    self._maximumScore[metaValue] = max(self._maximumScore[metaValue], score)

                    if l1_0 == l2_0:
                        selfCnt += 1
                        # Selfies are not interesting and therefore skipped
                        # continue
                    if (l1_0, l2_0) not in onlyOnce:
                        onlyOnce.add((l1_0, l2_0))
                    if not self._allowDups:
                        # We do not want to include an experiment twice,
                        # assuming that the scores are symmetric.
                        # This may not be the case!
                        if (l2_0, l1_0) in onlyOnce:
                            revRepeatCnt += 1
                            continue
                    resCnt += 1

                    # Keep track of labels associated with meta data values.
                    metaValue = str(metaValue)

                    pattern = l1 + self.LABEL_SEPARATOR + metaValue
                    # Keep track of results for ranking purposes.
                    # print 'adding element to results[', l1 + self.LABEL_SEPARATOR + metaValue, ']'
                    self._results[pattern].append((l2, score))
                    self._results4Subject[metaValue, l1].append((l2, score))  # code is just for debugging

                    self._metaDataValues[metaValue].add(l1)
                    self._metaDataValues[metaValue].add(l2)
                    totCnt += 1
                    if truth.lower() == 'true':
                        self._targetScores[pattern].append(score)
                        self._targetScores4Label[l1].append(score)
                        self._targetScores4MetaValue[metaValue].append(score)
                        self._targetCnt[metaValue] += 1
                        self._targetLabels.add(l1)
                    else:
                        self._nonTargetScores[pattern].append(score)
                        self._nonTargetScores4Label[l1].append(score)
                        self._nonTargetScores4MetaValue[metaValue].append(score)
                        self._nonTargetCnt[metaValue] += 1
                        self._nonTargetLabels.add(l1)
        if self.debug:
            print('Number of results in file:', resCnt)
            print('Number of subjects:', len(self._results4Subject))
        print('Number of scores:', totCnt)
        if totCnt == 0:
            print('No scores were found. Maybe the dataType is not set correctly.')
            print("DataType is '%s'" % self._dataType)
            print('Is this correct?')
            sys.exit(1)
        print("Number of target and non target scores for: ")
        maxLen = 0
        for metaValue in self._nonTargetCnt:
            maxLen = max(maxLen, len(metaValue))
        template = "{:<%d}" % maxLen
        scoreLen = len(str(self.compLen(self._nonTargetScores)))
        template += " {:>%d} {:>%d}" % (scoreLen + 1, scoreLen + 1)

        for metaValue in self._targetCnt:
            #print("{:<10} {:>7} {:>7}".format(metaValue, self._targetCnt[metaValue], self._nonTargetCnt[metaValue]))
            print(template.format(metaValue, self._targetCnt[metaValue], self._nonTargetCnt[metaValue]))
        # print("Number of non target scores for: ")

        print(template.format("Total", self.compLen(self._targetScores), self.compLen(self._nonTargetScores)))
        # print('Total number of target scores:', self.compLen(self._targetScores))
        # print('Total number of non target scores:', self.compLen(self._nonTargetScores))
        print('Number of repeats (multiple instances of same data in input):', revRepeatCnt)
        print('Number of selfies (A vs A):', selfCnt)
        self._nrDistinctMetaDataValues = len(self._metaDataValues)
        print('Nr of distinct meta data values:', self._nrDistinctMetaDataValues)
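
As a small standalone illustration of the seven-field type-3 format described in the docstring above (reusing one of the docstring's own sample lines), a single record can be unpacked like this:

# Standalone illustration of parsing one type-3 result line, following the docstring above.
line = "80374  0000000017133729a 80359 0000000016842970b 2.1088616847991943  FALSE META_VAL1"
l1, f1, l2, f2, score, truth, metaValue = line.split(',' if ',' in line else None)
score = float(score)                    # field 5: trial score
isTarget = truth.lower() == 'true'      # field 6: ground truth
print(l1, l2, score, isTarget, metaValue)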
コード例 #54
# -*- coding: utf-8 -*-
import numpy as np
import db
import utils
dbDir = '../db/'

data = np.empty((939, 3), dtype = object)


#califa_id = utils.convert(db.dbUtils.getFromDB('CALIFA_ID', dbDir+'CALIFA.sqlite', 'gc_r'))
ra = utils.convert(db.dbUtils.getFromDB('ra', dbDir+'CALIFA.sqlite', 'mothersample'))
dec = utils.convert(db.dbUtils.getFromDB('dec', dbDir+'CALIFA.sqlite', 'mothersample'))

print ra.shape
for i in range(0, 939):


  data[i, 0] = str(ra[i, 0])+'d'
  data[i, 1] = str(dec[i, 0])+'d'
  data[i, 2] = 0.05
  print i, data[i]
  utils.writeOut(data[i, 0:3], "ned_query_coords.csv", ";")

#np.savetxt("ned_query_coords.csv", data, delimiter = ';')

コード例 #55
ファイル: views.py プロジェクト: rtx3/Salt-MWDS
def get(self):
    data = redisapi.hgetall('sitestatus')
    if data:
        return json.dumps(convert(data))
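
redis-py's hgetall returns raw keys and values (bytes under Python 3), which json.dumps cannot serialize directly; convert presumably normalizes them to plain strings first. A guessed sketch of such a helper — not the project's actual implementation:

# Hypothetical bytes-to-str normalizer for redis hgetall results; the real convert() may differ.
def convert(data):
    if isinstance(data, bytes):
        return data.decode('utf-8')
    if isinstance(data, dict):
        return {convert(k): convert(v) for k, v in data.items()}
    if isinstance(data, (list, tuple, set)):
        return type(data)(convert(item) for item in data)
    return data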
コード例 #56
def main():
    #selecting by ids when some of them are missing
    fit_sky_ids = utils.convert(db.dbUtils.getFromDB('califa_id', dbDir+'CALIFA.sqlite', 'u_tot'))[:, 0]    
    ids = ''
    id_length = 0
    for i in fit_sky_ids:
      ids = ids+","+str(int(i))
      id_length+=1
    ids = ids[1:]  
    
    tot_mag = utils.convert(db.dbUtils.getFromDB('u_mag', dbDir+'CALIFA.sqlite', 'u_tot', ' where califa_id in('+ids+')'))  #parsing tuples  
    gc_mag = utils.convert(db.dbUtils.getFromDB('u_mag', dbDir+'CALIFA.sqlite', 'gc', ' where califa_id in('+ids+')'))  #parsing tuples
    #circ_mag = utils.convert(db.dbUtils.getFromDB('circ_r_mag', dbDir+'CALIFA.sqlite', 'gc'))  #parsing tuples    
    nadines_mag = utils.convert(db.dbUtils.getFromDB('r_mag', dbDir+'CALIFA.sqlite', 'nadine', ' where califa_id in('+ids+')'))  #parsing tuples
    sdss_mag = utils.convert(db.dbUtils.getFromDB('petroMag_u', dbDir+'CALIFA.sqlite', 'mothersample', ' where califa_id in('+ids+')'))  #parsing tuples    
    atlas_mag = utils.convert(db.dbUtils.getFromDB('r_mag', dbDir+'CALIFA.sqlite', 'atlas', ' where califa_id in('+ids+')'))

    #gc_hlr = utils.convert(db.dbUtils.getFromDB('circ_hlr', dbDir+'CALIFA.sqlite', 'gc'))
    #nadine_hlr = utils.convert(db.dbUtils.getFromDB('re', dbDir+'CALIFA.sqlite', 'nadine'))
    #sdss_hlr = utils.convert(db.dbUtils.getFromDB('petroR50_r', dbDir+'CALIFA.sqlite', 'mothersample'))  #parsing tuples    
    #lucie_hlr = utils.convert(db.dbUtils.getFromDB('hlr', dbDir+'CALIFA.sqlite', 'lucie'))  #parsing tuples    
    #el_hlr = utils.convert(db.dbUtils.getFromDB('el_hlma', dbDir+'CALIFA.sqlite', 'gc'))

    #lucie_sky = utils.convert(db.dbUtils.getFromDB('sky', dbDir+'CALIFA.sqlite', 'lucie', ' where califa_id in('+ids+')'))  - 1000  #parsing tuples        
    tot_sky = utils.convert(db.dbUtils.getFromDB('gc_sky', dbDir+'CALIFA.sqlite', 'u_tot', ' where califa_id in('+ids+')'))  #parsing tuples 
    gc_sky = utils.convert(db.dbUtils.getFromDB('gc_sky', dbDir+'CALIFA.sqlite', 'gc', ' where califa_id in('+ids+')'))
    sdss_sky = utils.convert(db.dbUtils.getFromDB('sky', dbDir+'CALIFA.sqlite', 'sdss_sky', ' where califa_id in('+ids+')'))
    
    
    
    #plot relations between various magnitude results
    graph = plot.Plots()
    #gc_magData = plot.GraphData(((nadines_mag, tot_mag)), 'k', 'best')
    #graph.plotScatter([gc_magData], "/analysis/new_gc_mag_vs_nadine", plot.PlotTitles("Comparison between my and Nadine's photometry values", "Nadine's gc magnitude, mag", "Updated gc r magnitude, mag"), (11, 16, 11, 16))

    gc_magData = plot.GraphData(((sdss_mag, tot_mag)), 'k', 'best')
    graph.plotScatter([gc_magData], "/analysis/new_gc_mag_vs_sdss_u", plot.PlotTitles("Comparison between my and SDSS photometry values", "SDSS Petrosian u magnitude, mag", "Updated gc r magnitude, mag"),(12, 18, 12, 18))

    #gc_magData = plot.GraphData(((atlas_mag, tot_mag)), 'k', 'best')
    #graph.plotScatter([gc_magData], "/analysis/new_gc_mag_vs_nsatlas", plot.PlotTitles("Comparison between my and NASA Sloan Atlas photometry values", "Updated gc r magnitude, mag", "NSAtlas magnitude, mag"),(11, 16, 11, 16))

    gc_magData = plot.GraphData(((gc_mag, tot_mag)), 'k', 'best')
    graph.plotScatter([gc_magData], "/analysis/gc_vs_gc_new_u", plot.PlotTitles("Comparison between GC and sky-fit GC photometry values", "GC r magnitude, mag", "Updated GC magnitude, mag"),(12, 17, 12, 17))

    #gc_magData = plot.GraphData(((circ_mag, gc_mag)), 'k', 'best')
    #graph.plotScatter([gc_magData], "/analysis/el_mag_vs_circ_apert", plot.PlotTitles("Comparison between elliptical and circular annuli", "r magnitude, mag", "r magnitude, mag"),(11, 16, 11, 16))

    
    #compare sky values
   
    graph = plot.Plots()
    
    #plotData = plot.GraphData(((np.arange(1, 938), gc_sky - lucie_sky)), 'k', 'best')    
    #graph.plotScatter([plotData], "/analysis/sky_comparison", plot.PlotTitles("Comparison between my and Lucie's sky values", "counts", "counts"), (70, 170, -2, 1))
    
    
    
    #print sdss_sky
    
    #print np.reshape((tot_sky - sdss_sky), (id_length, 1)).shape, np.reshape(np.arange(1, id_length+1), (id_length, 1)).shape
    

    plotData = plot.GraphData(((np.arange(1, id_length+1), tot_sky - sdss_sky)), 'k', 'best')    
    graph.plotScatter([plotData], "/analysis/sdss_sky_comparison_u", plot.PlotTitles("Comparison between my and SDSS sky values", "No.", "counts"), (0, id_length, -2, 0.5))

    plotData = plot.GraphData(((np.arange(1, id_length+1), gc_sky - tot_sky)), 'k', 'best')    
    graph.plotScatter([plotData], "/analysis/gc_sky_comparison_u", plot.PlotTitles("GC sky - updated GC sky", "No.", "counts"))


    exit()
    #plot various HLR values

    graph = plot.Plots()
    plotData = plot.GraphData(((lucie_hlr, gc_hlr)), 'k', 'best')
    graph.plotScatter([plotData], "/analysis/hlr_vs_lucie_noscale", plot.PlotTitles("Comparison between my and Lucie's HLR values", "Lucie's $r_e$, arcsec (?)", "gc hlr, arcsec"), (0, 50, 0, 50))   
    
    plotData = plot.GraphData(((nadine_hlr, el_hlr)), 'k', 'best')    
    graph.plotScatter([plotData], "/analysis/el_hlr_vs_nadine", plot.PlotTitles("Comparison between my and Nadine's HLR values", "Nadine's $r_e$, arcsec", "gc hlr, arcsec"), (0, 70, 0, 50))
    
    plotData = plot.GraphData(((sdss_hlr, gc_hlr)), 'k', 'best')    
    graph.plotScatter([plotData], "/analysis/circ_hlr_vs_sdss", plot.PlotTitles("Comparison between my and SDSS HLR values", "SDSS Petrosian $r_50$, arcsec", "gc hlr, arcsec"), (0, 50, 0, 50))
    

    
    #stellar mass, absmag comparison with Jakob's values:
    '''
    absmag_kc = utils.convert(db.dbUtils.getFromDB('r', dbDir+'CALIFA.sqlite', 'jakobs'))[:937, :]
    absmag_j = utils.convert(db.dbUtils.getFromDB('r', dbDir+'CALIFA.sqlite', 'kcorrect_ned'))[:937, :]

    graph = plot.Plots()
    gc_magData = plot.GraphData(((absmag_kc, absmag_j)), 'k', 'best')
    graph.plotScatter([gc_magData], "/analysis/absolute_magnitudes", plot.PlotTitles("Comparison between my and Jakob's absolute magnitudes", "gc M_r, mag", "JW M_r, mag"))
	

    stmass_kc = utils.convert(db.dbUtils.getFromDB('st_mass', dbDir+'CALIFA.sqlite', 'kcorrect_ned'))[:937, :]
    stmass_kc_sdss = utils.convert(db.dbUtils.getFromDB('st_mass', dbDir+'CALIFA.sqlite', 'kcorrect_sdss_phot'))[:937, :]    
    stmass_kc_no_z = utils.convert(db.dbUtils.getFromDB('st_mass', dbDir+'CALIFA.sqlite', 'kcorrect_no_uz'))[:937, :]        
    stmass_j = utils.convert(db.dbUtils.getFromDB('mstar', dbDir+'CALIFA.sqlite', 'jakobs'))[:937, :]
    stmass_kc = np.log10(stmass_kc)
    stmass_kc_no_z = np.log10(stmass_kc_no_z)
    stmass_kc_sdss = np.log10(stmass_kc_sdss)    
    stmass_j = np.log10(stmass_j)
    
    graph = plot.Plots()
    gc_magData = plot.GraphData(((stmass_kc_sdss, stmass_j)), 'k', 'best')
    graph.plotScatter([gc_magData], "/analysis/stellar masses", plot.PlotTitles("Comparison between kcorrect's and Jakob's stellar masses", "M_{kc}", "M_{JW}"), (7.5, 13, 7.5, 13))

    gc_magData = plot.GraphData(((stmass_kc_sdss, stmass_j)), 'k', 'best')
    graph.plotScatter([gc_magData], "/analysis/stellar_sdss_kc_masses", plot.PlotTitles("Comparison between kcorrect's and Jakob's stellar masses", "M_{kc}", "M_{JW}"), (7.5, 13, 7.5, 13))

    gc_magData = plot.GraphData(((stmass_kc_no_z, stmass_j)), 'k', 'best')
    graph.plotScatter([gc_magData], "/analysis/stMass_jw_gc_no_uz_bands", plot.PlotTitles("Comparison between kcorrect's gri and Jakob's stellar masses", "M_{kc}", "M_{JW}"), (7.5, 13, 7.5, 13))
	
    '''

    '''
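
Purely as an aside on the snippet above: the comma-separated id string fed into the SQL IN(...) clause is built by string concatenation in a loop; the same result can be produced in one step, e.g.:

# Equivalent, more compact construction of the IN(...) id string used above.
fit_sky_ids = utils.convert(db.dbUtils.getFromDB('califa_id', dbDir + 'CALIFA.sqlite', 'u_tot'))[:, 0]
ids = ','.join(str(int(i)) for i in fit_sky_ids)
id_length = len(fit_sky_ids)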
コード例 #57
ファイル: bb9_course.py プロジェクト: nikoloup/reteach
def create_moodle_zip(blackboard_zip_fname, out_name, course_id, parameters):
    try:
        shutil.rmtree('elixer_tmp')
        shutil.rmtree('course_files')
    except OSError:
        pass

    course = Course(blackboard_zip_fname, course_id, parameters)

    moodle_zip = zipfile.ZipFile(out_name, 'w')

    moodle_xml_str = utils.convert(course).encode('utf-8')

    moodle_zip.writestr('moodle.xml', moodle_xml_str)

    err_fh = open(os.path.devnull, 'w')

    #nikoloup
    #fix unzip encoding issues
    #command = ('unzip %s -d elixer_tmp' % blackboard_zip_fname).split(' ')
    #subprocess.Popen(command, stdout=err_fh, stderr=err_fh).communicate()
    bbzip = zipfile.ZipFile(blackboard_zip_fname, 'r')
    bbzip.extractall('elixer_tmp') 
    #end

    skip_parent = False

    for root, dirs, files in os.walk('elixer_tmp'):
        if not skip_parent:
            skip_parent = True
            continue

        for bb_fname in files:
            moodle_fname = bb_fname
            #nikoloup
            #exclude xml files
            if moodle_fname.find('.xml') != -1:
                continue

            #nikoloup
            #filename cleanup
            parts1 = moodle_fname.split('__')
            parts2 = moodle_fname.split('.')
            moodle_fname = parts1[0] + '.' + parts2[len(parts2) - 1]
            #end

            if bb_fname.startswith('!'):
                if '.' in bb_fname:
                    ext, fname = [s[::-1] for s in bb_fname[1:][::-1].split('.', 1)]
                    moodle_fname = (base64.b16decode(fname.upper()) + '.' + ext)
                else:
                    ext, fname = '', bb_fname[1:]
                    moodle_fname = (base64.b16decode(fname.upper()))

                moodle_fname = urllib2.unquote(moodle_fname)

            #nikoloup
            #filename cleanup
            #res_num = root.split(os.sep, 1)[1].split(os.sep)[0].replace('res', '')
            res_num = ''
            #end

            fixed_filename = utils.fix_filename(moodle_fname, res_num)

            bb_fname = os.path.join(root, bb_fname)

            moodle_fname = os.path.join('course_files', fixed_filename)

            moodle_zip.write(bb_fname, moodle_fname)

    shutil.rmtree('elixer_tmp')

    moodle_zip.close()
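
Blackboard export names starting with '!' carry a base16-encoded original filename, which the branch above decodes with base64.b16decode. A worked illustration with a made-up filename (the .decode() call is added so the sketch also runs under Python 3; the original script is Python 2):

# Worked illustration of the '!'-prefixed base16 filename decoding above (hypothetical input).
import base64

bb_fname = '!68656C6C6F2D776F726C64.pdf'                     # hypothetical Blackboard-style name
ext, fname = [s[::-1] for s in bb_fname[1:][::-1].split('.', 1)]
print(base64.b16decode(fname.upper()).decode() + '.' + ext)  # -> hello-world.pdf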
コード例 #58
ファイル: celery_task.py プロジェクト: rtx3/Salt-MWDS
logger = logging.getLogger('task')

celery, session = create_celery_app()

celery.config_from_object('celery_config')

indbapi = Indb(config['INDB_HOST'] + ':' + config['INDB_PORT'])

sensuapi = SensuAPI(config['SENSU_HOST'] + ':' + config['SENSU_PORT'])

master = session.query(Masterdb).first()
try:
    saltapi = Pepper(master.ret_api())
    user = master.username
    pawd = convert(base64.b64decode(master.password))
except Exception:
    # Fall back to the static config when no usable master record is available.
    saltapi = Pepper(config['SALTAPI_HOST'])
    user = config['SALTAPI_USER']
    pawd = config['SALTAPI_PASS']

redisapi = redis.StrictRedis(host=config['REDIS_HOST'], port=config['REDIS_PORT'], db=config['REDIS_DB'])


'''
### DOC ###

Celery function description

*self test*
コード例 #59
import sys
import textwrap

from utils import convert

if __name__ == "__main__":
    info = "Usage: converter.py input.txt"
    try:
        source = sys.argv[1]
    except (TypeError, ValueError, IndexError):
        sys.exit(info)
    if len(sys.argv) < 2:
        sys.exit(info)

    with open(source) as file:
        out_line = "".join(line for line in file)
        out_line = out_line.replace("-\n", "").replace("\n", " ")

    with open("converted.txt", "w") as out:
        source_str = convert(out_line)
        out.write(textwrap.fill(source_str, 60))
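
For reference, the two replace calls above first rejoin words hyphenated across line breaks and then flatten the remaining newlines into spaces before re-wrapping at 60 columns; a tiny worked example:

# Tiny illustration of the de-hyphenation and re-wrapping performed by converter.py above.
import textwrap

raw = "An exam-\nple of hyphen-\nated text\nacross lines"
joined = raw.replace("-\n", "").replace("\n", " ")
print(textwrap.fill(joined, 60))   # -> An example of hyphenated text across lines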