def mosaic():
    """Walk the timeline and mosaic every time step that has not been
    processed yet, then persist the accumulated mosaic index as JSON."""
    global params
    global registration_data
    global mosaic_data

    # Timeline end (time of the last registered frame) as a timestamp.
    end_stamp = time_to_timestamp(params["timeline_endtime"])
    # Timeline start; "-00" pads the configured start time to seconds.
    begin_stamp = time_to_timestamp("{}-00".format(params["start_time"]))
    # Step between timeline points, converted from minutes to seconds.
    step_seconds = params["interval"] * 60

    for stamp in floatrange(begin_stamp, end_stamp, step_seconds):
        point = timestamp_to_time(stamp)  # human-readable timeline point

        if point in mosaic_data:
            continue  # this timeline point was already mosaicked

        item = do_mosaic(point)
        if type(item) is not dict:
            log.getlogger().info("【融合】出错。")
            continue
        mosaic_data[point] = item
        # Persist the header file alongside the mosaic output.
        save_hdr(params["mosaic_dir"], item["time"], item)
        log.getlogger().info("【融合】{}时间的数据已处理完成。".format(point))

    log.save_json("mosaic_data_{}".format(get_current_time()), mosaic_data)
Example #2
0
def do_get(lat, lon):
    """Radar precipitation image endpoint.

    :param lat: latitude
    :param lon: longitude
    :return: the "images" list from the response, where each entry holds
        [0] image url, [1] timestamp, [2] extent in GCJ-02 coordinates;
        None when the location has no images or the payload looks wrong.
    """
    global home
    global headers
    global params

    params["lon"] = lon
    params["lat"] = lat
    response = requests.get(home, params=params, headers=headers)
    log.getlogger().info("【请求网页】{}".format(response.url))
    payload = json.loads(response.text)
    if "images" not in payload:
        # Nothing available for this area.
        return None
    imgs = payload["images"]
    if not imgs or len(imgs[0]) != 3:
        # Unexpected shape -- the upstream URL/format may have changed.
        log.getlogger().error("【do_get】imgs出错(可能Url有变):{}".format(imgs))
        return None
    return imgs
def registration():
    """Register (georeference) every downloaded frame image that has not
    been registered yet, recording results in ``registration_data``.

    Side effects: fills ``registration_data[frame][time]``, updates
    ``params["timeline_endtime"]`` with the time of the newest registered
    item, and dumps the registration index to JSON.
    """
    global params
    global original_data
    global registration_data
    registration_dir = params["registration_dir"]
    current_time = get_current_time()

    log.getlogger().info("【配准数据】")

    d = original_data.copy()  # iterate a copy so concurrent updates cannot break the loop
    for frame, frame_value in d.items():
        for time, time_value in frame_value.items():
            # Raw data for this time slot.
            original_file_path = time_value["file_path"]
            if original_file_path == "":
                # Not downloaded yet -- nothing to register.
                continue

            if frame not in registration_data:
                registration_data[frame] = OrderedDict()

            # BUG FIX: the original checked ``time in registration_data``
            # (the frame-level dict keyed by frame numbers), which never
            # matches a time key, so already-registered items were
            # re-processed. Look inside the frame's own dict instead.
            if time in registration_data[frame]:
                # Already registered.
                continue

            # Output path: <registration_dir>/<frame>/<frame> <time>.tif
            registration_file_name = "{} {}.tif".format(frame, time)
            frame_path = os.path.join(registration_dir, frame)
            registration_file_path = os.path.join(frame_path,
                                                  registration_file_name)
            if os.path.exists(registration_file_path):
                # Output already on disk.
                continue

            rgs_data_item = do_rgs(time_value, registration_file_path)
            if rgs_data_item is None:
                log.getlogger().error("[配准失败!] {}".format(original_file_path))
            else:
                # Registration done -- record it and remember the latest
                # registered time as the mosaic timeline end.
                registration_data[frame][time] = rgs_data_item
                params["timeline_endtime"] = time

    log.save_json("registration_data_{}".format(current_time),
                  registration_data)
def download():
    """Download every image in ``original_data`` that has no local file yet.

    For each successful download the entry's ``file_path``/``file_name``
    are filled in and a header file is written next to the image.
    """
    global params
    global original_data

    log.getlogger().info("【下载数据】")

    original_dir = params["original_dir"]
    d = original_data.copy()  # iterate a copy so concurrent updates cannot break the loop
    for frame, frame_value in d.items():
        # One sub-directory per frame.
        frame_dir = os.path.join(original_dir, frame)
        if not os.path.exists(frame_dir):  # idiom fix: ``not`` instead of ``is False``
            os.mkdir(frame_dir)

        for time, time_value in frame_value.items():
            if time_value["file_path"] != "":
                # Already downloaded.
                continue

            url = time_value["url"]
            fn, fp = download_file(url, frame_dir)
            if fn is None:  # idiom fix: ``is None`` instead of ``== None``
                log.getlogger().error("【下载失败】{}".format(url))
                continue
            # Download succeeded: record location and write the header
            # file with the same base name as the image.
            time_value["file_path"] = fp
            time_value["file_name"] = fn
            hdr_fn = os.path.splitext(fn)[0]
            save_hdr(frame_dir, hdr_fn, time_value)

    log.getlogger().info("\t下载数据结束")
def loop():
    """One crawl cycle: fetch image lists for every selected frame, merge
    them into ``original_data``, then run download/registration/mosaic.

    Shuts the scheduler down once ``params["end_timestrap"]`` has passed.
    """
    global params
    global original_data
    frames = params["frames"]
    frames_selected = params["frames_selected"]
    end_timestrap = params["end_timestrap"]
    current_timestrap = get_current_timestamp()
    current_time = timestamp_to_time(current_timestrap)

    if end_timestrap is not None:  # an end time is configured
        if current_timestrap > end_timestrap:
            log.getlogger().info("【结束】到达结束时间,停止爬取")
            sched.shutdown()

    log.getlogger().info("【开始爬取】{}".format(current_time))

    # BUG FIX: ``time`` was only bound inside the loop below, so an empty
    # ``frames_selected`` (or all requests failing) made the save_json
    # call raise NameError. Seed it with the cycle's current time; the
    # normal path overwrites it with the last image time as before.
    time = current_time
    for frame_num in frames_selected:
        # Request point of the frame to crawl.
        req_pnt = frames[frame_num]["req_pnt"]
        imgs = do_get(*req_pnt)
        if imgs is None:
            log.getlogger().error("图幅{}出错:在该位置上没有获取到imgs".format(frame_num))
            continue
        # Merge the returned images; only new time slots are inserted.
        for img in imgs:
            timestrap = img[1]
            time = timestamp_to_time(timestrap)
            if frame_num not in original_data:
                original_data[frame_num] = OrderedDict()
            if time not in original_data[frame_num]:
                original_data[frame_num][time] = {
                    "timestamp": timestrap,
                    "time": time,
                    "url": img[0],
                    "extent_gcj02": img[2],
                    "req_pnt": req_pnt,
                    "file_path": ""
                }

    log.getlogger().info("原始数据:{}".format(original_data))
    log.save_json("original_data-{}".format(time), original_data)

    download()  # download raw images
    registration()  # register them
    mosaic()  # mosaic the registered images
def dowork():
    """Handle the frame-selection form: persist the chosen frames into
    ``params``, initialise the output directories, save the parameters
    and render the start page with the save report."""
    global params

    log.getlogger().info("【前端返回】{}".format(request.values))
    selected_frames_str = request.form.get("selected_frames", type=str)
    frames_selected = loads(selected_frames_str)
    params["frames_selected"] = frames_selected
    log.getlogger().info("【选择的图幅号】{}".format(frames_selected))

    # Flag every known frame as selected ("1") or not ("0").
    for frame in params["frames"]:
        flag = "1" if frame in frames_selected else "0"
        params["frames"][frame]["selected"] = flag

    print("[DEBUG]{}".format(params))
    init_dir()  # make sure the output directory tree exists
    ret = save_params_file(params)  # persist the parameters to disk
    ret = ret.split("\n")

    return render_template('start.html', infos=ret)
Example #7
0
def compile(env,tagname,jsonstr):
    """Build the project checked out from SVN and distribute the output.

    NOTE(review): Python 2 code (print statements, ``except Exception,e``).
    The name shadows the builtin ``compile`` -- kept for callers.

    :param env: build environment tag; only '.net' is handled here
    :param tagname: SVN tag name for the compiled build
    :param jsonstr: JSON request payload (svnurl/username/userpwd/push)
    :return: 'success' / 'failed', or the caught exception on error
    """
    global workpath
    global username
    global pwd
    global push
    global ret
    global sourcedir 
    logger=log.getlogger()
    logger.info(os.getcwd())
    logger.info(jsonstr)
    if env=='.net':
        try:
            # Check out trunk and locate the project file; the helpers
            # fill the globals used below (workpath, sourcedir, ret...).
            exportfile(jsonstr,'trunk')
            getprojfilepath()
            print ret
            logger.info(ret)
            os.chdir(ret)
            l=sourcedir.split('/')
            projname=l[len(l)-2]
            # push==0: test build into the shared upload folder;
            # push==1: release build with default output paths.
            if push==0:
                cmd='msbuild /p:Configuration=Release;VisualStudioVersion=12.0 /p:WebProjectOutputDir=%s\\192.168.10.62\\upload\\%s /p:OutputPath=%s\\192.168.10.62\\upload\\%s\\bin'%('\\',projname,'\\',projname) 
            if push==1:
                cmd='msbuild /p:Configuration=Release;VisualStudioVersion=12.0'
            print cmd
            res=os.system(cmd)
            if push==0:
                # Second pass: also build into c:\Release for local use.
                cmd='msbuild /p:Configuration=Release;VisualStudioVersion=12.0 /p:WebProjectOutputDir=c:\\Release\\%s /p:OutputPath=c:\\Release\\%s\\bin'%(projname,projname) 
                os.system(cmd)
                cmd='msbuild /p:Configuration=Release;VisualStudioVersion=12.0'
                os.system(cmd)
            if res==0:              
                # Build succeeded: diff against tags/original, upload the
                # changed files, tag the build and notify by mail.
                r=svn.remote.RemoteClient(sourcedir,username,pwd)
                files=svndiff.showdiff(sourcedir+'/tags/original',workpath)  
                winupload.winup(0,'\\192.168.10.62\\upload\\backup', files, projname,tagname)
                winupload.winup(1,'c:\\zz',files,projname,tagname)
                sshupload.uploadfile(projname, files, '172.18.7.88', '/home/swift/','swift', '1234qwer')
                path=['trunk','tags/'+tagname]
                print r.run_command('copy',[sourcedir+'trunk', sourcedir+'tags/'+tagname+'_compiled','-m','copy'])
                dbclient=mongodbaction()
                dbclient.insertlog(projname,username,'compile',tagname)        
                if push==0:
                    Sendmail.sendtogroup('test', 'version '+tagname, 'version '+tagname+' is compiled,please test..'+'path : '+sourcedir+"/tags/"+tagname.encode()+'_compiled')
                Sendmail.sendtogroup('develop', 'version '+tagname, 'version '+tagname+' is compiled successfully..'+'path : '+sourcedir+"/tags/"+tagname.encode()+'_compiled')
                return 'success'
            else:
                Sendmail.sendtogroup('develop', 'version '+tagname, 'version '+tagname+' is compiled failed..')
                return 'failed'
        except Exception,e:
            print e
            return e
Example #8
0
 def send_message(self, data):
     """Dispatch an incoming message to the matching compile action.

     NOTE(review): Python 2 code. ``data`` is a JSON string whose
     'language' field selects the branch and whose 'tagname' names the
     build. The result is written back on the stream and the next
     message is awaited.
     """
     logger=log.getlogger()
     logger.info(data)
     print data
     if jsondecode.jsondecode(data,'language')==".net":
             #self._stream.write_to_fd(".net")
         self._stream.write_to_fd(compile.compile('.net',jsondecode.jsondecode(data,'tagname'),data))
         self.read_message()
     elif jsondecode.jsondecode(data,'language')=="java":
         self._stream.write_to_fd(compile.compile('java', jsondecode.jsondecode(data,'tagname'),data))
         self.read_message()
     elif jsondecode.jsondecode(data,'language')=="testok":
         compile.compile('testok',jsondecode.jsondecode(data,'tagname'),data)
         self._stream.write_to_fd('ok')
         self.read_message()
     elif jsondecode.jsondecode(data,'language')=="run":
         compile.compile('run',jsondecode.jsondecode(data,'tagname'),data)
         self._stream.write_to_fd('ok')
         self.read_message()    
     else:
         self._stream.write_to_fd("no support language")
         self.read_message()
Example #9
0
def exportfile(jsonstr,remotepath):
    """Check out ``remotepath`` of the SVN project described in ``jsonstr``.

    NOTE(review): Python 2 code. Fills the module globals (sourcedir,
    workpath, username, pwd, push) as a side effect; returns the
    exception on checkout failure, otherwise None.
    """
    global workpath
    global sourcedir
    global username
    global pwd
    global push
    logger=log.getlogger()
    # Read the temp path from config.xml next to the script.
    xml_file=sys.path[0]+'\\config.xml' 
    xml=ElementTree.ElementTree(file=xml_file).getroot()
    sourcedir=jsondecode.jsondecode(jsonstr,'svnurl')
    l=sourcedir.split('/')
    projname=l[len(l)-2]
    workpath=xml.find('temppath').text+projname
    username=jsondecode.jsondecode(jsonstr,'username')
    pwd=jsondecode.jsondecode(jsonstr,'userpwd')
    push=jsondecode.jsondecode(jsonstr,'push')
    try:
        r=svn.remote.RemoteClient(sourcedir+remotepath,username,pwd)
        r.checkout(workpath)
        #r.run_command('commit')
    except Exception,e:
        logger.info(e)
        print e
        return e
Example #10
0
def exportfile(jsonstr, remotepath):
    """Check out ``remotepath`` of the SVN project described in ``jsonstr``.

    NOTE(review): Python 2 code; this is a reformatted duplicate of the
    earlier ``exportfile``. Fills the module globals (sourcedir,
    workpath, username, pwd, push) as a side effect; returns the
    exception on checkout failure, otherwise None.
    """
    global workpath
    global sourcedir
    global username
    global pwd
    global push
    logger = log.getlogger()
    # Read the temp path from config.xml next to the script.
    xml_file = sys.path[0] + '\\config.xml'
    xml = ElementTree.ElementTree(file=xml_file).getroot()
    sourcedir = jsondecode.jsondecode(jsonstr, 'svnurl')
    l = sourcedir.split('/')
    projname = l[len(l) - 2]
    workpath = xml.find('temppath').text + projname
    username = jsondecode.jsondecode(jsonstr, 'username')
    pwd = jsondecode.jsondecode(jsonstr, 'userpwd')
    push = jsondecode.jsondecode(jsonstr, 'push')
    try:
        r = svn.remote.RemoteClient(sourcedir + remotepath, username, pwd)
        r.checkout(workpath)
        #r.run_command('commit')
    except Exception, e:
        logger.info(e)
        print e
        return e
Example #11
0
import time
from pyvirtualdisplay import Display
import os
import os.path
from urlparse import urlparse
import json
from datetime import datetime
import codecs
from PIL import Image, ImageDraw
import bs4
import timeout_decorator as tout_dec

import rendertree as rtree
from geometry import Rectangle
import log
logger = log.getlogger("fridolin", log.INFO)

MAX_TIMEOUT = 60


class MaximumRetryReached(Exception):
    """Raised when a retried operation exhausts its retry budget."""

    def __init__(self, *args, **kwargs):
        # Delegate straight to Exception; the subclass only adds a name.
        super(MaximumRetryReached, self).__init__(*args, **kwargs)


def _get_xpath_selenium(driver, element):
    xpath = driver.execute_script(
        """gPt=function(c){
                                 if(c===document){
                                    return ""
                                 }
import Queue
import os
import sys
import time, datetime
import logging
import xml
import argparse

from cloud_monitor_settings import *
from monitor import ThreadPoolMonitor
from log import getlogger
from xml.etree import ElementTree
import web as _web
import libvirt as _libvirt

logger = getlogger("CloudMonitor")

_web.config.debug = False

# Open the web.py database handle once at import time; the connection
# settings come from cloud_monitor_settings. Any failure is logged and
# re-raised so the service refuses to start without a database.
# NOTE(review): Python 2 except syntax.
try:
    db = _web.database(dbn=db_engine,
                       host=db_server,
                       db=db_database,
                       user=db_username,
                       pw=db_password)
except Exception, e:
    logger.exception(e)
    raise e

# Table names used by the cloud monitor.
cloud_vhost_table = 'cloud_vhost_libvirt'
cloud_config_table = 'cloud_config'
Example #13
0
    #     temp += 1
    #
    # print len(Imaps)
    # f = open('temp.json', 'r')
    # a = json.load(f)
    # t = maps().__dict__.keys()
    # for i in t:
    #     try:
    #         print a[9][i]
    #     except KeyError:
    #         print i,'error'

    # a = mapSpider()
    # t = a.getTestData()
    # with open('temp.json') as f:
    #     res = json.load(f)
    #
    # print id(res)
    # print type(res)
    logger = log.getlogger()
    logger.info('我就是测试一下没啥')
    logger.info('测试baseMapper的insert')
    a = maps(814994, '2018-07-16T01:39:16+00:00', '2018-08-20T23:53:09+00:00',
             '2018-08-29T00:21:41+00:00', '僕のヒーローアカデミア')
    aa = maps(622109, '2017-06-04T04:19:43+00:00', '2018-08-27T13:57:38+00:00',
              '2018-08-28T15:00:10+00:00', 'ギルティクラウン')
    t = maps()
    mapper = baseMapper(t)
    print isinstance([a, aa], list)
    mapper.insertModels([a, aa])
Example #14
0
import urllib.request
import time
import json
import os
from pprint import pformat, pprint
import gevent.subprocess

import requests
from bs4 import BeautifulSoup as bs

import log

log = log.getlogger('watcher')

MYPLACE = ['guangzhou', 'shenzhen']


def getlabal(s):
    """Return *s* with a '/' spliced in after the first character."""
    return s[:1] + "/" + s[1:]


def check_who():
    wholist = os.popen("who | awk '{print $5}'").readlines()
    result = []
    if not wholist:
        return []
    for who in wholist:
        # who = '(14.16.225.186)\n'
        try:
Example #15
0
from lxml import etree
from StringIO import StringIO
from collections import deque
import log
logger = log.getlogger("rendertree", log.DEBUG)


class RenderTree(object):
    """A forest of render-tree roots.

    BUG FIX: the original used a mutable default argument
    (``roots=[]``), so every instance created without arguments shared
    one list -- appending a root to one tree changed all of them.
    """

    def __init__(self, roots=None):
        # roots: list of root nodes; a fresh list per instance by default.
        self.roots = [] if roots is None else roots


class RenderTreeNode(object):
    """One node of a render tree, wrapping an XML element.

    BUG FIX: the original used a mutable default argument
    (``children=[]``), so all nodes created without children shared a
    single list.
    """

    def __init__(self, info="", xml=None, children=None, parent=None):
        self.info = info      # free-form description of the node
        self.xml = xml        # the underlying XML element (or None)
        # A fresh child list per instance by default.
        self.children = [] if children is None else children
        self.parent = parent  # parent node; None for roots

    def __str__(self):
        return "info={}, xml={}".format(self.info, str(self.xml))


def parse_xmltree(xml):
    try:
        t = etree.parse(StringIO(xml))

    except Exception as e:
        logger.exception(e)
        logger.info("XML Parser failed parsing. Reparsing with recover=True")
        parser = etree.XMLParser(recover=True)
Example #16
0
# coding: utf-8
import os
import sys
import web
import time, datetime
import logging
import threading

from filesystem_monitor_settings import *
from Registry import Registry
from log import getlogger
from hash import *
import Queue

logger = getlogger("monitor")

web.config.debug = False

# Database connection; settings come from filesystem_monitor_settings.
db = web.database(dbn=db_engine,
                  host=db_server,
                  db=db_database,
                  user=db_username,
                  pw=db_password)
# Cache virtual-machine metadata in memory, keyed by VM uuid:
# uuid -> (windows, name, profile, allocation)
ret = db.select('cloud_vhost', what='uuid,name,allocation,windows,profile')
profiles = {}
for line in ret:
    profiles[line['uuid']] = (line['windows'], line['name'], line['profile'],
                              line['allocation'])
Example #17
0
def compile(env, tagname, jsonstr):
    """Build the project checked out from SVN and distribute the output.

    NOTE(review): Python 2 code; this is a reformatted duplicate of the
    earlier ``compile``. The name shadows the builtin -- kept for callers.

    :param env: build environment tag; only '.net' is handled here
    :param tagname: SVN tag name for the compiled build
    :param jsonstr: JSON request payload (svnurl/username/userpwd/push)
    :return: 'success' / 'failed', or the caught exception on error
    """
    global workpath
    global username
    global pwd
    global push
    global ret
    global sourcedir
    logger = log.getlogger()
    logger.info(os.getcwd())
    logger.info(jsonstr)
    if env == '.net':
        try:
            # Check out trunk and locate the project file; the helpers
            # fill the globals used below (workpath, sourcedir, ret...).
            exportfile(jsonstr, 'trunk')
            getprojfilepath()
            print ret
            logger.info(ret)
            os.chdir(ret)
            l = sourcedir.split('/')
            projname = l[len(l) - 2]
            # push==0: test build into the shared upload folder;
            # push==1: release build with default output paths.
            if push == 0:
                cmd = 'msbuild /p:Configuration=Release;VisualStudioVersion=12.0 /p:WebProjectOutputDir=%s\\192.168.10.62\\upload\\%s /p:OutputPath=%s\\192.168.10.62\\upload\\%s\\bin' % (
                    '\\', projname, '\\', projname)
            if push == 1:
                cmd = 'msbuild /p:Configuration=Release;VisualStudioVersion=12.0'
            print cmd
            res = os.system(cmd)
            if push == 0:
                # Second pass: also build into c:\Release for local use.
                cmd = 'msbuild /p:Configuration=Release;VisualStudioVersion=12.0 /p:WebProjectOutputDir=c:\\Release\\%s /p:OutputPath=c:\\Release\\%s\\bin' % (
                    projname, projname)
                os.system(cmd)
                cmd = 'msbuild /p:Configuration=Release;VisualStudioVersion=12.0'
                os.system(cmd)
            if res == 0:
                # Build succeeded: diff against tags/original, upload the
                # changed files, tag the build and notify by mail.
                r = svn.remote.RemoteClient(sourcedir, username, pwd)
                files = svndiff.showdiff(sourcedir + '/tags/original',
                                         workpath)
                winupload.winup(0, '\\192.168.10.62\\upload\\backup', files,
                                projname, tagname)
                winupload.winup(1, 'c:\\zz', files, projname, tagname)
                sshupload.uploadfile(projname, files, '172.18.7.88',
                                     '/home/swift/', 'swift', '1234qwer')
                path = ['trunk', 'tags/' + tagname]
                print r.run_command('copy', [
                    sourcedir + 'trunk',
                    sourcedir + 'tags/' + tagname + '_compiled', '-m', 'copy'
                ])
                dbclient = mongodbaction()
                dbclient.insertlog(projname, username, 'compile', tagname)
                if push == 0:
                    Sendmail.sendtogroup(
                        'test', 'version ' + tagname, 'version ' + tagname +
                        ' is compiled,please test..' + 'path : ' + sourcedir +
                        "/tags/" + tagname.encode() + '_compiled')
                Sendmail.sendtogroup(
                    'develop', 'version ' + tagname, 'version ' + tagname +
                    ' is compiled successfully..' + 'path : ' + sourcedir +
                    "/tags/" + tagname.encode() + '_compiled')
                return 'success'
            else:
                Sendmail.sendtogroup(
                    'develop', 'version ' + tagname,
                    'version ' + tagname + ' is compiled failed..')
                return 'failed'
        except Exception, e:
            print e
            return e
def listener(event):
    """Scheduler-event hook: report whether the crawl job is healthy."""
    logger = log.getlogger()
    if not event.exception:
        logger.info("【爬取任务正常运行】")
        return
    # The job raised -- record which job exited and why.
    logger.info("【{}任务退出】{}".format(event.job_id,
                                        event.exception.message))
# coding: utf-8
import threading
import Queue
import os
import sys
import time
import logging
import xml
import argparse

from cloud_monitor_settings import *
from monitor import ThreadPoolMonitor
from log import getlogger

logger = getlogger("CloudMonitor")

# Import the optional third-party modules up front so a missing
# dependency fails fast with an installation hint in the log.
try:
    import web as _web
except (ImportError, ImportWarning) as e:
    logger.debug("Can not find python-webpy, \
           in ubuntu just run \"sudo apt-get install python-webpy\".")
    raise e

try:
    import libvirt as _libvirt
except (ImportError, ImportWarning) as e:
    logger.debug("Can not find python-libvirt, \
            in ubuntu just run \"sudo apt-get install python-libvirt\".")
    raise e

_web.config.debug = False
Example #20
0
 def __init__(self):
     """Create an empty signal processor.

     ``_listeners`` maps a signal to the set of callbacks registered for
     it; ``defaultdict(set)`` creates the set on first subscription.
     """
     self._listeners = defaultdict(set)
     self.logger = log.getlogger("helper.signals.Processor")
Example #21
0
def get_frames(params):
    """Compute the map frames covering the user-selected extent.

    :param params: dict with "step" (frame step in degrees),
        "extent_gcj02" (s/w/n/e boundaries) and "points"["center_point"].
    :return: (count of frames with a usable extent, OrderedDict of frames)
        where each frame maps "row,col" -> {"req_pnt": (x, y),
        "extent_gcj02": ...}.
    """
    frames = OrderedDict()
    step = params["step"]  # frame step (degrees)

    # Four boundaries of the requested extent.
    extent_gcj02 = params["extent_gcj02"]
    s_boundary, w_boundary, n_boundary, e_boundary = extent_gcj02
    # Round the lower bounds down; pad the upper bounds by half a step so
    # the range() walks cover the whole extent.
    s_boundary = int(s_boundary)
    w_boundary = int(w_boundary)
    n_boundary = int(n_boundary + step / 2)
    e_boundary = int(e_boundary + step / 2)

    if abs(n_boundary - s_boundary) <= step and abs(e_boundary -
                                                    w_boundary) <= step:
        # Extent fits a single frame: request at its centre point.
        center_point = params["points"]["center_point"]
        imgs = do_get(*center_point)
        if imgs is not None:  # idiom fix: ``is not None`` instead of ``!= None``
            frames["0,0"] = {
                "req_pnt": center_point,
                "extent_gcj02": imgs[0][2]
            }
        else:
            # Request at the user-selected extent came back malformed.
            log.getlogger().error("【get_frames】按用户圈定extent请求,返回的内容不正常!")
    else:
        # Multiple frames: walk the grid south->north / west->east.
        row = 0
        for x in range(s_boundary, n_boundary, step):
            col = 0
            for y in range(w_boundary, e_boundary, step):
                frame_num = "{},{}".format(row, col)
                imgs = do_get(x, y)
                if imgs is None:  # idiom fix: ``is None`` instead of ``== None`` shape
                    # Failed frame -- skipped entirely. NOTE(review):
                    # ``col`` is not advanced here (original behavior),
                    # so later frames in this row shift left.
                    continue
                frames[frame_num] = {
                    "req_pnt": (x, y),
                    "extent_gcj02": imgs[0][2]
                }

                col += 1
            row += 1
        log.save_json("frames-步长{}°".format(step), frames)
        # Drop None extents and de-duplicate consecutive frames with
        # identical extents. (None extents cannot actually occur --
        # failed frames are never inserted -- the check is kept as a
        # safety net.)
        d = frames.copy()
        prior_extent = None
        for key in d:
            value = d[key]
            if prior_extent is None:
                prior_extent = value["extent_gcj02"]
                continue
            else:
                now_extent = value["extent_gcj02"]
                if now_extent is None:
                    frames.pop(key)
                if prior_extent == now_extent:
                    frames.pop(key)
                else:
                    prior_extent = now_extent

    # Count the frames that actually carry an extent.
    frame_len = sum(1 for value in frames.values()
                    if value["extent_gcj02"] is not None)

    # NOTE(review): "frams" filename typo kept -- downstream may read it.
    log.save_json("frams", frames)
    return frame_len, frames
def do_mosaic(timeline_time):
    """Build the mosaic for ``timeline_time`` from the newest registered
    image of each frame taken before that moment.

    :param timeline_time: the timeline point (time string)
    :return: dict with
        time        actual time of the newest sub-image (used as file name)
        file_path   path of the mosaicked file
        subims_len / subimgs  present only when several frames were merged
        or None when not enough sub-images could be found.
    """
    global params
    global registration_data
    frame_selected = params["frames_selected"]
    mosaic_dir = params["mosaic_dir"]  # output directory for mosaics

    timeline_time_stamp = time_to_timestamp(timeline_time)  # timeline point as a timestamp
    # log.getlogger().info("[融合] 时间线上的时间戳{}".format(timeline_time) )

    subimgs = {}  # chosen sub-image per frame, keyed by its time
    d = registration_data.copy()  # iterate a copy to avoid concurrent-update errors
    for frame_num, frame_value in d.items():
        log.getlogger().info("[融合] - 找图幅{}的图片".format(frame_num))
        # Within each frame, pick the image closest *before* the timeline
        # point: walk times in order, remember the previous one, and stop
        # considering once a time passes the timeline point.
        prior_time = None
        for time, time_value in frame_value.items():
            timestamp = time_to_timestamp(time)  # convert to a timestamp
            if timestamp > timeline_time_stamp:
                # First image past the point: the previous one is the match.
                if prior_time is None:
                    # No image before the timeline point -- give up.
                    log.getlogger().error("[融合] -- 没有找到{}前面的数据".format(
                        frame_num, timeline_time))
                    return None
                # log.getlogger().info("[融合] -- 找到了{}".format(prior_time) )
                subimgs[prior_time] = frame_value[prior_time]
                subimgs[prior_time]["frame_num"] = frame_num
                subimgs[prior_time]["time"] = prior_time
                continue
            else:
                prior_time = time

    # log.getlogger().info("[融合] - 找到的图片{}".format(subimgs) )
    if len(subimgs) < len(frame_selected) / 2:
        # Not enough sub-images: need at least half the selected frames.
        log.getlogger().info("[融合] - 所找图片不够({}\{})".format(
            len(subimgs), len(frame_selected)))
        log.getlogger().info("[融合条件] 达到选所图幅的一半:{}".format(
            len(frame_selected) / 2))
        return None

    if len(subimgs) == 1:  # single image: just copy it into place
        for timeline_time, time_value in subimgs.items():
            last_time = time_value["time"]
            reg_file_path = time_value["file_path"]  # registered image location
            mosaic_file_path = os.path.join(mosaic_dir,
                                            "{}.tif".format(last_time))
            shutil.copyfile(reg_file_path, mosaic_file_path)
            return {"time": last_time, "file_path": mosaic_file_path}
    else:  # several images
        # Sort the sub-image times chronologically.
        times = []
        for time in subimgs.keys():
            times.append(time_to_timestamp(time))
        times.sort()  # ascending
        # Rebuild subimgs in time order.
        new_subimgs = OrderedDict()
        for timestamp in times:
            time = timestamp_to_time(timestamp)
            new_subimgs[time] = subimgs[time]

        # Mosaic the registered images in chronological order.
        last_time = None
        reg_file_paths = []  # registered image paths, oldest first
        for timeline_time, time_value in new_subimgs.items():
            last_time = time_value["time"]
            reg_file_paths.append(time_value["file_path"])
        mosaic_file_path = os.path.join(mosaic_dir, "{}.tif".format(last_time))
        # Stitch reg_file_paths in order and write to mosaic_file_path.
        main_by_params({"-o": mosaic_file_path, "input_files": reg_file_paths})
        return {
            "time": last_time,
            "file_path": mosaic_file_path,
            "subims_len": len(new_subimgs),
            "subimgs": new_subimgs
        }
Example #23
0
# Create your views here.
from django.shortcuts import render_to_response, get_object_or_404, get_list_or_404
from django.template import RequestContext
from models import *
from django.http import HttpResponse, HttpResponseRedirect
from datetime import datetime

from django.views.generic import ListView, DetailView
from forms import *
from django.core.urlresolvers import reverse

from django.contrib import messages
from log import getlogger
logger=getlogger()
# logger.debug("---------------")
from django.utils import simplejson
from fileupload.models import UserFile
from api import CardResource, SmallCardResource, GuideResource, MediaElementResource
from django.contrib.auth.decorators import login_required


# Viewing Guides
# -------------------------

def Landing (request): 
	"""Landing page: show the home view with all guides for authenticated
	users, otherwise the public landing template. ``locals()`` exposes
	``guide_list`` / ``your_guides`` to the template context."""
	if request.user.is_authenticated():		  
		guide_list = Guide.objects.all()
		your_guides= request.user.guide_set.all()
		return render_to_response("site/home.html", locals(), context_instance=RequestContext(request))   	
	return render_to_response("site/landing.html", locals(), context_instance=RequestContext(request))
	
def stop_work():
    """Manually stop the crawl: shut the scheduler down and record it."""
    sched.shutdown()
    logger = log.getlogger()
    logger.info("【手动关闭】程序已关闭")