Example #1
    def run(self):
        gc.collect()
        #filename of the archive to be scanned
        filepath = self.jobdata['file']
        filename = os.path.basename(filepath)  # filename only (no path info)
        metadatapath = UpqConfig().paths['metadata']

        if not os.path.exists(filepath):
            self.msg("File doesn't exist: %s" % (filepath))
            return False

        hashes = self.get_hash(filepath)
        tmpdir = self.setupdir(filepath)  #temporary directory for unitsync

        usync = self.initUnitSync(tmpdir, filename)
        archiveh = self.openArchive(usync, os.path.join("games", filename))

        res = self.ExtractMetadata(usync, archiveh, filename, filepath,
                                   metadatapath, hashes)

        err = usync.GetNextError()
        while err is not None:
            self.logger.error(err)
            err = usync.GetNextError()

        usync.CloseArchive(archiveh)
        usync.RemoveAllArchives()
        usync.UnInit()
        del usync
        #print(self.jobcfg)
        if not "keeptemp" in self.jobcfg or self.jobcfg["keeptemp"] != "yes":
            assert (tmpdir.startswith("/home/upq/upq/tmp/"))
            shutil.rmtree(tmpdir)
        return res
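A note on the cleanup above: the assert ties deletion to one hard-coded deployment path. A minimal sketch of a portable containment check instead, assuming the temp directory always comes from UpqConfig().paths['tmp'] as in setupdir() below (remove_tmpdir is a hypothetical helper, not part of the project):

    import os
    import shutil

    def remove_tmpdir(tmpdir, tmproot):
        # resolve symlinks so a crafted path cannot escape the temp root
        tmpdir = os.path.realpath(tmpdir)
        tmproot = os.path.realpath(tmproot)
        # only delete directories that really live under the configured tmp root
        if os.path.commonpath([tmpdir, tmproot]) != tmproot:
            raise ValueError("refusing to delete %s outside %s" % (tmpdir, tmproot))
        shutil.rmtree(tmpdir)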
Example #2
	def run(self):
		fid=int(self.jobdata['fid'])
		results=UpqDB().query("SELECT filename, path, sdp FROM file WHERE fid=%d AND status=1" % fid)
		res=results.first()
		#filename of the archive to be scanned
		filename=res['filename'] # filename only (no path info)
		absfilename=os.path.join(UpqConfig().paths['files'], res['path'], res['filename']) # absolute filename
		torrent=os.path.join(UpqConfig().paths['metadata'], res['sdp']+ ".torrent" )
		if not os.path.exists(absfilename):
			self.msg("File doesn't exist: %s" %(absfilename))
			return False

		res=self.create_torrent(absfilename, torrent)
		if res:
			UpqDB().query("UPDATE file SET torrent=1 WHERE fid=%s" %(fid))
		return res
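These jobs build SQL by %-interpolation throughout. For contrast, a minimal sketch of the same lookup with bound parameters, assuming a plain SQLAlchemy engine (the UpqDB wrapper itself is not shown in these examples, so this is an illustration, not the project's API):

    from sqlalchemy import create_engine, text

    engine = create_engine("sqlite:///:memory:")  # placeholder URL, an assumption
    with engine.connect() as conn:
        row = conn.execute(
            text("SELECT filename, path, sdp FROM file WHERE fid=:fid AND status=1"),
            {"fid": 42},
        ).first()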
Example #3
    def run(self):
        """
			class Hash must be initialized with fileid!
		"""
        fid = int(self.jobdata['fid'])
        results = UpqDB().query(
            "SELECT filename, path, md5, sha1, sha256 FROM file WHERE fid=%d"
            % fid)
        res = results.first()
        filename = os.path.join(UpqConfig().paths['files'], res['path'],
                                res['filename'])
        if not os.path.exists(filename):
            self.msg("File %s doesn't exist" % (filename))
            return False
        hashes = self.hash(filename)
        if res['md5'] is not None and res['md5'] != hashes['md5']:
            self.logger.error("md5 mismatch")
            return False
        if res['sha1'] is not None and res['sha1'] != hashes['sha1']:
            self.logger.error("sha1 mismatch")
            return False
        if res['sha256'] is not None and res['sha256'] != hashes['sha256']:
            self.logger.error("sha256 mismatch")
            return False
        UpqDB().query(
            "UPDATE file set md5='%s', sha1='%s', sha256='%s' WHERE fid=%d" %
            (hashes['md5'], hashes['sha1'], hashes['sha256'], fid))
        self.msg("md5: %s sha1: %s sha256: %s" %
                 (hashes['md5'], hashes['sha1'], hashes['sha256']))
        return True
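The hash() helper isn't shown in these examples; a minimal sketch of what it might do, reading the file once in chunks and feeding all three digests (the function name and chunk size are assumptions):

    import hashlib

    def hash_file(filename, chunksize=64 * 1024):
        digests = {name: hashlib.new(name) for name in ("md5", "sha1", "sha256")}
        with open(filename, "rb") as f:
            for chunk in iter(lambda: f.read(chunksize), b""):
                for digest in digests.values():
                    digest.update(chunk)
        return {name: d.hexdigest() for name, d in digests.items()}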
Example #4
 def setupdir(self, filepath):
     if not os.path.exists(filepath):
         self.msg("error setting up temp dir, file doesn't exist %s" %
                  (filepath))
         raise Exception(self.msgstr)
     temppath = tempfile.mkdtemp(dir=UpqConfig().paths['tmp'])
     archivetmp = os.path.join(temppath, "games")
     os.mkdir(archivetmp)
     self.tmpfile = os.path.join(archivetmp, os.path.basename(filepath))
     self.logger.debug("symlinking %s %s" % (filepath, self.tmpfile))
     os.symlink(filepath, self.tmpfile)
     return temppath
Example #5
 def check(self):
     results = UpqDB().query(
         "SELECT fid, filename, path FROM file WHERE fid=%d " %
         int(self.jobdata['fid']))
     res = results.first()
     filename = os.path.join(UpqConfig().paths['files'], res['path'],
                             res['filename'])
     if not os.path.exists(filename):
         self.logger.error("file %d doesn't exist: %s" %
                           (res['fid'], filename))
         return False
     self.enqueue_job()
     return True
Example #6
 def saveImage(self, image, size):
     """ store a image, called with an Image object, returns the filename """
     m = hashlib.md5()
     m.update(image.tobytes())
     if size[0] > 1024:  # shrink if too big
         sizey = int((1024.0 / size[0]) * size[1])
         self.logger.debug("image too big %dx%d, resizing to %dx%d" %
                           (size[0], size[1], 1024, sizey))
         image = image.resize((1024, sizey))
     else:
         image = image.resize((size[0], size[1]))
     #use md5 as filename, so it can be reused
     filename = m.hexdigest() + ".jpg"
     absname = os.path.join(UpqConfig().paths['metadata'], filename)
     if os.path.isfile(absname):
         # the md5 of the pixel data is the filename, so an existing file has identical content
         self.logger.debug("Not overwriting %s" % (absname))
         return filename
     image.save(absname)
     os.chmod(absname, int("0644", 8))
     self.logger.debug("Wrote " + absname)
     return filename
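A minimal usage sketch, assuming the caller has a Pillow Image at hand (the file name and the job variable are hypothetical):

    from PIL import Image

    image = Image.open("minimap.png").convert("RGB")  # JPEG output needs RGB
    filename = job.saveImage(image, image.size)       # 'job' is the running metadata job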
Example #7
	def __init__(self, jobname, jobdata):
		# if you add attributes to the UpqJob class that should be carried over
		# through a restart/reschedule, add it to notify_job.jobdata['job']
		# in notify(), if and only if it is (JSON)-serializable!
		self.jobname = jobname
		self.jobcfg  = UpqConfig().jobs[jobname] # settings from the config file

		# subjobs handling: if a runtime job is available, use it, else the configured ones
		if 'subjobs' in jobdata: # runtime-set subjobs are available
			pass # keep them as passed in
		elif 'subjobs' in self.jobcfg:
			# make a copy of subjobs, as we modify them later
			jobdata['subjobs'] = self.jobcfg['subjobs'][:]
		else:
			jobdata['subjobs'] = [] # no subjobs defined, initialize empty
		self.jobdata = jobdata #runtime parameters, these are stored into database and restored on re-run
		self.logger  = log.getLogger("upq")
		self.thread  = "T-none-0"
		self.jobid   = -1
		self.msgstr  = ""
		self.result  = False
		self.finished= threading.Event()
		self.retries = 0
Example #8
    def start_server(self):
        if os.path.exists(UpqConfig().paths['socket']):
            self.logger.debug("File '%s' exists - removing it.",
                              UpqConfig().paths['socket'])
            os.remove(UpqConfig().paths['socket'])
        try:
            server = upqserver.UpqServer(UpqConfig().paths['socket'],
                                         upqserver.UpqRequestHandler)
        except Exception as e:
            msg = "Couldn't create socket %s %s" % (
                UpqConfig().paths['socket'], e)
            self.logger.error(msg)
            print(msg, file=sys.stderr)
            sys.exit(1)
        os.chmod(UpqConfig().paths['socket'],
                 int(str(UpqConfig().paths['socket_chmod']), 8))
        self.logger.info("Server listening on '%s'.", server.server_address)

        # Start a thread with the server -- that thread will then start one
        # more thread for each request
        server_thread = threading.Thread(target=server.serve_forever)
        # Exit the server thread when the main thread terminates
        server_thread.daemon = True
        server_thread.start()
        self.logger.debug("Server main thread is '%s'.",
                          server_thread.getName())

        # everything should be fine now, so let's revive unfinished jobs
        unfinished_business = server.revive_jobs()
        self.logger.debug("unfinished_business='%s'", unfinished_business)
        self.logger.info("Starting %d unfinished jobs found in DB.",
                         len(unfinished_business))
        for job in unfinished_business:
            self.logger.info("Starting unfinished job '%s' with jobid '%d'",
                             job.jobname, job.jobid)
            job.enqueue_job()

        return server
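For context, a minimal sketch of the same pattern with the standard library alone, assuming a Unix stream socket and a trivial handler (these names are placeholders, not the project's UpqServer):

    import socketserver
    import threading

    class EchoHandler(socketserver.StreamRequestHandler):
        def handle(self):
            # one thread per connection; echo a single line back
            self.wfile.write(self.rfile.readline())

    server = socketserver.ThreadingUnixStreamServer("/tmp/demo.socket", EchoHandler)
    thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True  # exit with the main thread, as above
    thread.start()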
Example #9
 def getPathByStatus(self, status):
     if status == 1:
         return UpqConfig().paths['files']
     elif status == 3:
         return UpqConfig().paths['broken']
     raise Exception("Unknown status %s" % (status))
Example #10
import os
import sys
import ctypes
from PIL import Image
from io import StringIO
import shutil
import getopt
import base64
import tempfile
import gzip
import hashlib
import json
import gc
import traceback
import filecmp

from upqjob import UpqJob
from upqconfig import UpqConfig

unitsyncpath = os.path.join(UpqConfig().paths['jobs_dir'], 'unitsync')
sys.path.append(unitsyncpath)

try:
    from unitsync import unitsync
except ImportError:
    import unitsync


class Extract_metadata(UpqJob):
    """
		setup temporary directory.
		creates <tempdir>/games and symlinks archive file into that directory
	"""
    def setupdir(self, filepath):
        if not os.path.exists(filepath):
Example #11
#
#You should have received a copy of the GNU General Public License
#along with this program.  If not, see <http://www.gnu.org/licenses/>.


# Creates a torrent file

from upqjob import UpqJob
from upqdb import UpqDB, UpqDBIntegrityError
from upqconfig import UpqConfig

import sys
import os
import shutil

metalinkpath=os.path.join(UpqConfig().paths['jobs_dir'],'metalink')
sys.path.append(metalinkpath)

import metalink

class Createtorrent(UpqJob):
	def check(self):
		if not 'fid' in self.jobdata:
			self.msg("fid not specified")
			return False

		results=UpqDB().query("SELECT * FROM file WHERE fid=%s and status=1" % (int(self.jobdata['fid'])))
		res=results.first()
		if res is None:
			self.msg("fid not found")
			return False
Example #12
class UpqJob(object):
	def __init__(self, jobname, jobdata):
		# if you add attributes to the UpqJob class that should be carried over
		# through a restart/reschedule, add it to notify_job.jobdata['job']
		# in notify(), if and only if it is (JSON)-serializable!
		self.jobname = jobname
		self.jobcfg  = UpqConfig().jobs[jobname] # settings from the config file

		# subjobs handling: if a runtime job is available, use it, else the configured ones
		if 'subjobs' in jobdata: # runtime-set subjobs are available
			pass # keep them as passed in
		elif 'subjobs' in self.jobcfg:
			# make a copy of subjobs, as we modify them later
			jobdata['subjobs'] = self.jobcfg['subjobs'][:]
		else:
			jobdata['subjobs'] = [] # no subjobs defined, initialize empty
		self.jobdata = jobdata #runtime parameters, these are stored into database and restored on re-run
		self.logger  = log.getLogger("upq")
		self.thread  = "T-none-0"
		self.jobid   = -1
		self.msgstr  = ""
		self.result  = False
		self.finished= threading.Event()
		self.retries = 0

	def check(self):
		"""
		Check if the job is feasible and possibly queue it.
		Override this method in your job class.

		Returns True and sets jobid.
		"""
		# check if the file is readable (or similar)
		# return True when jobdata is fine to call run(); when returning False, set self.msg
		self.enqueue_job()
		return True

	def run(self):
		"""
		Do the actual job work, saving the result in self.result.
		The returned boolean indicates success or failure for the notification system.

		Override this method in your job class.
		"""
		# Save result in self.result.
		return True

	def enqueue_job(self):
		"""
		Put this job into the active queue
		"""
		UpqQueueMngr().enqueue_job(self)

	def enqueue_newjob(self, jobname, params):
		"""
		Add a new job to the queue; params is a dict, for example:
			{ "mail": "*****@*****.**", "syslog" }
		"""
		job=UpqQueueMngr().new_job(jobname, params)
		UpqQueueMngr().enqueue_job(job)

	def __setstate__(self, state):
		# this is used to unpickle a job
		self.__dict__.update(state)
		self.logger = log.getLogger("upq")

	def msg(self, msg):
		self.logger.debug(msg)
		if len(self.msgstr) + len(msg) <= 500:
			self.msgstr += str(msg)
		else:
			self.logger.error("msg too long: --------%s-------" % (msg))

	def start_subjobs(self,job):
		"""
			checks if a job has a subjob and runs it
		"""
		if 'subjobs' in job.jobdata and len(job.jobdata['subjobs']) > 0:
			jobname=job.jobdata['subjobs'].pop()
			newjob=UpqQueueMngr().new_job(jobname, job.jobdata)
			newjob.check()

	def append_job(self, job, params=None):
		"""
			append a job; it will run next, since start_subjobs() pops from the end
		"""
		if params is None: # avoid a shared mutable default argument
			params = {}
		self.jobdata['subjobs'].append(job)
		for name in params:
			self.jobdata[name] = params[name]

	def notify(self, succeed):
		"""
		Notify someone responsible about job result.
		"""
		params = {}
		if succeed:
			if self.jobcfg['notify_success']:
				params = UpqQueueMngr().getParams(self.jobcfg['notify_success'])
				params['msg'] = self.msgstr
				params['success'] = True
		else:
			if self.jobcfg['notify_fail']:
				params = UpqQueueMngr().getParams(self.jobcfg['notify_fail'])
				params['msg'] = self.msgstr
				params['success'] = False
		if params:
			notify_job = UpqQueueMngr().new_job("notify", params)
			if isinstance(notify_job, UpqJob):
				# data of this job carried over to Notify job
				notify_job.jobdata['job'] = {"jobname": self.jobname,
								  "jobcfg" : self.jobcfg,
								  "jobdata": self.jobdata,
								  "jobid"  : self.jobid,
								  "msgstr" : self.msgstr,
								  "result" : self.result,
								  "retries": self.retries}
				UpqQueueMngr().enqueue_job(notify_job)

	def __str__(self):
		return "Job: "+self.jobname +" id:"+ str(self.jobid)+" jobdata:"+json.dumps(self.jobdata) +" thread: "+self.thread

	def getcfg(self, name, default):
		"""
			returns a config value, or default if the config isn't set
		"""
		return self.jobcfg.get(name, default)
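A minimal sketch of a job built on this base class, following the check()/run() contract documented above (the job name, config key, and jobdata key are hypothetical):

    import os

    class Checksize(UpqJob):
        """Hypothetical job: fail files larger than a configured limit."""

        def check(self):
            if 'file' not in self.jobdata:
                self.msg("file not specified")
                return False
            self.enqueue_job()
            return True

        def run(self):
            limit = int(self.getcfg('maxsize', 1024 * 1024 * 1024))
            self.result = os.path.getsize(self.jobdata['file']) <= limit
            return self.result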
Example #13
    def run(self):
        fid = int(self.jobdata['fid'])
        results = UpqDB().query(
            "SELECT filename, path, size, md5 FROM file where fid=%d AND status=1"
            % (fid))
        if results.rowcount != 1:
            self.msg("Wrong result count with fid %d" % fid)
            return False
        res = results.first()
        srcfilename = os.path.join(UpqConfig().paths['files'], res['path'],
                                   res['filename'])
        dstfilename = os.path.join(res['path'], res['filename'])
        filesize = res['size']
        md5 = res['md5']
        # upload this fid to all mirrors that don't already have it
        results = UpqDB().query(
            "SELECT m.mid, ftp_url, ftp_user, ftp_pass, ftp_dir, ftp_port, ftp_passive, ftp_ssl "
            "FROM file f "
            "LEFT JOIN mirror m ON m.status=f.status "
            "WHERE f.fid=%d AND f.status=1 AND m.status=1 "
            "AND m.mid NOT IN (SELECT mid FROM mirror_file WHERE fid=%d AND status=1)"
            % (fid, fid))
        uploadcount = 0
        for res in results:
            try:
                ftp = self.ftpconnect(res['ftp_url'], res['ftp_port'],
                                      res['ftp_user'], res['ftp_pass'],
                                      res['ftp_passive'], res['ftp_dir'],
                                      res['ftp_ssl'])
            except Exception as e:
                self.logger.error("Couldn't connect to the ftp server: %s", e)
                continue
            if not os.path.isfile(srcfilename):
                self.msg("File doesn't exist: " + srcfilename)
                return False
            try:
                f = open(srcfilename, "rb")

                dstdir = os.path.dirname(dstfilename)
                try:
                    self.logger.debug("cd into " + dstdir)
                    ftp.cwd(dstdir)
                except ftplib.all_errors:
                    try:
                        self.logger.debug("mkdir " + dstdir)
                        ftp.mkd(dstdir)
                        self.logger.debug("cwd " + dstdir)
                        ftp.cwd(dstdir)
                    except ftplib.all_errors:
                        self.logger.error(
                            "couldn't cd/mkdir %s, skipping upload " %
                            (dstdir))
                        continue
                self.logger.info("uploading %s to %s" %
                                 (os.path.basename(dstfilename),
                                  (res['ftp_url'])))
                ftp.storbinary('STOR ' + os.path.basename(dstfilename), f)

                ftp.quit()
                f.close()
                try:  # upload succeeded, mark in db as uploaded
                    mfid = UpqDB().insert(
                        "mirror_file", {
                            "mid": res['mid'],
                            "fid": fid,
                            "path": dstfilename,
                            "status": 1
                        })
                except UpqDBIntegrityError:
                    dupe = UpqDB().query(
                        "SELECT mfid FROM mirror_file WHERE mid=%s AND fid=%s"
                        % (res['mid'], fid))
                    mfid = dupe.first()['mfid']
                    self.logger.info("file already uploaded: mfid=%d" % (mfid))
                    UpqDB().query(
                        "UPDATE mirror_file SET status=1, path='%s' WHERE mfid=%s"
                        % (dstfilename, mfid))
                self.logger.debug("inserted into db as %d", mfid)
                self.enqueue_newjob("verify_remote_file", {"mfid": mfid})
            except ftplib.all_errors as e:
                self.logger.error("Ftp-Error (%s) %s failed %s" %
                                  ((res['ftp_url'], srcfilename, e)))
            except Exception as e:
                self.logger.error("Upload (%s) %s failed %s" %
                                  ((res['ftp_url'], srcfilename, e)))
                return False
            uploadcount += 1
        self.msg("Uploaded to %d mirrors." % (uploadcount))
        self.deleteOldFiles()
        return True
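The ftpconnect() helper called above isn't shown in these examples; a minimal sketch of what it might do with the standard library, given the parameters passed in (the name and signature are assumptions):

    import ftplib

    def ftpconnect(url, port, user, password, passive, directory, ssl):
        # use FTPS when the mirror requires it
        ftp = ftplib.FTP_TLS() if ssl else ftplib.FTP()
        ftp.connect(url, int(port))
        ftp.login(user, password)
        if ssl:
            ftp.prot_p()  # encrypt the data channel too
        ftp.set_pasv(bool(passive))
        ftp.cwd(directory)
        return ftp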
Example #14
def main(argv=None):
    if argv is None:
        argv = sys.argv

    server = None

    # SIGINT signal handler
    def program_cleanup(sig_num, frame):
        logger = log.getLogger("upq")
        logger.info("Shutting down socket server...")
        server.shutdown()
        logger.info("Disconnecting from DB...")
        upqdb.UpqDB().cleanup()
        log.getLogger("upq").info("Good bye.")
        sys.exit(0)

    usage = "usage: %prog -c CONFIGFILE [options]"
    parser = OptionParser(usage)
    parser.add_option("-c",
                      "--config",
                      dest="configfile",
                      default="",
                      help="path to config file CONFIGFILE")
    #TODO: use this to en/disable daemonization
    #parser.add_option("-d", "--daemonize",
    #                  help="detach from terminal etc")
    parser.add_option("-l",
                      "--logfile",
                      dest="logfile",
                      default="",
                      help="path to logfile LOGFILE")
    (options, argv) = parser.parse_args()

    try:
        # read ini file
        UpqConfig(options.configfile, options.logfile)
        UpqConfig().readConfig()

        #FIXME: remove following line + how does this $%$!" work?
        del UpqConfig().daemon['pidfile']
        #if UpqConfig().daemon.has_key('pidfile'):
        #    lockfile = UpqConfig().daemon['pidfile']
        #    UpqConfig().daemon['pidfile'] = pidlockfile.TimeoutPIDLockFile(lockfile, acquire_timeout=1)
        context = daemon.DaemonContext(**UpqConfig().daemon)
        # daemonize
        context.stdout = sys.stderr
        context.stderr = sys.stderr

        upq = Upq()
        with context:
            # initialize logging
            logger = log.init_logging(UpqConfig().logging)
            logger.info("Starting logging...")
            logger.debug(UpqConfig().config_log)
            # setup and test DB
            logger.info("Connecting to DB...")
            db = upqdb.UpqDB()
            db.connect(UpqConfig().db['url'], UpqConfig().db['debug'])
            db.version()
            # start server
            logger.info("Starting socket server...")
            server = upq.start_server()

        # catch SIGINT and SIGTERM
        signal.signal(signal.SIGINT, program_cleanup)
        signal.signal(signal.SIGTERM, program_cleanup)

        log.getLogger("upq").info(
            "Server running until receiving SIGTERM or SIGINT / Ctrl+C.")
        signal.pause()

    except Exception:
        traceback.print_exc(file=sys.stderr)
    try:
        db.cleanup()
    except Exception:
        pass
    sys.exit(1)
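For completeness, the usual entry point for a main(argv=None) function like this one (the module and file names in the comment are assumptions):

    if __name__ == "__main__":
        # e.g.  python upq.py -c upq.cfg -l upq.log
        main()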