def remote_pdb_handler(signum, frame):
    """Signal handler (e.g. for SIGUSR1) that drops the process into a
    remote debugger session, then re-arms itself for the next signal."""
    try:
        from remote_pdb import RemotePdb
        # port=0 lets the OS pick an ephemeral listening port.
        RemotePdb(host='127.0.0.1', port=0).set_trace(frame=frame)
    except ImportError:
        log.warning('remote_pdb unavailable. Please install remote_pdb to '
                    'allow remote debugging.')
    # Restore signal handler for later
    signal.signal(signum, remote_pdb_handler)
def __debug(self, engine, options):
    """Attach the requested remote-debugging engine.

    Supported engines: ``PTVS`` (ptvsd), ``PYDEV`` (pydevd) and
    ``REMOTE_PDB``. Raises ``ValidationError`` when a required option
    for the chosen engine is missing.
    """
    if engine == 'PTVS':
        import ptvsd
        if 'secret' not in options:
            raise ValidationError('secret', 'secret is required for PTVS')
        ptvsd.enable_attach(
            options['secret'],
            address=(options['bind_address'], options['bind_port']),
        )
        if options['wait_attach']:
            ptvsd.wait_for_attach()
    elif engine == 'PYDEV':
        # Both options are mandatory for a pydevd attach.
        for key in ('host', 'local_path'):
            if key not in options:
                raise ValidationError(key, f'{key} is required for PYDEV')
        # Map the developer's local checkout onto the in-container path.
        path_mapping = [
            [
                options['local_path'],
                '/usr/local/lib/python3.7/site-packages/middlewared'
            ],
        ]
        os.environ['PATHS_FROM_ECLIPSE_TO_PYTHON'] = json.dumps(path_mapping)
        import pydevd
        pydevd.stoptrace()
        pydevd.settrace(host=options['host'])
    elif engine == 'REMOTE_PDB':
        from remote_pdb import RemotePdb
        RemotePdb(options['bind_address'], options['bind_port']).set_trace()
def extendedExceptHook(the_type, value, tb):
    # Custom sys.excepthook replacement: formats the traceback plus the local
    # variables of the last recorded frame, writes the report in one shot, and
    # optionally opens a shared remote-pdb console.
    lines = traceback.format_exception(the_type, value, tb)
    pre = "---------------------Traceback lines-----------------------\n"
    mid = "---------------------Local variables-----------------------\n"
    trace = "\n".join(lines)
    post = "-----------------------------------------------------------\n"
    # SHOWEXCEPTIONS is a module-level flag choosing the output stream.
    if SHOWEXCEPTIONS:
        fOut = sys.stderr
    else:
        fOut = sys.stdout
    # NOTE(review): StringIO.StringIO is the Python 2 module; on Python 3 this
    # would need io.StringIO — confirm the target interpreter version.
    fTmp = StringIO.StringIO()
    fTmp.write(pre)
    fTmp.write(trace)
    fTmp.write(mid)
    # presumably dumps the locals of the most recently recorded frame
    # (Locs[-1]) into the buffer — verify against print_callers_locals.
    print_callers_locals(fTmp, Locs[-1])
    fTmp.write(post)
    if _USE_REMOTE:
        fTmp.write("Starting remote console... Connect with "\
                   "'telnet %s %s' or 'nc -C %s %s'\n" % (
                       CONFIG_REMOTE["host"], CONFIG_REMOTE["port"],
                       CONFIG_REMOTE["host"], CONFIG_REMOTE["port"])
                   )
    # Buffered into fTmp first so the whole report is emitted atomically.
    fOut.write(fTmp.getvalue())
    if hasattr(fOut, "flush"):
        fOut.flush()
    if _USE_REMOTE:
        # Lazily create a single shared RemotePdb instance in module globals
        # so repeated exceptions reuse the same listening socket.
        if not Remote:
            globals()["Remote"] = RemotePdb(CONFIG_REMOTE["host"],
                                            CONFIG_REMOTE["port"])
        Remote.set_trace()
def norm(arr, thres=None, max_=None):
    """Normalize *arr* by its peak value (or an explicit *max_*).

    Args:
        arr: numpy array of samples.
        thres: optional silence threshold; when given and ``np.max(arr)``
            is below it, *arr* is returned unscaled.
        max_: optional explicit divisor used instead of ``np.max(arr)``.

    Returns:
        ``(arr, silence)`` — the (possibly scaled) array and a bool that is
        True only when the threshold short-circuit was taken.
    """
    # Make floating-point problems raise instead of warn. NOTE: this mutates
    # numpy's *global* error state for the caller too (original behaviour).
    np.seterr(all='raise')
    try:
        # Overflow probe: "overflow encountered in reduce" fires here when the
        # data overflows during mean subtraction. The result is discarded.
        arr - np.mean(arr.astype(np.float64)).astype(np.float32)
    except FloatingPointError:
        # Was a bare ``except:`` — narrowed to the error seterr('raise')
        # actually produces. Debug hook: remote pdb on a semi-random port.
        from remote_pdb import RemotePdb
        from random import randint
        RemotePdb('127.0.0.1', 4444 + randint(0, 1000)).set_trace()
    if thres is not None and np.max(arr) < thres:
        # Below the silence threshold: return the input unscaled.
        silence = True
    else:
        if max_ is not None:
            arr = arr / max_
        else:
            arr = arr / float(np.max(arr))
        silence = False
    return arr, silence
def serve():
    """Render the index page with a randomly picked emoji.

    NOTE: contains a debug hook that blocks every request until a
    debugger client attaches on 127.0.0.1:5555."""
    picked = choice(CHOICES)
    from remote_pdb import RemotePdb
    RemotePdb('127.0.0.1', 5555).set_trace()
    return render_template("index.html", emoji=picked)
def start_remote_pdb_op(handler, logger, cmd, resp):
    """Starts remote debugger on specified port."""
    from remote_pdb import RemotePdb
    listen_port = cmd.args['port']
    logger.warning("starting remote debugger on port %d", listen_port)
    # Binds all interfaces; blocks here until a debugger client connects.
    RemotePdb('0.0.0.0', listen_port).set_trace()
    resp(chorus.OK, 'OK')
def __init__(self, lst, screen_manager):
    """List-backed screen.

    :param lst: non-empty sequence of items to display
    :param screen_manager: forwarded to ``Screen.__init__``
    :raises Exception: if *lst* is empty or falsy
    """
    Screen.__init__(self, screen_manager)
    # Debug leftover: blocks until a debugger client attaches on port 4444.
    from remote_pdb import RemotePdb
    RemotePdb('localhost', 4444).set_trace()
    # The original also tested ``len(lst) < 0``, which can never be true
    # (len() is non-negative); the falsy check alone is equivalent.
    if not lst:
        raise Exception("Please check lst")
    self.lst = lst
    self.selected = 0        # index of the currently highlighted item
    self.length = len(lst)
def start_remote_debugger_if_correct_worker(*arg):
    """Start a remote PDB server, but only inside the uWSGI worker whose
    id is written in the control file ``remote_pdb_filename``."""
    with open(remote_pdb_filename) as handle:
        target = handle.read().strip()
    current = str(uwsgi.worker_id())
    if target != current:
        print("This worker is {}, not {}".format(current, target))
        return
    print("I'm worker {}! Starting remote PDB server on port {}.".format(
        current, remote_pdb_port))
    return RemotePdb('127.0.0.1', remote_pdb_port).set_trace()
#!/usr/bin/env python3
"""Tiny demo loop that repeatedly drops into a RemotePdb session."""
import sys
import time

# Optional CLI override: script.py HOST PORT (defaults below otherwise).
if len(sys.argv) > 1:
    HOST = sys.argv[1]
    PORT = int(sys.argv[2])
else:
    HOST = '0.0.0.0'
    PORT = 4244

avar = 42
print("Hello", avar)
print(sys.argv)
while True:
    from remote_pdb import RemotePdb
    # Blocks until a debugger client attaches, once per iteration.
    RemotePdb(HOST, PORT).set_trace()  # noqa
    print("Do something", avar)
    time.sleep(1)
# Unreachable (loop above never exits) — kept from the original.
print("Bye", avar)
# Open a remote debugger listening on localhost:4444, then spin forever so
# the process stays alive for a client to attach (e.g. `telnet localhost 4444`).
from remote_pdb import RemotePdb
RemotePdb("localhost", 4444).set_trace()
i = 0
while True:
    i += 1
def start_remote(a, b):
    """Open a remote pdb session on 0.0.0.0:4444.

    Both arguments are ignored; the two-parameter signature matches a
    signal-handler style callback."""
    from remote_pdb import RemotePdb
    session = RemotePdb('0.0.0.0', 4444)
    session.set_trace()
def train_one_epoch(self, sep_only=False, pitch_only=False):
    """One epoch training function.

    Iterates once over ``self.data_source``, back-propagating a
    cross-entropy loss and all-reducing loss/accuracy across ranks.

    Returns:
        ``(avg_loss, avg_accuracy)`` for the epoch.
    """
    ## prepare ###
    self.model.train()
    epoch_loss = AverageMeter()
    epoch_acc = AverageMeter()
    data_src = self.data_source
    data_loader = data_prefetcher(data_src.loader)
    iterations = data_src.iterations
    local_rank = self.local_rank
    torch.backends.cudnn.benchmark = True
    for idx, batch in enumerate(tqdm(data_loader, total=iterations, ncols=36)):
        # Debug escape hatch: flip to True to stop early while profiling.
        profile = False
        if profile and idx > 128:  # was `profile == True` — compare truthiness directly
            return epoch_loss.avg, epoch_acc.avg
        input_ = batch['feat']
        input_ = input_.half() if self.half else input_.float()
        input_ = input_.to(self.local_rank)
        logits = self.model(input_)
        label = batch['label'].long().to(local_rank)
        loss_scale = 1.
        # NOTE(review): cnt_loss is only bound for CrossEntropyLoss criteria;
        # any other criterion raises NameError at backward() — confirm intent.
        if isinstance(self.criterion, torch.nn.CrossEntropyLoss):  # was type(...) ==
            cnt_loss = self.criterion(logits, label)
        # optimizer
        self.optimizer.zero_grad()
        if self.half:
            with amp.scale_loss(cnt_loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            cnt_loss.backward()
        self.optimizer.step()
        label_np = label.detach().cpu().numpy()
        logits_np = logits.detach().cpu().numpy()
        pred_np = np.argmax(logits_np, axis=-1)
        acc = torch.Tensor([np.mean(pred_np == label_np)]).to(self.local_rank)
        # Average the loss across all ranks onto rank 0.
        torch.distributed.barrier()
        cnt_loss_detach = cnt_loss.detach()
        torch.distributed.reduce(cnt_loss_detach, dst=0)
        torch.distributed.barrier()
        cnt_loss_detach = cnt_loss_detach / self.world_size
        epoch_loss.update(cnt_loss_detach.item())
        # Same reduction for the batch accuracy.
        torch.distributed.barrier()
        torch.distributed.reduce(acc, dst=0)
        torch.distributed.barrier()
        acc = acc / self.world_size
        epoch_acc.update(float(acc.cpu()))
        self.current_iteration += 1
        if local_rank == 0:
            # On-demand debugging. The original used open(...).read(), which
            # leaked the file handle every iteration; close it deterministically.
            with open("/tmp/debug") as dbg:
                debug_flag = dbg.read()
            if debug_flag.startswith("nmsl"):
                from remote_pdb import RemotePdb
                RemotePdb('127.0.0.1', 4444 + local_rank).set_trace()
                from utils.plot import plotCurveMat
                plotCurveMat([pred_np.reshape(-1), label_np.reshape(-1)],
                             labels=['pred', 'label'])
    torch.cuda.empty_cache()
    return epoch_loss.avg, epoch_acc.avg
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from remote_pdb import RemotePdb
import pyspark.daemon as original_daemon

if __name__ == '__main__':
    # Note it's super important we don't output anything to STDERR/STDOUT since
    # The Scala Spark expects certain bytes to happen to tell it how to connect to us
    debug_port = 7778
    fail = False
    try:
        RemotePdb('0.0.0.0', debug_port).set_trace()
    except Exception:  # was a bare except (also swallowed KeyboardInterrupt)
        fail = True
    # BUG FIX: the original loop retried the *same* port forever; advance the
    # port on each failed attempt so we actually scan the 7779-7799 range.
    while fail and debug_port < 7799:
        debug_port += 1
        try:
            RemotePdb('0.0.0.0', debug_port).set_trace()
            fail = False
        except Exception:
            fail = True
    original_daemon.manager()
def remote_debug_signal(self, *args):
    """Signal hook: open a remote pdb listening on all interfaces, port 9999."""
    from remote_pdb import RemotePdb
    debugger = RemotePdb('0.0.0.0', 9999)
    debugger.set_trace()
def _set_trace(self):
    """On rank 0 only, drop into a remote pdb when the control file
    ``/tmp/debug`` starts with the magic string ``"nmsl"``."""
    if self.local_rank != 0:
        return
    # Was `open(...).read()` with no close — use a context manager so the
    # file handle is released deterministically.
    with open("/tmp/debug") as flag_file:
        flag = flag_file.read()
    if flag.startswith("nmsl"):
        from remote_pdb import RemotePdb
        # Port offset by rank (always 0 here) matches the original scheme.
        RemotePdb('127.0.0.1', 4444 + self.local_rank).set_trace()
def pdb(host='127.0.0.1', port=4444):
    """Convenience wrapper: start a RemotePdb session (defaults to localhost:4444)."""
    debugger = RemotePdb(host, port)
    debugger.set_trace()
def d():
    """Shorthand: drop into a remote pdb on localhost:4444."""
    from remote_pdb import RemotePdb
    session = RemotePdb('127.0.0.1', 4444)
    session.set_trace()
def handle_pdb(sig, frame):
    """Signal handler: print the signal/frame and open a remote pdb on 0.0.0.0:48110."""
    from remote_pdb import RemotePdb
    # Fixed: original used the Python 2 `print sig, frame` statement, which is
    # a SyntaxError on Python 3.
    print(sig, frame)
    RemotePdb('0.0.0.0', 48110).set_trace()
def _set_trace(received_signum, frame):
    # type: (int, FrameType) -> None
    """Signal handler: open a remote pdb bound to ``bind_host``/``bind_port``
    (both captured from the enclosing scope) and break at *frame*."""
    RemotePdb(host=bind_host, port=bind_port).set_trace(frame)
def interactive(port):
    """Start an interactive remote pdb session on 127.0.0.1 at *port*."""
    from remote_pdb import RemotePdb
    session = RemotePdb('127.0.0.1', port)
    session.set_trace()