def got_command(self, dispatcher, command, headers):
    if command == "summary":
        summary = {'ip': RelayConfig.relay_ip,
                   'version': __version__,
                   'status': self.status,
                   'uptime': int(time() - self.start_time),
                   'session_count': len(self.session_manager.sessions),
                   'stream_count': self.session_manager.stream_count,
                   'bps_relayed': self.session_manager.bps_relayed}
        return cjson.encode(summary)
    elif command == "sessions":
        return cjson.encode(self.session_manager.statistics)
    elif command == "update":
        if self.graceful_shutdown or self.shutting_down:
            if not self.session_manager.has_session(**headers):
                log.debug("cannot add new session: media-relay is shutting down")
                return 'halting'
        try:
            local_media = self.session_manager.update_session(dispatcher, **headers)
        except RelayPortsExhaustedError:
            log.error("Could not reserve relay ports for session, all allocated ports are being used")
            return "error"
        if local_media:
            return " ".join([RelayConfig.advertised_ip or local_media[0][0]] + [str(media[1]) for media in local_media])
    else:  # remove
        session = self.session_manager.remove_session(**headers)
        if session is None:
            return "error"
        else:
            return cjson.encode(session.statistics)

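# Hedged illustration of the reply formats produced by got_command() above;
# the address and ports are invented. An "update" reply is the advertised
# relay address plus one allocated port per media stream; the other commands
# reply with JSON-encoded data.
example_update_reply = '203.0.113.9 50000 50002'
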
def connectionMade(self):
    peer = self.transport.getPeer()
    log.debug("Connected to dispatcher at %s:%d" % (peer.host, peer.port))
    if RelayConfig.passport is not None:
        peer_cert = self.transport.getPeerCertificate()
        if not RelayConfig.passport.accept(peer_cert):
            self.transport.loseConnection(CertificateSecurityError('peer certificate not accepted'))
    self._connection_watcher = RecurrentCall(RelayConfig.keepalive_interval, self._send_keepalive)

def _do_cleanup(self, ip):
    log.debug("Doing cleanup for old relay %s" % ip)
    del self.cleanup_timers[ip]
    for call_id in [call_id for call_id, session in self.sessions.items() if session.relay_ip == ip]:
        del self.sessions[call_id]

def update_statistics(self, stats):
    log.debug("Got statistics: %s" % stats)
    if stats["start_time"] is not None:
        for accounting in self.accounting:
            try:
                accounting.do_accounting(stats)
            except Exception as e:
                log.error("An unhandled error occurred while doing accounting: %s" % e)
                log.err()

def get_ports(self):
    if len(self.bad_ports) > len(self.ports):
        log.debug('Excessive amount of bad ports, doing cleanup')
        self.ports.extend(self.bad_ports)
        self.bad_ports = deque()
    try:
        return self.ports.popleft()
    except IndexError:
        raise RelayPortsExhaustedError()

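# A minimal sketch of how the pool consumed by get_ports() might be built;
# the port range and the even-port convention are assumptions, not taken
# from the original code.
from collections import deque

def make_port_pool(start=50000, end=60000):
    # one even RTP port per entry; the odd RTCP port (port + 1) is implied
    return deque(range(start, end, 2))
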
def send_command(self, command, headers):
    log.debug('Issuing "%s" command to relay at %s' % (command, self.ip))
    seq = str(self.sequence_number)
    self.sequence_number += 1
    defer = Deferred()
    timer = reactor.callLater(DispatcherConfig.relay_timeout, self._timeout, seq, defer)
    self.commands[seq] = (command, defer, timer)
    self.transport.write("\r\n".join([" ".join([command, seq])] + headers + ["", ""]))
    return defer

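# Hedged illustration of the wire format send_command() above produces: the
# command and sequence number on the first line, one header per line, and a
# blank line as terminator (hence the two trailing empty strings). The header
# value is invented.
def frame_command(command, seq, headers):
    return "\r\n".join([" ".join([command, seq])] + headers + ["", ""])

assert frame_command("update", "1", ["call_id: abc"]) == "update 1\r\ncall_id: abc\r\n\r\n"
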
def run(self):
    log.debug('Using {0.__class__.__name__}'.format(reactor))
    process.signals.add_handler(signal.SIGHUP, self._handle_signal)
    process.signals.add_handler(signal.SIGINT, self._handle_signal)
    process.signals.add_handler(signal.SIGTERM, self._handle_signal)
    process.signals.add_handler(signal.SIGUSR1, self._handle_signal)
    for accounting_module in self.accounting:
        accounting_module.start()
    reactor.run(installSignalHandlers=False)

def receive(self, message):
    # log.debug("{0.name} received: {1!r}".format(self, message))
    try:
        message = self.otr_session.handle_input(message, 'text/plain')
    except IgnoreMessage:
        return
    else:
        log.debug("{0.name} decoded: {1!r}".format(self, message))
        self.received_message = message
        self.all_done.set()

def datagramReceived(self, data, address):
    log.debug('Got MI response: {}'.format(data))
    try:
        response = json.loads(data)
    except ValueError:
        code, _, message = data.partition(' ')
        try:
            code = int(code)
        except ValueError:
            log.error('MI response from OpenSIPS cannot be parsed (neither JSON nor status reply)')
            return
        # We got one of the 'code message' replies, which indicates either a
        # parsing error or an internal error in OpenSIPS. If exactly one request
        # is pending we can associate the response with it; otherwise it is
        # impossible to tell which request the response belongs to, and the
        # failed request will fail with a timeout later.
        if len(self.transport.requests) == 1:
            _, request = self.transport.requests.popitem()
            request.deferred.errback(Failure(NegativeReplyError(code, message)))
            log.error('MI request {.method} failed with: {} {}'.format(request, code, message))
        else:
            log.error('Got MI status reply from OpenSIPS that cannot be associated with a request: {!r}'.format(data))
    else:
        try:
            request_id = response['id']
        except KeyError:
            log.error('MI JSON response from OpenSIPS lacks id field')
            return
        if request_id not in self.transport.requests:
            log.error('MI JSON response from OpenSIPS has unknown id: {!r}'.format(request_id))
            return
        request = self.transport.requests.pop(request_id)
        if 'result' in response:
            request.deferred.callback(response['result'])
        elif 'error' in response:
            log.error('MI request {0.method} failed with: {1[error][code]} {1[error][message]}'.format(request, response))
            request.deferred.errback(Failure(NegativeReplyError(response['error']['code'], response['error']['message'])))
        else:
            log.error('Invalid MI JSON response from OpenSIPS')
            request.deferred.errback(Failure(OpenSIPSError('Invalid MI JSON response from OpenSIPS')))

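# Hedged examples of the two OpenSIPS MI reply shapes that datagramReceived()
# above distinguishes; the ids and payloads are invented for illustration.
import json

jsonrpc_ok = json.dumps({'jsonrpc': '2.0', 'id': '42', 'result': 'OK'})
jsonrpc_err = json.dumps({'jsonrpc': '2.0', 'id': '42', 'error': {'code': 500, 'message': 'Server error'}})
status_reply = '500 command not available'  # handled by the "code message" fallback branch
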
def connectionLost(self, connector, reason, protocol):
    while protocol._request_queue:
        req = protocol._request_queue.pop()
        if not req.reliable:
            log.debug('Request is considered failed: %s', req)
            req.deferred.errback(failure.Failure(RatingEngineError("Connection with the Rating Engine is down")))
        else:
            log.debug('Saving request to be requeued later: %s', req)
            self.__unsent_req.appendleft(req)
    self.connection = None

def session_expired(self, call_id, from_tag):
    key = (call_id, from_tag)
    try:
        session = self.sessions[key]
    except KeyError:
        log.warn("A session expired that was no longer present on the relay")
        return
    log.debug("expired session %s" % session)
    session.cleanup()
    self.closed_byte_counter += session.relayed_bytes
    del self.sessions[key]
    self.relay.session_expired(session)
    self.relay.remove_session(session.dispatcher)

def remove_session(self, call_id, from_tag, to_tag=None, **kw):
    key = self._find_session_key(call_id, from_tag, to_tag)
    try:
        session = self.sessions[key]
    except KeyError:
        log.warn("The dispatcher tried to remove a session which is no longer present on the relay")
        return None
    log.debug("removing session %s" % session)
    session.cleanup()
    self.closed_byte_counter += session.relayed_bytes
    del self.sessions[key]
    reactor.callLater(0, self.relay.remove_session, session.dispatcher)
    return session

def _send_next_request(self):
    if self.connected:
        self.__request = self._request_queue.popleft()
        self.delimiter = b'\r\n'
        log.debug("Sending request to rating engine %s:%s: %s" % (self.transport.getPeer().host, self.transport.getPeer().port, self.__request.decode()))
        self.transport.write(self.__request)
        # self._set_timeout()
        self._set_timeout(self.factory.timeout)
        log.debug('Sent request to rating engine: %s', self.__request.decode())
    else:
        self.__request = None

def _respond(self, result, success=True):
    if self.__request is not None:
        req = self.__request
        self.__request = None
        try:
            if success:
                req.deferred.callback(result)
            else:
                req.deferred.errback(failure.Failure(RatingEngineError(result)))
        except defer.AlreadyCalledError:
            log.debug('Request %s was already responded to', req)
    if self._request_queue:
        self._send_next_request()

def send(self, request):
    try:
        self.transport.write(json.dumps(request.__data__), OpenSIPSConfig.socket_path)
    except socket.error as e:
        log.error("cannot write request to %s: %s" % (OpenSIPSConfig.socket_path, e.strerror))
        request.deferred.errback(Failure(Error("Cannot send MI request %s to OpenSIPS" % request.method)))
    else:
        self.transport.requests[request.id] = request
        request.deferred.addBoth(request.process_response)
        reactor.callLater(self.timeout, self._did_timeout, request)
        log.debug('Sent MI request: {}'.format(request.__data__))
    return request.deferred

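# Hedged sketch of the payload send() above writes to the OpenSIPS MI socket:
# request.__data__ serialized as JSON. The method name and id are invented
# for the example.
example_mi_request = {'jsonrpc': '2.0', 'method': 'ul_dump', 'id': '42'}
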
def update_session(self, dispatcher, call_id, from_tag, from_uri, to_uri, cseq, user_agent, type, media=[], to_tag=None, **kw):
    key = self._find_session_key(call_id, from_tag, to_tag)
    if key:
        session = self.sessions[key]
        log.debug("updating existing session %s" % session)
        is_downstream = (session.from_tag != from_tag) ^ (type == "request")
        is_caller_cseq = (session.from_tag == from_tag)
        session.update_media(cseq, to_tag, user_agent, media, is_downstream, is_caller_cseq)
    elif type == "reply" and not media:
        return None
    else:
        is_downstream = type == "request"
        is_caller_cseq = True
        session = Session(self, dispatcher, call_id, from_tag, from_uri, to_tag, to_uri, cseq, user_agent, media, is_downstream, is_caller_cseq)
        self.sessions[(call_id, from_tag)] = session
        self.relay.add_session(dispatcher)
        log.debug("created new session %s" % session)
    return session.get_local_media(is_downstream, cseq, is_caller_cseq)

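# The direction logic in update_session() above, spelled out: is_downstream is
# the XOR of "the message's from_tag differs from the session's caller tag"
# and "the message is a request". Illustrative truth table (derived directly
# from that expression):
for tags_differ, is_request, downstream in [(False, True, True),    # request with the caller's from_tag
                                            (True, True, False),    # request with another from_tag
                                            (False, False, False),  # reply with the caller's from_tag
                                            (True, False, True)]:   # reply with another from_tag
    assert (tags_differ ^ is_request) == downstream
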
def substream_expired(self, substream, reason, timeout_wait):
    if substream is self.rtp and self.caller.uses_ice and self.callee.uses_ice:
        log.debug("RTP stream expired for session %s: %s" % (self.session, reason))
        reason = "unselected ICE candidate"
        if not substream.caller.got_stun_probing and not substream.callee.got_stun_probing:
            log.debug("unselected ICE candidate for session %s but no STUN was received" % self.session)
    if substream is self.rtcp or (self.is_on_hold and reason == 'conntrack timeout'):
        # Forget the remote addresses; this causes any recurrence of the
        # same traffic to be forwarded again
        substream.caller.remote.forget()
        substream.caller.listener.protocol.send_packet_count = 0
        substream.callee.remote.forget()
        substream.callee.listener.protocol.send_packet_count = 0
    else:
        session = self.session
        self.cleanup(reason)
        self.timeout_wait = timeout_wait
        session.stream_expired(self)

def _process(self):
    try:
        req = Request(self.line_buf[0], self.line_buf[1:])
    except InvalidRequestError as e:
        log.info("Invalid OpenSIPS request: %s" % str(e))
        self._send_error_reply(failure.Failure(e))
    else:
        log.debug('Received request from OpenSIPS %s', req)

        def _unknown_handler(req):
            req.deferred.errback(failure.Failure(CommandError(req)))

        try:
            getattr(self, '_CC_%s' % req.cmd, _unknown_handler)(req)
        except Exception as e:
            self._send_error_reply(failure.Failure(e))
        else:
            req.deferred.addCallbacks(callback=self._send_reply, errback=self._send_error_reply)

def got_data(self, host, port, data):
    if (host, port) == tuple(self.remote):
        if self.remote.obsolete:
            # the received packet matches the previously used IP/port,
            # which has been made obsolete, so ignore it
            return
    else:
        if self.remote.in_use:
            # the received packet differs from the recorded IP/port,
            # so we will discard it
            return
        # we have learnt the remote IP/port
        self.remote.host, self.remote.port = host, port
        self.remote.in_use = True
        log.debug("Got traffic information for stream: %s" % self.substream.stream)
    is_stun, is_binding_request = _stun_test(data)
    self.substream.send_data(self, data, is_stun)
    if not self.remote.got_rtp and not is_stun:
        # this is the first RTP packet received
        self.remote.got_rtp = True
        if self.timer:
            if self.timer.active():
                self.timer.cancel()
            self.timer = None
        if self.codec == "Unknown" and self.substream is self.substream.stream.rtp:
            try:
                pt = ord(data[1]) & 127
            except IndexError:
                pass
            else:
                if pt > 95:
                    self.codec = "Dynamic(%d)" % pt
                elif pt in rtp_payloads:
                    self.codec = rtp_payloads[pt]
                else:
                    self.codec = "Unknown(%d)" % pt
        self.substream.check_create_conntrack()
    if is_binding_request:
        self.got_stun_probing = True

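# A minimal sketch of the _stun_test() helper used above, assuming classic
# STUN framing (RFC 5389): the two most significant bits of a STUN message
# type are zero, the header is 20 bytes, and a Binding Request has message
# type 0x0001. The real helper may differ.
import struct

def _stun_test_sketch(data):
    if len(data) < 20:  # too short to carry a STUN header
        return False, False
    msg_type, = struct.unpack('!H', data[:2])
    is_stun = (msg_type & 0xC000) == 0
    is_binding_request = is_stun and msg_type == 0x0001
    return is_stun, is_binding_request
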
def lineReceived(self, line):
    log.debug('Received response from rating engine %s:%s: %s' % (self.transport.getPeer().host, self.transport.getPeer().port, line.strip().replace("\n", " ")))
    if not line:
        return
    if self.__timeout_call is not None:
        self.__timeout_call.cancel()
    if self.__request is None:
        log.warning('Got reply for non-existing request: %s' % line)
        return
    try:
        self._respond(getattr(self, '_PE_%s' % self.__request.command.lower())(line))
    except AttributeError:
        self._respond("Unknown command in request. Cannot handle reply. Reply is: %s" % line, success=False)
    except Exception as e:
        self._respond(str(e), success=False)

def update_dispatchers(self, dispatchers):
    dispatchers = set(dispatchers)
    for new_dispatcher in dispatchers.difference(self.dispatchers):
        if new_dispatcher in self.old_connectors:
            log.debug('Restoring old dispatcher at %s:%d' % new_dispatcher)
            self.dispatcher_connectors[new_dispatcher] = self.old_connectors.pop(new_dispatcher)
        else:
            log.debug('Adding new dispatcher at %s:%d' % new_dispatcher)
            dispatcher_addr, dispatcher_port = new_dispatcher
            factory = DispatcherConnectingFactory(self, dispatcher_addr, dispatcher_port)
            self.dispatcher_connectors[new_dispatcher] = reactor.connectTLS(dispatcher_addr, dispatcher_port, factory, self.cred)
    for old_dispatcher in self.dispatchers.difference(dispatchers):
        log.debug('Removing old dispatcher at %s:%d' % old_dispatcher)
        self.old_connectors[old_dispatcher] = self.dispatcher_connectors.pop(old_dispatcher)
        self._check_disconnect(old_dispatcher)
    self.dispatchers = dispatchers

def update_dispatchers(self, dispatchers):
    dispatchers = set(dispatchers)
    for new_dispatcher in dispatchers.difference(self.dispatchers):
        if new_dispatcher in self.old_connectors:
            log.debug('Restoring old dispatcher at %s:%d' % new_dispatcher)
            self.dispatcher_connectors[new_dispatcher] = self.old_connectors.pop(new_dispatcher)
        else:
            log.debug('Adding new dispatcher at %s:%d' % new_dispatcher)
            dispatcher_addr, dispatcher_port = new_dispatcher
            factory = DispatcherConnectingFactory(self, dispatcher_addr, dispatcher_port)
            self.dispatcher_connectors[new_dispatcher] = reactor.connectTLS(dispatcher_addr, dispatcher_port, factory, self.tls_context)
    for old_dispatcher in self.dispatchers.difference(dispatchers):
        log.debug('Removing old dispatcher at %s:%d' % old_dispatcher)
        self.old_connectors[old_dispatcher] = self.dispatcher_connectors.pop(old_dispatcher)
        self._check_disconnect(old_dispatcher)
    self.dispatchers = dispatchers

def _do_cleanup(self, ip):
    log.debug('Cleaning up after old relay at %s' % ip)
    del self.cleanup_timers[ip]
    for call_id in (call_id for call_id, session in self.sessions.items() if session.relay_ip == ip):
        del self.sessions[call_id]

def update_media(self, cseq, to_tag, user_agent, media_list, is_downstream, is_caller_cseq):
    if self.cseq is None:
        old_cseq = (0, 0)
    else:
        old_cseq = self.cseq
    if is_caller_cseq:
        cseq = (cseq, old_cseq[1])
        if self.to_tag is None and to_tag is not None:
            self.to_tag = to_tag
    else:
        cseq = (old_cseq[0], cseq)
    if is_downstream:
        party = "caller"
        if self.caller_ua is None:
            self.caller_ua = user_agent
    else:
        party = "callee"
        if self.callee_ua is None:
            self.callee_ua = user_agent
    if self.cseq is None or cseq > self.cseq:
        if not media_list:
            return
        log.debug("Received new SDP offer")
        self.streams[cseq] = new_streams = []
        if self.cseq is None:
            old_streams = []
        else:
            old_streams = self.streams[self.cseq]
        for media_type, media_ip, media_port, media_direction, media_parameters in media_list:
            stream = None
            for old_stream in old_streams:
                old_remote = getattr(old_stream, party).remote_sdp
                old_uses_ice = getattr(old_stream, party).uses_ice
                if old_remote is not None:
                    old_ip, old_port = old_remote
                else:
                    old_ip, old_port = None, None
                if (old_stream.is_alive and old_stream.media_type == media_type
                        and (media_ip, media_port) in ((old_ip, old_port), ('0.0.0.0', old_port), (old_ip, 0))
                        and old_uses_ice == (media_parameters.get("ice", "no") == "yes")):
                    stream = old_stream
                    stream.check_hold(party, media_direction, media_ip)
                    log.debug("Found matching existing stream: %s" % stream)
                    break
            if stream is None:
                stream = MediaStream(self, media_type, media_ip, media_port, media_direction, media_parameters, party)
                log.debug("Added new stream: %s" % stream)
            if media_port == 0:
                stream.cleanup()
                log.debug("Stream explicitly closed: %s" % stream)
            new_streams.append(stream)
        if self.previous_cseq is not None:
            for stream in self.streams[self.previous_cseq]:
                if stream not in self.streams[self.cseq] + new_streams:
                    stream.cleanup()
        self.previous_cseq = self.cseq
        self.cseq = cseq
    elif self.cseq == cseq:
        log.debug("Received updated SDP answer")
        now = time()
        if self.start_time is None:
            self.start_time = now
        current_streams = self.streams[cseq]
        for stream in current_streams:
            if stream.start_time is None:
                stream.start_time = now
        if to_tag is not None and not media_list:
            return
        if len(media_list) < len(current_streams):
            for stream in current_streams[len(media_list):]:
                log.debug("Stream rejected by not being included in the SDP answer: %s" % stream)
                stream.cleanup("rejected")
                if stream.start_time is None:
                    stream.start_time = now
        for stream, (media_type, media_ip, media_port, media_direction, media_parameters) in zip(current_streams, media_list):
            if stream.media_type != media_type:
                raise ValueError('Media types do not match: "%s" and "%s"' % (stream.media_type, media_type))
            if media_port == 0:
                log.debug("Stream explicitly rejected: %s" % stream)
                stream.cleanup("rejected")
                continue
            stream.check_hold(party, media_direction, media_ip)
            party_info = getattr(stream, party)
            party_info.uses_ice = (media_parameters.get("ice", "no") == "yes")
            if party_info.remote_sdp is None or party_info.remote_sdp[0] == "0.0.0.0":
                party_info.remote_sdp = (media_ip, media_port)
                log.debug("Got initial answer from %s for stream: %s" % (party, stream))
            else:
                if party_info.remote_sdp[1] != media_port or (party_info.remote_sdp[0] != media_ip != '0.0.0.0'):
                    stream.reset(party, media_ip, media_port)
                    log.debug("Updated %s for stream: %s" % (party, stream))
                else:
                    log.debug("Unchanged stream: %s" % stream)
        if self.previous_cseq is not None:
            for stream in [stream for stream in self.streams[self.previous_cseq] if stream not in current_streams]:
                log.debug("Removing old stream: %s" % stream)
                stream.cleanup()
    else:
        log.debug("Received old CSeq %d:%d, ignoring" % cseq)

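# Hedged example of the media_list tuples that update_media() above consumes;
# the values are invented. A port of 0 marks a stream as closed/rejected, and
# the "ice" parameter drives the uses_ice matching.
example_media_list = [
    ('audio', '198.51.100.7', 40000, 'sendrecv', {'ice': 'yes'}),
    ('video', '198.51.100.7', 0, 'sendrecv', {}),  # explicitly closed stream
]
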
def __trace__(cls, message, *args):
    if cls.__tracing__ == log.level.INFO:
        log.info(message % args)
    elif cls.__tracing__ == log.level.DEBUG:
        log.debug(message % args)

def handle_notification(notification):
    log.debug("--- {0.name!s} from {0.sender!r} with data: {0.data!r}".format(notification))

def send(self, content, content_type='text/plain'):
    log.debug("{0.name} encoding: {1!r}".format(self, content))
    self.sent_message = content
    content = self.otr_session.handle_output(content, content_type)
    log.debug("{0.name} sending: {1!r}".format(self, content))
    self.send_queue.put(content)

def put(self, project_name):
    project = Project.query.filter_by(name=project_name).first()
    command = request.args['command']
    command_args = request.args.get('arguments')
    if command_args is not None:
        command_args = json.loads(command_args)
    file = request.files['file']

    # Get the value of the first (and only) result for the specified project setting
    svn_password = next((setting.value for setting in project.settings if setting.name == 'svn_password'))
    svn_default_user = next((setting.value for setting in project.settings if setting.name == 'svn_default_user'))
    # We get the actual username from the HTTP headers
    svn_user = auth.username()

    # If the setting does not exist, stop here and prevent any other operation
    if not svn_password:
        return make_response(jsonify({'message': 'SVN missing password settings'}), 500)

    if file and self.allowed_file(file.filename):
        os.makedirs(project.upload_path, exist_ok=True)
        local_client = svn.local.LocalClient(project.repository_path)
        # TODO, add the merge operation to a queue. Later on, the request could stop here
        # and all the next steps could be done in another loop, or triggered again via
        # another request
        filename = werkzeug.secure_filename(file.filename)
        tmp_filepath = os.path.join(project.upload_path, filename)
        file.save(tmp_filepath)

        # TODO, once all files are uploaded, unpack and run the tasklist (copy, add, remove
        # files on a filesystem level and subsequently as svn commands)
        import zipfile
        extract_tmp_dir = os.path.splitext(tmp_filepath)[0]
        with open(tmp_filepath, 'rb') as zip_file:
            zip_handle = zipfile.ZipFile(zip_file)
            zip_handle.extractall(extract_tmp_dir)
        del zip_file, zip_handle
        del zipfile

        with open(os.path.join(extract_tmp_dir, '.bam_paths_remap.json'), 'r') as path_remap:
            path_remap = json.load(path_remap)

        import shutil
        for src_file_path, dst_file_path in path_remap.items():
            assert os.path.exists(os.path.join(extract_tmp_dir, src_file_path))
            src_file_path_abs = os.path.join(extract_tmp_dir, src_file_path)
            dst_file_path_abs = os.path.join(project.repository_path, dst_file_path)
            os.makedirs(os.path.dirname(dst_file_path_abs), exist_ok=True)
            shutil.move(src_file_path_abs, dst_file_path_abs)

        # TODO, dry run commit (using commit message)
        # Seems not easily possible with SVN, so we might just smartly use svn status
        result = local_client.run_command('status', [local_client.info()['entry_path'], '--xml'], combine=True)
        # We parse the svn status XML output
        root = xml.etree.ElementTree.fromstring(result)
        # Loop through every entry reported by the svn status command
        for e in root.iter('entry'):
            file_path = e.attrib['path']
            item_status = e.find('wc-status').attrib['item']
            # We add each unversioned file to SVN
            if item_status == 'unversioned':
                result = local_client.run_command('add', [file_path])

        with open(os.path.join(extract_tmp_dir, '.bam_paths_ops.json'), 'r') as path_ops:
            path_ops = json.load(path_ops)
        log.debug(path_ops)
        for file_path, operation in path_ops.items():
            # TODO(fsiddi), collect all file paths and remove after
            if operation == 'D':
                file_path_abs = os.path.join(project.repository_path, file_path)
                assert os.path.exists(file_path_abs)
                result = local_client.run_command('rm', [file_path_abs])

        # Commit command
        result = local_client.run_command(
            'commit',
            [local_client.info()['entry_path'], '--no-auth-cache',
             '--message', command_args['message'],
             '--username', svn_user, '--password', svn_password],
            combine=True)
        return jsonify(message=result)
    else:
        return jsonify(message='File not allowed')

def buildProtocol(self, addr):
    ip = addr.host
    log.debug("Connection from relay at %s" % ip)
    prot = Factory.buildProtocol(self, addr)
    prot.ip = ip
    return prot

def connectionLost(self, reason):
    log.debug("Connection to %s lost: %s" % (self.description, reason.value))
    self.factory.connection_lost(self)

def debug(self, message, **context):
    log.debug(self.prefix + message, **context)

def connectionMade(self, connector):
    self.connection = connector.transport
    self.connection.protocol._request_queue.extend(self.__unsent_req)
    for req in self.__unsent_req:
        log.debug('Re-queueing request for the rating engine: %s', req)
    self.__unsent_req.clear()

def inject_otr_message(self, message):
    log.debug("{0.name} sending: {1!r}".format(self, message))
    self.send_queue.put(message)

def _handle_event(self, event):
    networks = self.networks
    role_map = ThorEntitiesRoleMap(event.message)  # mapping between role names and lists of nodes with that role
    role = 'rating_server'
    try:
        network = networks[role]
    except KeyError:
        from thor import network as thor_network
        network = thor_network.new(ThorNodeConfig.multiply)
        networks[role] = network
    # normalize node addresses to str, since they may arrive as bytes
    ips = [node.ip.decode('utf-8') if isinstance(node.ip, bytes) else node.ip for node in role_map.get(role, [])]
    nodes = [node.decode('utf-8') if isinstance(node, bytes) else node for node in network.nodes]
    new_nodes = set(ips)
    old_nodes = set(nodes)
    added_nodes = new_nodes - old_nodes
    removed_nodes = old_nodes - new_nodes
    if added_nodes:
        log.debug('added nodes: %s', added_nodes)
        for node in added_nodes:
            if isinstance(node, str):
                network.add_node(node.encode())
                address = RatingEngineAddress(node)
            else:
                network.add_node(node)
                address = RatingEngineAddress(node.decode())
            self.rating_connections[address] = RatingEngine(address)
        plural = 's' if len(added_nodes) != 1 else ''
        log.info("added %s node%s: %s" % (role, plural, ', '.join(added_nodes)))
    if removed_nodes:
        log.debug('removed nodes: %s', removed_nodes)
        for node in removed_nodes:
            if isinstance(node, str):
                network.remove_node(node.encode())
                address = RatingEngineAddress(node)
            else:
                network.remove_node(node)
                address = RatingEngineAddress(node.decode())
            self.rating_connections[address].shutdown()
            del self.rating_connections[address]
        plural = 's' if len(removed_nodes) != 1 else ''
        log.info("removed %s node%s: %s" % (role, plural, ', '.join(removed_nodes)))