-rwxr-xr-x  run_tests.sh  2
-rw-r--r--  ryu/services/protocols/bgp/api/all.py  6
-rw-r--r--  ryu/services/protocols/bgp/api/base.py  226
-rw-r--r--  ryu/services/protocols/bgp/api/core.py  88
-rw-r--r--  ryu/services/protocols/bgp/api/import_map.py  79
-rw-r--r--  ryu/services/protocols/bgp/api/operator.py  77
-rw-r--r--  ryu/services/protocols/bgp/api/prefix.py  95
-rw-r--r--  ryu/services/protocols/bgp/api/rpc_log_handler.py  36
-rw-r--r--  ryu/services/protocols/bgp/api/rtconf.py  169
-rw-r--r--  ryu/services/protocols/bgp/application.py  198
-rw-r--r--  ryu/services/protocols/bgp/base.py  464
-rw-r--r--  ryu/services/protocols/bgp/constants.py  50
-rw-r--r--  ryu/services/protocols/bgp/core.py  422
-rw-r--r--  ryu/services/protocols/bgp/core_manager.py  72
-rw-r--r--  ryu/services/protocols/bgp/core_managers/__init__.py  22
-rw-r--r--  ryu/services/protocols/bgp/core_managers/configuration_manager.py  125
-rw-r--r--  ryu/services/protocols/bgp/core_managers/import_map_manager.py  45
-rw-r--r--  ryu/services/protocols/bgp/core_managers/peer_manager.py  305
-rw-r--r--  ryu/services/protocols/bgp/core_managers/table_manager.py  500
-rw-r--r--  ryu/services/protocols/bgp/info_base/__init__.py  3
-rw-r--r--  ryu/services/protocols/bgp/info_base/base.py  795
-rw-r--r--  ryu/services/protocols/bgp/info_base/rtc.py  73
-rw-r--r--  ryu/services/protocols/bgp/info_base/vpn.py  109
-rw-r--r--  ryu/services/protocols/bgp/info_base/vpnv4.py  59
-rw-r--r--  ryu/services/protocols/bgp/info_base/vpnv6.py  59
-rw-r--r--  ryu/services/protocols/bgp/info_base/vrf.py  530
-rw-r--r--  ryu/services/protocols/bgp/info_base/vrf4.py  60
-rw-r--r--  ryu/services/protocols/bgp/info_base/vrf6.py  61
-rw-r--r--  ryu/services/protocols/bgp/model.py  148
-rw-r--r--  ryu/services/protocols/bgp/net_ctrl.py  397
-rw-r--r--  ryu/services/protocols/bgp/operator/command.py  269
-rw-r--r--  ryu/services/protocols/bgp/operator/commands/clear.py  54
-rw-r--r--  ryu/services/protocols/bgp/operator/commands/responses.py  34
-rw-r--r--  ryu/services/protocols/bgp/operator/commands/root.py  11
-rw-r--r--  ryu/services/protocols/bgp/operator/commands/set.py  65
-rw-r--r--  ryu/services/protocols/bgp/operator/commands/show/__init__.py  56
-rw-r--r--  ryu/services/protocols/bgp/operator/commands/show/count.py  53
-rw-r--r--  ryu/services/protocols/bgp/operator/commands/show/importmap.py  42
-rw-r--r--  ryu/services/protocols/bgp/operator/commands/show/memory.py  89
-rw-r--r--  ryu/services/protocols/bgp/operator/commands/show/neighbor.py  135
-rw-r--r--  ryu/services/protocols/bgp/operator/commands/show/rib.py  65
-rw-r--r--  ryu/services/protocols/bgp/operator/commands/show/route_formatter_mixin.py  43
-rw-r--r--  ryu/services/protocols/bgp/operator/commands/show/vrf.py  162
-rw-r--r--  ryu/services/protocols/bgp/operator/internal_api.py  157
-rw-r--r--  ryu/services/protocols/bgp/operator/views/__init__.py  1
-rw-r--r--  ryu/services/protocols/bgp/operator/views/base.py  302
-rw-r--r--  ryu/services/protocols/bgp/operator/views/bgp.py  273
-rw-r--r--  ryu/services/protocols/bgp/operator/views/conf.py  14
-rw-r--r--  ryu/services/protocols/bgp/operator/views/fields.py  69
-rw-r--r--  ryu/services/protocols/bgp/operator/views/other.py  34
-rw-r--r--  ryu/services/protocols/bgp/peer.py  1481
-rw-r--r--  ryu/services/protocols/bgp/processor.py  512
-rw-r--r--  ryu/services/protocols/bgp/protocol.py  87
-rw-r--r--  ryu/services/protocols/bgp/protocols/bgp/__init__.py  7
-rw-r--r--  ryu/services/protocols/bgp/protocols/bgp/capabilities.py  280
-rw-r--r--  ryu/services/protocols/bgp/protocols/bgp/exceptions.py  349
-rw-r--r--  ryu/services/protocols/bgp/protocols/bgp/messages.py  540
-rw-r--r--  ryu/services/protocols/bgp/protocols/bgp/nlri.py  841
-rw-r--r--  ryu/services/protocols/bgp/protocols/bgp/pathattr.py  1076
-rw-r--r--  ryu/services/protocols/bgp/rtconf/base.py  700
-rw-r--r--  ryu/services/protocols/bgp/rtconf/common.py  334
-rw-r--r--  ryu/services/protocols/bgp/rtconf/neighbors.py  469
-rw-r--r--  ryu/services/protocols/bgp/rtconf/vrfs.py  551
-rw-r--r--  ryu/services/protocols/bgp/signals/__init__.py  5
-rw-r--r--  ryu/services/protocols/bgp/signals/base.py  33
-rw-r--r--  ryu/services/protocols/bgp/signals/emit.py  55
-rw-r--r--  ryu/services/protocols/bgp/speaker.py  596
-rw-r--r--  ryu/services/protocols/bgp/utils/bgp.py  121
-rw-r--r--  ryu/services/protocols/bgp/utils/circlist.py  265
-rw-r--r--  ryu/services/protocols/bgp/utils/dictconfig.py  562
-rw-r--r--  ryu/services/protocols/bgp/utils/evtlet.py  140
-rw-r--r--  ryu/services/protocols/bgp/utils/internable.py  102
-rw-r--r--  ryu/services/protocols/bgp/utils/logs.py  35
-rw-r--r--  ryu/services/protocols/bgp/utils/other.py  11
-rw-r--r--  ryu/services/protocols/bgp/utils/rtfilter.py  219
-rw-r--r--  ryu/services/protocols/bgp/utils/stats.py  100
-rw-r--r--  ryu/services/protocols/bgp/utils/validation.py  234
77 files changed, 16907 insertions(+), 1 deletion(-)
diff --git a/run_tests.sh b/run_tests.sh
index e558f1d4..3873fc9a 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -106,7 +106,7 @@ run_pylint() {
run_pep8() {
echo "Running pep8 ..."
- PEP8_EXCLUDE="vcsversion.py,*.pyc,contrib"
+ PEP8_EXCLUDE="vcsversion.py,*.pyc,contrib,dictconfig.py"
PEP8_OPTIONS="--exclude=$PEP8_EXCLUDE --repeat --show-source"
PEP8_INCLUDE="ryu setup*.py"
PEP8_LOG=pep8.log
diff --git a/ryu/services/protocols/bgp/api/all.py b/ryu/services/protocols/bgp/api/all.py
new file mode 100644
index 00000000..2cde697a
--- /dev/null
+++ b/ryu/services/protocols/bgp/api/all.py
@@ -0,0 +1,6 @@
+# flake8: noqa
+import core
+import operator
+import prefix
+import rtconf
+import import_map
diff --git a/ryu/services/protocols/bgp/api/base.py b/ryu/services/protocols/bgp/api/base.py
new file mode 100644
index 00000000..e43f0745
--- /dev/null
+++ b/ryu/services/protocols/bgp/api/base.py
@@ -0,0 +1,226 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Public API for BGPSpeaker.
+
+ This API can be used by various services like RPC, CLI, IoC, etc.
+"""
+import inspect
+import logging
+import traceback
+
+from ryu.services.protocols.bgp.base import add_bgp_error_metadata
+from ryu.services.protocols.bgp.base import API_ERROR_CODE
+from ryu.services.protocols.bgp.base import BGPSException
+from ryu.services.protocols.bgp.core_manager import CORE_MANAGER
+from ryu.services.protocols.bgp.rtconf.base import get_validator
+from ryu.services.protocols.bgp.rtconf.base import MissingRequiredConf
+from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
+
+
+LOG = logging.getLogger('bgpspeaker.api.base')
+
+# Various constants used in API calls
+ROUTE_DISTINGUISHER = 'route_dist'
+PREFIX = 'prefix'
+NEXT_HOP = 'next_hop'
+VPN_LABEL = 'label'
+API_SYM = 'name'
+ORIGIN_RD = 'origin_rd'
+
+# API call registry
+_CALL_REGISTRY = {}
+
+
+@add_bgp_error_metadata(code=API_ERROR_CODE,
+ sub_code=1,
+ def_desc='Unknown API error.')
+class ApiException(BGPSException):
+ pass
+
+
+@add_bgp_error_metadata(code=API_ERROR_CODE,
+ sub_code=2,
+ def_desc='API symbol or method is not known.')
+class MethodNotFound(ApiException):
+ pass
+
+
+@add_bgp_error_metadata(code=API_ERROR_CODE,
+ sub_code=3,
+ def_desc='Error related to BGPS core not starting.')
+class CoreNotStarted(ApiException):
+ pass
+
+
+def register(**kwargs):
+ """Decorator for registering API function.
+
+ Does not do any check or validation.
+ """
+ def decorator(func):
+ _CALL_REGISTRY[kwargs.get(API_SYM, func.func_name)] = func
+ return func
+
+ return decorator
+
+
+def register_method(name):
+ """Decorator for registering methods that provide BGPS public API.
+ """
+ def decorator(func):
+ setattr(func, '__api_method_name__', name)
+ return func
+
+ return decorator
+
+
+def register_class(cls):
+ """Decorator for the registering class whose instance methods provide BGPS
+ public API.
+ """
+ old_init = cls.__init__
+
+ def new_init(self, *args, **kwargs):
+ old_init(self, *args, **kwargs)
+ api_registered_methods = \
+ [(m_name, m) for m_name, m in
+ inspect.getmembers(cls, predicate=inspect.ismethod)
+ if hasattr(m, '__api_method_name__')]
+
+ for _, method in api_registered_methods:
+ api_name = getattr(method, '__api_method_name__')
+
+ def create_wrapper(method):
+ def api_method_wrapper(*args, **kwargs):
+ return method(self, *args, **kwargs)
+ return api_method_wrapper
+
+ register(name=api_name)(create_wrapper(method))
+
+ cls.__init__ = new_init
+ return cls
+
+
+class RegisterWithArgChecks(object):
+ """Decorator for registering API functions.
+
+ Does some argument checking and validation of required arguments.
+ """
+ def __init__(self, name, req_args=None, opt_args=None):
+ self._name = name
+ if not req_args:
+ req_args = []
+ self._req_args = req_args
+ if not opt_args:
+ opt_args = []
+ self._opt_args = opt_args
+ self._all_args = (set(self._req_args) | set(self._opt_args))
+
+ def __call__(self, func):
+ """Wraps given function and registers it as API.
+
+ Returns original function.
+ """
+ def wrapped_fun(**kwargs):
+ """Wraps a function to do validation before calling actual func.
+
+ Wraps a function to take keyword arguments only. Checks that:
+ 1) all required arguments of the wrapped function are provided
+ 2) no extra/unknown arguments are passed
+ 3) a validator is available for each required argument
+ 4) all required arguments pass validation
+ Raises exception if no validator can be found for required args.
+ """
+ # Check if we are missing arguments.
+ if not kwargs and len(self._req_args) > 0:
+ raise MissingRequiredConf(desc='Missing all required '
+ 'attributes.')
+
+ # Check if we have unknown arguments.
+ given_args = set(kwargs.keys())
+ unknown_attrs = given_args - set(self._all_args)
+ if unknown_attrs:
+ raise RuntimeConfigError(desc=('Unknown attributes %r' %
+ unknown_attrs))
+
+ # Check if required arguments are missing
+ missing_req_args = set(self._req_args) - given_args
+ if missing_req_args:
+ conf_name = ', '.join(missing_req_args)
+ raise MissingRequiredConf(conf_name=conf_name)
+
+ #
+ # Prepare to call wrapped function.
+ #
+ # Collect required arguments in the order asked and validate it.
+ req_values = []
+ for req_arg in self._req_args:
+ req_value = kwargs.get(req_arg)
+ # Validate required value.
+ validator = get_validator(req_arg)
+ if not validator:
+ raise ValueError('No validator registered for function %s'
+ ' and arg. %s' % (func, req_arg))
+ validator(req_value)
+ req_values.append(req_value)
+
+ # Collect optional arguments.
+ opt_items = {}
+ for opt_arg, opt_value in kwargs.iteritems():
+ if opt_arg in self._opt_args:
+ opt_items[opt_arg] = opt_value
+
+ # Call actual function
+ return func(*req_values, **opt_items)
+
+ # Register wrapped function
+ _CALL_REGISTRY[self._name] = wrapped_fun
+ return func
+
+
+def is_call_registered(call_name):
+ return call_name in _CALL_REGISTRY
+
+
+def get_call(call_name):
+ return _CALL_REGISTRY.get(call_name)
+
+
+def call(symbol, **kwargs):
+ """Calls/executes BGPS public API identified by given symbol and passes
+ given kwargs as param.
+ """
+ LOG.info("API method %s called with args: %s", symbol, str(kwargs))
+
+ # TODO(PH, JK) improve the way api function modules are loaded
+ import all # noqa
+ if not is_call_registered(symbol):
+ message = 'Did not find any method registered by symbol %s' % symbol
+ raise MethodNotFound(message)
+
+ if not symbol.startswith('core') and not CORE_MANAGER.started:
+ raise CoreNotStarted(desc='CoreManager is not active.')
+
+ call = get_call(symbol)
+ try:
+ return call(**kwargs)
+ except BGPSException as r:
+ LOG.error(traceback.format_exc())
+ raise r
+ except Exception as e:
+ LOG.error(traceback.format_exc())
+ raise ApiException(desc=str(e))
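Usage sketch (not part of the diff): how a function registered through this module is dispatched by symbol via call(). The symbol 'core.echo', the 'text' argument and its validator are hypothetical; the 'core.' prefix is used only because call() rejects other symbols until the BGPS core is running.

    from ryu.services.protocols.bgp.api.base import call
    from ryu.services.protocols.bgp.api.base import RegisterWithArgChecks
    from ryu.services.protocols.bgp.base import validate

    # Hypothetical validator; RegisterWithArgChecks looks validators up by
    # argument name through get_validator().
    @validate(name='text')
    def is_valid_text(text):
        return isinstance(text, str)

    # Hypothetical API function registered under the symbol 'core.echo'.
    @RegisterWithArgChecks(name='core.echo', req_args=['text'])
    def echo(text):
        return {'echo': text}

    # kwargs are checked and validated before the wrapped function runs.
    result = call('core.echo', text='hello')  # -> {'echo': 'hello'}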
diff --git a/ryu/services/protocols/bgp/api/core.py b/ryu/services/protocols/bgp/api/core.py
new file mode 100644
index 00000000..0580f615
--- /dev/null
+++ b/ryu/services/protocols/bgp/api/core.py
@@ -0,0 +1,88 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Defines APIs related to Core/CoreManager.
+"""
+import eventlet
+
+from ryu.services.protocols.bgp.api.base import register
+from ryu.services.protocols.bgp.core_manager import CORE_MANAGER
+from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
+from ryu.services.protocols.bgp.rtconf.common import CommonConf
+
+
+NEIGHBOR_RESET_WAIT_TIME = 3
+
+
+@register(name='core.start')
+def start(**kwargs):
+ """Starts new context using provided configuration.
+
+ Raises RuntimeConfigError if a context is already active.
+ """
+ if CORE_MANAGER.started:
+ raise RuntimeConfigError('Current context has to be stopped to start '
+ 'a new context.')
+
+ common_config = CommonConf(**kwargs)
+ eventlet.spawn(CORE_MANAGER.start, *[], **{'common_conf': common_config})
+ eventlet.sleep(2)
+ return True
+
+
+@register(name='core.stop')
+def stop(**kwargs):
+ """Stops current context is one is active.
+
+ Raises RuntimeConfigError if runtime is not active or initialized yet.
+ """
+ if not CORE_MANAGER.started:
+ raise RuntimeConfigError('No runtime is active. Call start to create '
+ 'a runtime')
+ CORE_MANAGER.stop()
+ return True
+
+
+@register(name='core.reset_neighbor')
+def reset_neighor(ip_address):
+ neighs_conf = CORE_MANAGER.neighbors_conf
+ neigh_conf = neighs_conf.get_neighbor_conf(ip_address)
+ # Check if we have neighbor with given IP.
+ if not neigh_conf:
+ raise RuntimeConfigError('No neighbor configuration found for given'
+ ' IP: %s' % ip_address)
+ # If neighbor is enabled, we disable it.
+ if neigh_conf.enabled:
+ # Disable neighbor to close existing session.
+ neigh_conf.enabled = False
+ # Yield here to give the neighbor a chance to be disabled.
+ eventlet.sleep(NEIGHBOR_RESET_WAIT_TIME)
+ # Enable neighbor, so that we have a new session with it.
+ neigh_conf.enabled = True
+ else:
+ raise RuntimeConfigError('Neighbor %s is not enabled, hence cannot'
+ ' reset.' % ip_address)
+ return True
+
+
+#==============================================================================
+# Common configuration related APIs
+#==============================================================================
+
+@register(name='comm_conf.get')
+def get_common_conf():
+ comm_conf = CORE_MANAGER.common_conf
+ return comm_conf.settings
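A minimal sketch of driving these APIs through call(); the AS number and router-id values are placeholders, and LOCAL_AS/ROUTER_ID are the minimum settings that application.py (later in this commit) passes to 'core.start'.

    from ryu.services.protocols.bgp.api.base import call
    from ryu.services.protocols.bgp.rtconf.common import LOCAL_AS
    from ryu.services.protocols.bgp.rtconf.common import ROUTER_ID

    # Start the BGPS core; core.start spawns CORE_MANAGER.start in a
    # greenthread and returns once it has been given a chance to run.
    call('core.start', **{LOCAL_AS: 64512, ROUTER_ID: '10.0.0.1'})

    # Stop it again; raises RuntimeConfigError if no runtime is active.
    call('core.stop')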
diff --git a/ryu/services/protocols/bgp/api/import_map.py b/ryu/services/protocols/bgp/api/import_map.py
new file mode 100644
index 00000000..2a23e8ee
--- /dev/null
+++ b/ryu/services/protocols/bgp/api/import_map.py
@@ -0,0 +1,79 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Import-map configuration.
+"""
+import logging
+
+from ryu.services.protocols.bgp.api.base import register
+from ryu.services.protocols.bgp.core_manager import CORE_MANAGER
+from ryu.services.protocols.bgp.core_managers.import_map_manager\
+ import ImportMapAlreadyExistsError
+from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
+
+LOG = logging.getLogger('bgpspeaker.api.import_map')
+
+
+@register(name='importmap.create')
+def create_importmap(type, action, name, value, route_family=None):
+ if action != 'drop':
+ raise RuntimeConfigError(
+ 'Unknown action. For now we only support "drop" action.'
+ )
+
+ if type not in ('prefix_match', 'rt_match'):
+ raise RuntimeConfigError(
+ 'Unknown type. We support only "prefix_match" and "rt_match".'
+ )
+
+ if type == 'prefix_match':
+ return _create_prefix_match_importmap(name, value, route_family)
+ elif type == 'rt_match':
+ return _create_rt_match_importmap(name, value)
+
+
+def _create_prefix_match_importmap(name, value, route_family):
+ core_service = CORE_MANAGER.get_core_service()
+ importmap_manager = core_service.importmap_manager
+ try:
+ if route_family == 'ipv4':
+ importmap_manager.create_vpnv4_nlri_import_map(name, value)
+ elif route_family == 'ipv6':
+ importmap_manager.create_vpnv6_nlri_import_map(name, value)
+ else:
+ raise RuntimeConfigError(
+ 'Unknown address family %s. It should be ipv4 or ipv6'
+ % route_family
+ )
+ except ImportMapAlreadyExistsError:
+ raise RuntimeConfigError(
+ 'Map with this name already exists'
+ )
+
+ return True
+
+
+def _create_rt_match_importmap(name, value):
+ core_service = CORE_MANAGER.get_core_service()
+ importmap_manager = core_service.importmap_manager
+ try:
+ importmap_manager.create_rt_import_map(name, value)
+ except ImportMapAlreadyExistsError:
+ raise RuntimeConfigError(
+ 'Map with this name already exists'
+ )
+
+ return True
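Usage sketch, assuming a running core and an existing VRF; the map name and prefix value are placeholders.

    from ryu.services.protocols.bgp.api.base import call

    # Prefix-match import map that drops matching VPNv4 routes.
    call('importmap.create', type='prefix_match', action='drop',
         name='drop-lab-prefix', value='10.10.0.0/16', route_family='ipv4')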
diff --git a/ryu/services/protocols/bgp/api/operator.py b/ryu/services/protocols/bgp/api/operator.py
new file mode 100644
index 00000000..acaa5c79
--- /dev/null
+++ b/ryu/services/protocols/bgp/api/operator.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ API for the operator. Mainly commands used to build a CLI and an
+ operator interface around them.
+"""
+import logging
+
+from ryu.services.protocols.bgp.api.base import ApiException
+from ryu.services.protocols.bgp.api.base import register_class
+from ryu.services.protocols.bgp.api.base import register_method
+from ryu.services.protocols.bgp.api.rpc_log_handler import RpcLogHandler
+from ryu.services.protocols.bgp.operator.command import Command
+from ryu.services.protocols.bgp.operator.command import STATUS_ERROR
+from ryu.services.protocols.bgp.operator.commands.clear import ClearCmd
+from ryu.services.protocols.bgp.operator.commands.set import SetCmd
+from ryu.services.protocols.bgp.operator.commands.show import ShowCmd
+from ryu.services.protocols.bgp.operator.internal_api import InternalApi
+
+LOG = logging.getLogger('bgpspeaker.api.operator')
+
+
+class RootCmd(Command):
+ subcommands = {
+ 'show': ShowCmd,
+ 'set': SetCmd,
+ 'clear': ClearCmd}
+
+
+@register_class
+class OperatorApi(object):
+ default_log_format = '%(asctime)s %(levelname)s %(message)s'
+
+ def __init__(self):
+ self._init_log_handler()
+ self.internal_api = InternalApi(self.log_handler)
+
+ def _init_log_handler(self):
+ self.log_handler = RpcLogHandler()
+ self.log_handler.setLevel(logging.ERROR)
+ self.log_handler.formatter = logging.Formatter(self.default_log_format)
+
+ @register_method(name="operator.show")
+ def show(self, **kwargs):
+ return self._run('show', kw=kwargs)
+
+ @register_method(name="operator.set")
+ def set(self, **kwargs):
+ return self._run('set', kw=kwargs)
+
+ @register_method(name="operator.clear")
+ def clear(self, **kwargs):
+ return self._run('clear', kw=kwargs)
+
+ def _run(self, cmd, kw={}):
+ params = kw.get('params', [])
+ fmt = kw.get('format', 'json')
+ root = RootCmd(api=self.internal_api, resp_formatter_name=fmt)
+ ret, _ = root([cmd] + params)
+ if ret.status == STATUS_ERROR:
+ raise ApiException(str(ret.value))
+ return ret.value
+
+_OPERATOR_API = OperatorApi()
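Usage sketch, assuming a running core: operator commands are invoked through call() with a list of command words; the 'neighbor' sub-command used here is assumed to be one of the show commands added under operator/commands/show/.

    from ryu.services.protocols.bgp.api.base import call

    # Runs 'show neighbor' through the RootCmd tree and returns the result
    # rendered by the JSON response formatter.
    output = call('operator.show', params=['neighbor'], format='json')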
diff --git a/ryu/services/protocols/bgp/api/prefix.py b/ryu/services/protocols/bgp/api/prefix.py
new file mode 100644
index 00000000..3d1047e3
--- /dev/null
+++ b/ryu/services/protocols/bgp/api/prefix.py
@@ -0,0 +1,95 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Prefix related APIs.
+"""
+import logging
+
+from ryu.services.protocols.bgp.api.base import NEXT_HOP
+from ryu.services.protocols.bgp.api.base import PREFIX
+from ryu.services.protocols.bgp.api.base import RegisterWithArgChecks
+from ryu.services.protocols.bgp.api.base import ROUTE_DISTINGUISHER
+from ryu.services.protocols.bgp.api.base import VPN_LABEL
+from ryu.services.protocols.bgp.base import add_bgp_error_metadata
+from ryu.services.protocols.bgp.base import PREFIX_ERROR_CODE
+from ryu.services.protocols.bgp.base import validate
+from ryu.services.protocols.bgp.core import BgpCoreError
+from ryu.services.protocols.bgp.core_manager import CORE_MANAGER
+from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
+from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF
+from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_IPV4
+from ryu.services.protocols.bgp.utils import validation
+
+
+LOG = logging.getLogger('bgpspeaker.api.prefix')
+
+
+@add_bgp_error_metadata(code=PREFIX_ERROR_CODE,
+ sub_code=1,
+ def_desc='Unknown error related to operation on '
+ 'prefixes')
+class PrefixError(RuntimeConfigError):
+ pass
+
+
+@validate(name=PREFIX)
+def is_valid_prefix(ipv4_prefix):
+ return validation.is_valid_ipv4_prefix(ipv4_prefix)
+
+
+@validate(name=NEXT_HOP)
+def is_valid_next_hop(next_hop_addr):
+ return validation.is_valid_ipv4(next_hop_addr)
+
+
+@RegisterWithArgChecks(name='prefix.add_local',
+ req_args=[ROUTE_DISTINGUISHER, PREFIX, NEXT_HOP],
+ opt_args=[VRF_RF])
+def add_local(route_dist, prefix, next_hop, route_family=VRF_RF_IPV4):
+ """Adds *prefix* from VRF identified by *route_dist* and sets the source as
+ network controller.
+ """
+ try:
+ # Create new path and insert into appropriate VRF table.
+ tm = CORE_MANAGER.get_core_service().table_manager
+ label = tm.add_to_vrf(route_dist, prefix, next_hop, route_family)
+ # Currently we only allocate one label per local_prefix,
+ # so we use the first label from the list.
+ if label:
+ label = label[0]
+
+ # Send success response with new label.
+ return [{ROUTE_DISTINGUISHER: route_dist, PREFIX: prefix,
+ VRF_RF: route_family, VPN_LABEL: label}]
+ except BgpCoreError as e:
+ raise PrefixError(desc=e)
+
+
+@RegisterWithArgChecks(name='prefix.delete_local',
+ req_args=[ROUTE_DISTINGUISHER, PREFIX],
+ opt_args=[VRF_RF])
+def delete_local(route_dist, prefix, route_family=VRF_RF_IPV4):
+ """Deletes/withdraws *prefix* from VRF identified by *route_dist* and
+ source as network controller.
+ """
+ try:
+ tm = CORE_MANAGER.get_core_service().table_manager
+ tm.remove_from_vrf(route_dist, prefix, route_family)
+ # Send success response to ApgwAgent.
+ return [{ROUTE_DISTINGUISHER: route_dist, PREFIX: prefix,
+ VRF_RF: route_family}]
+ except BgpCoreError as e:
+ raise PrefixError(desc=e)
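Usage sketch, assuming the core is running and a VRF with route distinguisher '65000:100' has already been created via 'vrf.create'; all values are placeholders.

    from ryu.services.protocols.bgp.api.base import call

    # Inject a prefix into the VRF; the response carries the allocated label.
    call('prefix.add_local', route_dist='65000:100',
         prefix='10.20.0.0/24', next_hop='172.16.0.1')

    # Withdraw it again.
    call('prefix.delete_local', route_dist='65000:100', prefix='10.20.0.0/24')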
diff --git a/ryu/services/protocols/bgp/api/rpc_log_handler.py b/ryu/services/protocols/bgp/api/rpc_log_handler.py
new file mode 100644
index 00000000..9b7fbba2
--- /dev/null
+++ b/ryu/services/protocols/bgp/api/rpc_log_handler.py
@@ -0,0 +1,36 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Defines a log handler used to send log records over the RPC connection.
+"""
+
+import logging
+
+from ryu.services.protocols.bgp.net_ctrl import NET_CONTROLLER
+from ryu.services.protocols.bgp.net_ctrl import NOTF_LOG
+
+
+class RpcLogHandler(logging.Handler):
+ """Outputs log records to `NET_CONTROLLER`."""
+ def emit(self, record):
+ msg = self.format(record)
+ NET_CONTROLLER.send_rpc_notification(
+ NOTF_LOG,
+ {
+ 'level': record.levelname,
+ 'msg': msg
+ }
+ )
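A minimal sketch of attaching this handler to the standard logging tree; the logger name and level are illustrative. OperatorApi above creates and configures an instance of this handler for its internal API in a similar way.

    import logging

    from ryu.services.protocols.bgp.api.rpc_log_handler import RpcLogHandler

    # Relay WARNING-and-above records from the bgpspeaker loggers to RPC peers.
    handler = RpcLogHandler()
    handler.setLevel(logging.WARNING)
    handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logging.getLogger('bgpspeaker').addHandler(handler)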
diff --git a/ryu/services/protocols/bgp/api/rtconf.py b/ryu/services/protocols/bgp/api/rtconf.py
new file mode 100644
index 00000000..aca14a26
--- /dev/null
+++ b/ryu/services/protocols/bgp/api/rtconf.py
@@ -0,0 +1,169 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Runtime configuration manager.
+"""
+import logging
+
+from ryu.services.protocols.bgp.api.base import register
+from ryu.services.protocols.bgp.api.base import RegisterWithArgChecks
+from ryu.services.protocols.bgp.core_manager import CORE_MANAGER
+from ryu.services.protocols.bgp.rtconf.base import ConfWithId
+from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
+from ryu.services.protocols.bgp.rtconf import neighbors
+from ryu.services.protocols.bgp.rtconf.neighbors import NeighborConf
+from ryu.services.protocols.bgp.rtconf.vrfs import ROUTE_DISTINGUISHER
+from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF
+from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_IPV4
+from ryu.services.protocols.bgp.rtconf.vrfs import VrfConf
+
+LOG = logging.getLogger('bgpspeaker.api.rtconf')
+
+
+#==============================================================================
+# Neighbor configuration related APIs
+#==============================================================================
+
+
+def _get_neighbor_conf(neigh_ip_address):
+ """Returns neighbor configuration for given neighbor ip address.
+
+ Raises exception if no neighbor with `neigh_ip_address` exists.
+ """
+ neigh_conf = \
+ CORE_MANAGER.neighbors_conf.get_neighbor_conf(neigh_ip_address)
+ if not neigh_conf:
+ raise RuntimeConfigError(desc='No Neighbor configuration with IP'
+ ' address %s' % neigh_ip_address)
+ assert isinstance(neigh_conf, NeighborConf)
+ return neigh_conf
+
+
+@register(name='neighbor.create')
+def create_neighbor(**kwargs):
+ neigh_conf = NeighborConf(**kwargs)
+ CORE_MANAGER.neighbors_conf.add_neighbor_conf(neigh_conf)
+ return True
+
+
+@RegisterWithArgChecks(name='neighbor.update_enabled',
+ req_args=[neighbors.IP_ADDRESS, neighbors.ENABLED])
+def update_neighbor_enabled(neigh_ip_address, enabled):
+ neigh_conf = _get_neighbor_conf(neigh_ip_address)
+ neigh_conf.enabled = enabled
+ return True
+
+
+@RegisterWithArgChecks(name='neighbor.update',
+ req_args=[neighbors.IP_ADDRESS, neighbors.CHANGES])
+def update_neighbor(neigh_ip_address, changes):
+ rets = []
+ for k, v in changes.iteritems():
+ if k == neighbors.MULTI_EXIT_DISC:
+ rets.append(_update_med(neigh_ip_address, v))
+
+ if k == neighbors.ENABLED:
+ rets.append(update_neighbor_enabled(neigh_ip_address, v))
+
+ return all(rets)
+
+
+def _update_med(neigh_ip_address, value):
+ neigh_conf = _get_neighbor_conf(neigh_ip_address)
+ neigh_conf.multi_exit_disc = value
+ LOG.info('MED value for neigh: %s updated to %s' % (neigh_conf, value))
+ return True
+
+
+@RegisterWithArgChecks(name='neighbor.delete',
+ req_args=[neighbors.IP_ADDRESS])
+def delete_neighbor(neigh_ip_address):
+ neigh_conf = _get_neighbor_conf(neigh_ip_address)
+ if neigh_conf:
+ neigh_conf.enabled = False
+ CORE_MANAGER.neighbors_conf.remove_neighbor_conf(neigh_ip_address)
+ return True
+ return False
+
+
+@RegisterWithArgChecks(name='neighbor.get',
+ req_args=[neighbors.IP_ADDRESS])
+def get_neighbor_conf(neigh_ip_address):
+ """Returns a neighbor configuration for given ip address if exists."""
+ neigh_conf = _get_neighbor_conf(neigh_ip_address)
+ return neigh_conf.settings
+
+
+@register(name='neighbors.get')
+def get_neighbors_conf():
+ return CORE_MANAGER.neighbors_conf.settings
+
+
+#==============================================================================
+# VRF configuration related APIs
+#==============================================================================
+
+@register(name='vrf.create')
+def create_vrf(**kwargs):
+ vrf_conf = VrfConf(**kwargs)
+ CORE_MANAGER.vrfs_conf.add_vrf_conf(vrf_conf)
+ return True
+
+
+@register(name='vrf.update')
+def update_vrf(**kwargs):
+ route_dist = kwargs.get(ROUTE_DISTINGUISHER)
+ vrf_id = kwargs.get(ConfWithId.ID)
+ vrf_rf = kwargs.get(VRF_RF)
+ vrf_conf = CORE_MANAGER.vrfs_conf.get_vrf_conf(
+ route_dist, vrf_rf, vrf_id=vrf_id
+ )
+
+ # If we do not have a VrfConf with given id, we create one.
+ if not vrf_conf:
+ create_vrf(**kwargs)
+ else:
+ vrf_conf.update(**kwargs)
+ return True
+
+
+@RegisterWithArgChecks(name='vrf.delete', req_args=[ROUTE_DISTINGUISHER])
+def delete_vrf(route_dist):
+ vrf_conf = CORE_MANAGER.vrfs_conf.remove_vrf_conf(route_dist)
+ if vrf_conf:
+ return True
+
+ return False
+
+
+@RegisterWithArgChecks(
+ name='vrf.get',
+ req_args=[ROUTE_DISTINGUISHER],
+ opt_args=[VRF_RF])
+def get_vrf(route_dist, route_family=VRF_RF_IPV4):
+ vrf_conf = CORE_MANAGER.vrfs_conf.get_vrf_conf(
+ route_dist, vrf_rf=route_family
+ )
+ if not vrf_conf:
+ raise RuntimeConfigError(desc='No VrfConf with vpn id %s' %
+ route_dist)
+ return vrf_conf.settings
+
+
+@register(name='vrfs.get')
+def get_vrfs_conf():
+ vrfs_conf = CORE_MANAGER.vrfs_conf
+ return vrfs_conf.settings
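Usage sketch for the neighbor APIs, assuming the core has been started; the peer address and remote AS are placeholders, and REMOTE_AS is assumed to be defined in rtconf/neighbors.py alongside IP_ADDRESS and ENABLED.

    from ryu.services.protocols.bgp.api.base import call
    from ryu.services.protocols.bgp.rtconf import neighbors

    # Create an eBGP neighbor, then administratively disable it.
    call('neighbor.create', **{neighbors.IP_ADDRESS: '192.168.1.2',
                               neighbors.REMOTE_AS: 65001})
    call('neighbor.update_enabled', **{neighbors.IP_ADDRESS: '192.168.1.2',
                                       neighbors.ENABLED: False})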
diff --git a/ryu/services/protocols/bgp/application.py b/ryu/services/protocols/bgp/application.py
new file mode 100644
index 00000000..dbff33d8
--- /dev/null
+++ b/ryu/services/protocols/bgp/application.py
@@ -0,0 +1,198 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ Defines base classes to create a BGP application.
+"""
+import eventlet
+import imp
+import logging
+import traceback
+
+from ryu.services.protocols.bgp.api.base import call
+from ryu.services.protocols.bgp.base import add_bgp_error_metadata
+from ryu.services.protocols.bgp.base import BGPSException
+from ryu.services.protocols.bgp.base import BIN_ERROR
+from ryu.services.protocols.bgp.core_manager import CORE_MANAGER
+from ryu.services.protocols.bgp import net_ctrl
+from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
+from ryu.services.protocols.bgp.rtconf.common import BGP_SERVER_PORT
+from ryu.services.protocols.bgp.rtconf.common import DEFAULT_BGP_SERVER_PORT
+from ryu.services.protocols.bgp.rtconf.common import \
+ DEFAULT_REFRESH_MAX_EOR_TIME
+from ryu.services.protocols.bgp.rtconf.common import \
+ DEFAULT_REFRESH_STALEPATH_TIME
+from ryu.services.protocols.bgp.rtconf.common import LABEL_RANGE
+from ryu.services.protocols.bgp.rtconf.common import LOCAL_AS
+from ryu.services.protocols.bgp.rtconf.common import REFRESH_MAX_EOR_TIME
+from ryu.services.protocols.bgp.rtconf.common import REFRESH_STALEPATH_TIME
+from ryu.services.protocols.bgp.rtconf.common import ROUTER_ID
+from ryu.services.protocols.bgp.rtconf import neighbors
+from ryu.services.protocols.bgp.utils.dictconfig import dictConfig
+from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4
+
+LOG = logging.getLogger('bgpspeaker.application')
+
+
+@add_bgp_error_metadata(code=BIN_ERROR,
+ sub_code=1,
+ def_desc='Unknown bootstrap exception.')
+class ApplicationException(BGPSException):
+ """Specific Base exception related to `BaseApplication`."""
+ pass
+
+
+class BaseApplication(object):
+ def __init__(self, bind_ip, bind_port, config_file=None):
+ self.bind_ip = BaseApplication.validate_rpc_ip(bind_ip)
+ self.bind_port = BaseApplication.validate_rpc_port(bind_port)
+ self.config_file = config_file
+
+ def start(self):
+ # Only two main green threads are required for APGW bgp-agent.
+ # One for NetworkController, another for BGPS core.
+ pool = eventlet.GreenPool()
+
+ # If a configuration file was provided and loaded successfully, we start
+ # the BGPS core using these settings. If no configuration file is provided
+ # or if it is missing the minimum required settings, the BGPS core is not
+ # started.
+ if self.config_file:
+ LOG.debug('Loading config. from settings file.')
+ settings = self.load_config(self.config_file)
+ # Configure log settings, if available.
+ if getattr(settings, 'LOGGING', None):
+ dictConfig(settings.LOGGING)
+
+ if getattr(settings, 'BGP', None):
+ self._start_core(settings)
+
+ # Start Network Controller to serve RPC peers.
+ pool.spawn(net_ctrl.NET_CONTROLLER.start, *[],
+ **{net_ctrl.NC_RPC_BIND_IP: self.bind_ip,
+ net_ctrl.NC_RPC_BIND_PORT: self.bind_port})
+ LOG.debug('Started Network Controller')
+
+ # Wait for Network Controller and/or BGPS to finish
+ pool.waitall()
+
+ @classmethod
+ def validate_rpc_ip(cls, ip):
+ """Validates given ip for use as rpc host bind address.
+ """
+ if not is_valid_ipv4(ip):
+ raise ApplicationException(desc='Invalid rpc ip address.')
+ return ip
+
+ @classmethod
+ def validate_rpc_port(cls, port):
+ """Validates give port for use as rpc server port.
+ """
+ if not port:
+ raise ApplicationException(desc='Invalid rpc port number.')
+ if not isinstance(port, (int, long)) and isinstance(port, str):
+ port = int(port)
+
+ return port
+
+ def load_config(self, config_file):
+ """Validates give file as settings file for BGPSpeaker.
+
+ Load the configuration from file as bgpspeaker.setting module.
+ """
+ if not config_file or not isinstance(config_file, str):
+ raise ApplicationException('Invalid configuration file.')
+
+ # Check if file can be read
+ try:
+ return imp.load_source('bgpspeaker.settings', config_file)
+ except Exception as e:
+ raise ApplicationException(desc=str(e))
+
+ def _start_core(self, settings):
+ """Starts BGPS core using setting and given pool.
+ """
+ # Get common settings
+ routing_settings = settings.BGP.get('routing')
+ common_settings = {}
+
+ # Get required common settings.
+ try:
+ common_settings[LOCAL_AS] = routing_settings.pop(LOCAL_AS)
+ common_settings[ROUTER_ID] = routing_settings.pop(ROUTER_ID)
+ except KeyError as e:
+ raise ApplicationException(
+ desc='Required minimum configuration missing %s' %
+ e)
+
+ # Get optional common settings
+ common_settings[BGP_SERVER_PORT] = \
+ routing_settings.get(BGP_SERVER_PORT, DEFAULT_BGP_SERVER_PORT)
+ common_settings[REFRESH_STALEPATH_TIME] = \
+ routing_settings.get(REFRESH_STALEPATH_TIME,
+ DEFAULT_REFRESH_STALEPATH_TIME)
+ common_settings[REFRESH_MAX_EOR_TIME] = \
+ routing_settings.get(REFRESH_MAX_EOR_TIME,
+ DEFAULT_REFRESH_MAX_EOR_TIME)
+ label_range = routing_settings[LABEL_RANGE]
+ if label_range:
+ common_settings[LABEL_RANGE] = label_range
+
+ # Start BGPS core service
+ call('core.start', **common_settings)
+ # Give chance for core to start running
+
+ # TODO(Team): How to wait for core start to happen?!
+ eventlet.sleep(3)
+
+ LOG.debug('Core started %s' % CORE_MANAGER.started)
+ # Core manager started; add configured neighbors and VRFs.
+ if CORE_MANAGER.started:
+ # Add neighbors.
+ self._add_neighbors(routing_settings)
+
+ # Add Vrfs.
+ self._add_vrfs(routing_settings)
+
+ def _add_neighbors(self, routing_settings):
+ """Add bgp peers/neighbors from given settings to BGPS runtime.
+
+ All valid neighbors are loaded. Misconfigured neighbors are ignored
+ and an error is logged.
+ """
+ bgp_neighbors = routing_settings.get('bgp_neighbors')
+ for ip, bgp_neighbor in bgp_neighbors.items():
+ try:
+ bgp_neighbor[neighbors.IP_ADDRESS] = ip
+ call('neighbor.create', **bgp_neighbor)
+ LOG.debug('Added neighbor %s' % ip)
+ except RuntimeConfigError as re:
+ LOG.error(re)
+ LOG.error(traceback.format_exc())
+ continue
+
+ def _add_vrfs(self, routing_settings):
+ """Add VRFs from given settings to BGPS runtime.
+
+ If any of the VRFs are misconfigured, errors are logged.
+ All valid VRFs are loaded.
+ """
+ vpns_conf = routing_settings.get('vpns')
+ for vrf in vpns_conf:
+ try:
+ call('vrf.create', **vrf)
+ LOG.debug('Added vrf %s' % str(vrf))
+ except RuntimeConfigError as e:
+ LOG.error(e)
+ continue
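A sketch of the settings module that load_config() expects; the key names come from the constants imported above, while the label-range format and the empty neighbor/VPN sections are placeholders.

    # bgpspeaker_settings.py -- loaded via imp.load_source() by load_config().
    from ryu.services.protocols.bgp.rtconf.common import LABEL_RANGE
    from ryu.services.protocols.bgp.rtconf.common import LOCAL_AS
    from ryu.services.protocols.bgp.rtconf.common import ROUTER_ID

    BGP = {
        'routing': {
            LOCAL_AS: 64512,
            ROUTER_ID: '10.0.0.1',
            # _start_core() indexes LABEL_RANGE directly, so it must be set;
            # a (min, max) pair is assumed here.
            LABEL_RANGE: (100, 100000),
            # Maps peer IP -> NeighborConf kwargs; empty for this sketch.
            'bgp_neighbors': {},
            # List of VrfConf kwargs; empty for this sketch.
            'vpns': [],
        },
    }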
diff --git a/ryu/services/protocols/bgp/base.py b/ryu/services/protocols/bgp/base.py
new file mode 100644
index 00000000..d68f1ad4
--- /dev/null
+++ b/ryu/services/protocols/bgp/base.py
@@ -0,0 +1,464 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ Defines some base classes related to managing green threads.
+"""
+import abc
+import eventlet
+import logging
+import time
+import traceback
+import weakref
+
+from eventlet.timeout import Timeout
+from ryu.services.protocols.bgp.protocols.bgp import nlri
+from ryu.services.protocols.bgp.utils.circlist import CircularListType
+from ryu.services.protocols.bgp.utils.evtlet import LoopingCall
+
+
+# Logger instance for this module.
+LOG = logging.getLogger('bgpspeaker.base')
+
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from ordereddict import OrderedDict
+
+# Pointer to active/available OrderedDict.
+OrderedDict = OrderedDict
+
+
+# Currently supported address families.
+SUPPORTED_GLOBAL_RF = set([nlri.RF_IPv4_VPN,
+ nlri.RF_RTC_UC,
+ nlri.RF_IPv6_VPN
+ ])
+
+
+# Various error codes
+ACTIVITY_ERROR_CODE = 100
+RUNTIME_CONF_ERROR_CODE = 200
+BIN_ERROR = 300
+NET_CTRL_ERROR_CODE = 400
+API_ERROR_CODE = 500
+PREFIX_ERROR_CODE = 600
+BGP_PROCESSOR_ERROR_CODE = 700
+CORE_ERROR_CODE = 800
+
+# Registry of custom exceptions
+# Key: code:sub-code
+# Value: exception class
+_EXCEPTION_REGISTRY = {}
+
+
+class BGPSException(Exception):
+ """Base exception class for all BGPS related exceptions.
+ """
+
+ CODE = 1
+ SUB_CODE = 1
+ DEF_DESC = 'Unknown exception.'
+
+ def __init__(self, desc=None):
+ super(BGPSException, self).__init__()
+ if not desc:
+ desc = self.__class__.DEF_DESC
+ kls = self.__class__
+ self.message = '%d.%d - %s' % (kls.CODE, kls.SUB_CODE, desc)
+
+ def __repr__(self):
+ kls = self.__class__
+ return '<%s(desc=%s)>' % (kls, self.message)
+
+ def __str__(self, *args, **kwargs):
+ return self.message
+
+
+def add_bgp_error_metadata(code, sub_code, def_desc='unknown'):
+ """Decorator for all exceptions that want to set exception class meta-data.
+ """
+ # Check registry if we already have an exception with same code/sub-code
+ if _EXCEPTION_REGISTRY.get((code, sub_code)) is not None:
+ raise ValueError('BGPSException with code %d and sub-code %d '
+ 'already defined.' % (code, sub_code))
+
+ def decorator(klass):
+ """Sets class constants for exception code and sub-code.
+
+ If the given class is a sub-class of BGPSException, we set its class constants.
+ """
+ if issubclass(klass, BGPSException):
+ _EXCEPTION_REGISTRY[(code, sub_code)] = klass
+ klass.CODE = code
+ klass.SUB_CODE = sub_code
+ klass.DEF_DESC = def_desc
+ return klass
+ return decorator
+
+
+@add_bgp_error_metadata(code=ACTIVITY_ERROR_CODE,
+ sub_code=1,
+ def_desc='Unknown activity exception.')
+class ActivityException(BGPSException):
+ """Base class for exceptions related to Activity.
+ """
+ pass
+
+
+class Activity(object):
+ """Base class for a thread of execution that provides some custom settings.
+
+ Activity is also a container of other activities or threads that it has
+ started. Inside an Activity you should always use one of the spawn methods
+ to start another activity or greenthread. An Activity also holds pointers
+ to sockets that it or its child activities or threads have created.
+ """
+ __metaclass__ = abc.ABCMeta
+
+ def __init__(self, name=None):
+ self._name = name
+ if self._name is None:
+ self._name = 'UnknownActivity: ' + str(time.time())
+ self._child_thread_map = weakref.WeakValueDictionary()
+ self._child_activity_map = weakref.WeakValueDictionary()
+ self._asso_socket_map = weakref.WeakValueDictionary()
+ self._timers = weakref.WeakValueDictionary()
+ self._started = False
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def started(self):
+ return self._started
+
+ def _validate_activity(self, activity):
+ """Checks the validity of the given activity before it can be started.
+ """
+ if not self._started:
+ raise ActivityException(desc='Tried to spawn a child activity'
+ ' before Activity was started.')
+
+ if activity.started:
+ raise ActivityException(desc='Tried to start an Activity that was '
+ 'already started.')
+
+ def _spawn_activity(self, activity, *args, **kwargs):
+ """Starts *activity* in a new thread and passes *args* and *kwargs*.
+
+ Maintains pointer to this activity and stops *activity* when this
+ activity is stopped.
+ """
+ self._validate_activity(activity)
+
+ # Spawn a new greenthread for given activity
+ greenthread = eventlet.spawn(activity.start, *args, **kwargs)
+ self._child_thread_map[activity.name] = greenthread
+ self._child_activity_map[activity.name] = activity
+ return greenthread
+
+ def _spawn_activity_after(self, seconds, activity, *args, **kwargs):
+ self._validate_activity(activity)
+
+ # Schedule to spawn a new greenthread after requested delay
+ greenthread = eventlet.spawn_after(seconds, activity.start, *args,
+ **kwargs)
+ self._child_thread_map[activity.name] = greenthread
+ self._child_activity_map[activity.name] = activity
+ return greenthread
+
+ def _validate_callable(self, callable_):
+ if callable_ is None:
+ raise ActivityException(desc='Callable cannot be None')
+
+ if not hasattr(callable_, '__call__'):
+ raise ActivityException(desc='Currently only supports instances'
+ ' that have __call__ as callable which'
+ ' is missing in given arg.')
+ if not self._started:
+ raise ActivityException(desc='Tried to spawn a child thread '
+ 'before this Activity was started.')
+
+ def _spawn(self, name, callable_, *args, **kwargs):
+ self._validate_callable(callable_)
+ greenthread = eventlet.spawn(callable_, *args, **kwargs)
+ self._child_thread_map[name] = greenthread
+ return greenthread
+
+ def _spawn_after(self, name, seconds, callable_, *args, **kwargs):
+ self._validate_callable(callable_)
+ greenthread = eventlet.spawn_after(seconds, callable_, *args, **kwargs)
+ self._child_thread_map[name] = greenthread
+ return greenthread
+
+ def _create_timer(self, name, func, *arg, **kwarg):
+ timer = LoopingCall(func, *arg, **kwarg)
+ self._timers[name] = timer
+ return timer
+
+ @abc.abstractmethod
+ def _run(self, *args, **kwargs):
+ """Main activity of this class.
+
+ Can launch other activity/callables here.
+ Sub-classes should override this method.
+ """
+ raise NotImplementedError()
+
+ def start(self, *args, **kwargs):
+ """Starts the main activity of this class.
+
+ Calls *_run* and calls *stop* when *_run* is finished.
+ This method should be run in a new greenthread as it may not return
+ immediately.
+ """
+ if self.started:
+ raise ActivityException(desc='Activity already started')
+
+ self._started = True
+ try:
+ self._run(*args, **kwargs)
+ except BGPSException:
+ LOG.error(traceback.format_exc())
+ finally:
+ if self.started: # could have been stopped somewhere else
+ self.stop()
+
+ def pause(self, seconds=0):
+ """Relinquishes eventlet hub for given number of seconds.
+
+ In other words, it sleeps to give other greenthreads a chance to
+ run.
+ """
+ eventlet.sleep(seconds)
+
+ def _stop_child_activities(self):
+ """Stop all child activities spawn by this activity.
+ """
+ # Iterating over items list instead of iteritems to avoid dictionary
+ # changed size during iteration
+ child_activities = self._child_activity_map.items()
+ for child_name, child_activity in child_activities:
+ LOG.debug('%s: Stopping child activity %s ' %
+ (self.name, child_name))
+ if child_activity.started:
+ child_activity.stop()
+
+ def _stop_child_threads(self):
+ """Stops all threads spawn by this activity.
+ """
+ child_threads = self._child_thread_map.items()
+ for thread_name, thread in child_threads:
+ LOG.debug('%s: Stopping child thread %s' %
+ (self.name, thread_name))
+ thread.kill()
+
+ def _close_asso_sockets(self):
+ """Closes all the sockets linked to this activity.
+ """
+ asso_sockets = self._asso_socket_map.items()
+ for sock_name, sock in asso_sockets:
+ LOG.debug('%s: Closing socket %s - %s' %
+ (self.name, sock_name, sock))
+ sock.close()
+
+ def _stop_timers(self):
+ timers = self._timers.items()
+ for timer_name, timer in timers:
+ LOG.debug('%s: Stopping timer %s' % (self.name, timer_name))
+ timer.stop()
+
+ def stop(self):
+ """Stops all child threads and activities and closes associated
+ sockets.
+
+ Re-initializes this activity to be able to start again.
+ Raise `ActivityException` if activity is not currently started.
+ """
+ if not self.started:
+ raise ActivityException(desc='Cannot call stop when activity is '
+ 'not started or has been stopped already.')
+
+ LOG.debug('Stopping activity %s.' % (self.name))
+ self._stop_timers()
+ self._stop_child_activities()
+ self._stop_child_threads()
+ self._close_asso_sockets()
+
+ # Setup activity for start again.
+ self._started = False
+ self._asso_socket_map = weakref.WeakValueDictionary()
+ self._child_activity_map = weakref.WeakValueDictionary()
+ self._child_thread_map = weakref.WeakValueDictionary()
+ self._timers = weakref.WeakValueDictionary()
+ LOG.debug('Stopping activity %s finished.' % self.name)
+
+ def _listen_tcp(self, loc_addr, conn_handle):
+ """Creates a TCP server socket which listens on `port` number.
+
+ For each connection `server_factory` starts a new protocol.
+ """
+ server = eventlet.listen(loc_addr)
+ server_name = self.name + '_server@' + str(loc_addr)
+ self._asso_socket_map[server_name] = server
+
+ # We now wait for connection requests from client.
+ while True:
+ sock, client_address = server.accept()
+ LOG.debug('Connect request received from client for port'
+ ' %s:%s' % client_address)
+ client_name = self.name + '_client@' + str(client_address)
+ self._asso_socket_map[client_name] = sock
+ self._spawn(client_name, conn_handle, sock)
+
+ def _connect_tcp(self, peer_addr, conn_handler, time_out=None,
+ bind_address=None):
+ """Creates a TCP connection to given peer address.
+
+ Tries to create a socket for `time_out` seconds. If
+ successful, spawns `conn_handler` with the socket instance.
+ The socket is bound to `bind_address` if specified.
+ """
+ LOG.debug('Connect TCP called for %s:%s' % (peer_addr[0],
+ peer_addr[1]))
+ with Timeout(time_out, False):
+ sock = eventlet.connect(peer_addr, bind=bind_address)
+ if sock:
+ # Connection name for pro-active connection is made up
+ # of local end address + remote end address
+ conn_name = ('L: ' + str(sock.getsockname()) + ', R: ' +
+ str(sock.getpeername()))
+ self._asso_socket_map[conn_name] = sock
+ # If connection is established, we call connection handler
+ # in a new thread.
+ self._spawn(conn_name, conn_handler, sock)
+
+
+#
+# Sink
+#
+class Sink(object):
+ """An entity to which we send out messages (eg. BGP routes)."""
+
+ #
+ # OutgoingMsgList
+ #
+ # A circular list type in which objects are linked to each
+ # other using the 'next_sink_out_route' and 'prev_sink_out_route'
+ # attributes.
+ #
+ OutgoingMsgList = CircularListType(next_attr_name='next_sink_out_route',
+ prev_attr_name='prev_sink_out_route')
+
+ # Next available index that can identify an instance uniquely.
+ idx = 0
+
+ @staticmethod
+ def next_index():
+ """Increments the sink index and returns the value."""
+ Sink.idx = Sink.idx + 1
+ return Sink.idx
+
+ def __init__(self):
+ # A small integer that represents this sink.
+ self.index = Sink.next_index()
+
+ # Event used to signal enqueuing.
+ from utils.evtlet import EventletIOFactory
+ self.outgoing_msg_event = EventletIOFactory.create_custom_event()
+
+ self.messages_queued = 0
+ # List of msgs. that are to be sent to this peer. Each item
+ # in the list is an instance of OutgoingRoute.
+ self.outgoing_msg_list = Sink.OutgoingMsgList()
+
+ def clear_outgoing_msg_list(self):
+ self.outgoing_msg_list = Sink.OutgoingMsgList()
+
+ def enque_outgoing_msg(self, msg):
+ self.outgoing_msg_list.append(msg)
+ self.outgoing_msg_event.set()
+
+ self.messages_queued += 1
+
+ def enque_first_outgoing_msg(self, msg):
+ self.outgoing_msg_list.prepend(msg)
+ self.outgoing_msg_event.set()
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ """Pops and returns the first outgoing message from the list.
+
+ If the message list currently has no messages, the calling thread will
+ be put to sleep until we have at least one message in the list that
+ can be popped and returned.
+ """
+ # We pick the first outgoing available and send it.
+ outgoing_msg = self.outgoing_msg_list.pop_first()
+ # If we do not have any outgoing msg., we wait.
+ if outgoing_msg is None:
+ self.outgoing_msg_event.clear()
+ self.outgoing_msg_event.wait()
+ outgoing_msg = self.outgoing_msg_list.pop_first()
+
+ return outgoing_msg
+
+
+#
+# Source
+#
+class Source(object):
+ """An entity that gives us BGP routes. A BGP peer, for example."""
+
+ def __init__(self, version_num):
+ # Number that is currently being used to stamp information
+ # received from this source. We will bump this number up when
+ # the information that is now expected from the source belongs
+ # to a different logical batch. This mechanism can be used to
+ # identify stale information.
+ self.version_num = version_num
+
+
+class FlexinetPeer(Source, Sink):
+ def __init__(self):
+ # Initialize source and sink
+ Source.__init__(self, 1)
+ Sink.__init__(self)
+
+
+# Registry of validators for configuration/settings.
+_VALIDATORS = {}
+
+
+def validate(**kwargs):
+ """Defines a decorator to register a validator with a name for look-up.
+
+ If name is not provided we use function name as name of the validator.
+ """
+ def decorator(func):
+ _VALIDATORS[kwargs.pop('name', func.func_name)] = func
+ return func
+
+ return decorator
+
+
+def get_validator(name):
+ """Returns a validator registered for given name.
+ """
+ return _VALIDATORS.get(name)
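A toy Activity subclass, to illustrate the lifecycle the base class enforces; the class name and its behaviour are purely illustrative.

    import eventlet

    from ryu.services.protocols.bgp.base import Activity

    class IdleActivity(Activity):
        """Yields to the eventlet hub a few times and then returns."""

        def __init__(self):
            super(IdleActivity, self).__init__(name='idle_activity')

        def _run(self, count=3):
            for _ in range(count):
                # pause() sleeps so other greenthreads can run.
                self.pause(1)

    # start() runs _run() and calls stop() when it finishes, so it is
    # normally spawned in its own greenthread (here directly; inside
    # another Activity, _spawn_activity() would be used instead).
    eventlet.spawn(IdleActivity().start, count=2).wait()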
diff --git a/ryu/services/protocols/bgp/constants.py b/ryu/services/protocols/bgp/constants.py
new file mode 100644
index 00000000..b1af9b0a
--- /dev/null
+++ b/ryu/services/protocols/bgp/constants.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ Module that holds various constants.
+
+ This module helps in breaking circular dependencies too.
+"""
+
+# Various states of bgp state machine.
+BGP_FSM_IDLE = 'Idle'
+BGP_FSM_CONNECT = 'Connect'
+BGP_FSM_ACTIVE = 'Active'
+BGP_FSM_OPEN_SENT = 'OpenSent'
+BGP_FSM_OPEN_CONFIRM = 'OpenConfirm'
+BGP_FSM_ESTABLISHED = 'Established'
+
+# All valid BGP finite state machine states.
+BGP_FSM_VALID_STATES = (BGP_FSM_IDLE, BGP_FSM_CONNECT, BGP_FSM_ACTIVE,
+ BGP_FSM_OPEN_SENT, BGP_FSM_OPEN_CONFIRM,
+ BGP_FSM_ESTABLISHED)
+
+# Supported bgp protocol version number.
+BGP_VERSION_NUM = 4
+
+# Standard BGP server port number.
+STD_BGP_SERVER_PORT_NUM = 179
+
+#
+# Constants used to indicate VRF prefix source.
+#
+# VPN_TABLE indicates that a prefix inside the VRF table came from a BGP peer
+# via the VPN table and was then imported into the VRF table.
+VPN_TABLE = 'vpn_table'
+VRF_TABLE = 'vrf_table'
+
+# RTC EOR timer default value
+# Time to wait for RTC-EOR, before we can send initial UPDATE as per RFC
+RTC_EOR_DEFAULT_TIME = 60
diff --git a/ryu/services/protocols/bgp/core.py b/ryu/services/protocols/bgp/core.py
new file mode 100644
index 00000000..01ed37b9
--- /dev/null
+++ b/ryu/services/protocols/bgp/core.py
@@ -0,0 +1,422 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Core of BGPSpeaker.
+
+ Provides CoreService, which is responsible for establishing BGP sessions with
+ peers and for maintaining VRFs and global tables.
+"""
+import logging
+
+from ryu.services.protocols.bgp.protocols.bgp import exceptions
+from ryu.services.protocols.bgp.protocols.bgp import nlri
+from ryu.services.protocols.bgp.protocols.bgp import pathattr
+from ryu.services.protocols.bgp.base import Activity
+from ryu.services.protocols.bgp.base import add_bgp_error_metadata
+from ryu.services.protocols.bgp.base import BGPSException
+from ryu.services.protocols.bgp.base import CORE_ERROR_CODE
+from ryu.services.protocols.bgp.constants import STD_BGP_SERVER_PORT_NUM
+from ryu.services.protocols.bgp import core_managers
+from ryu.services.protocols.bgp.model import FlexinetOutgoingRoute
+from ryu.services.protocols.bgp.protocol import Factory
+from ryu.services.protocols.bgp.signals.emit import BgpSignalBus
+from ryu.services.protocols.bgp.speaker import BgpProtocol
+from ryu.services.protocols.bgp.utils.rtfilter import RouteTargetManager
+from ryu.services.protocols.bgp.utils import stats
+
+
+LOG = logging.getLogger('bgpspeaker.core')
+
+# Interface IP address on which to run bgp server. Core service listens on all
+# interfaces of the host on port 179 - standard bgp port.
+CORE_IP = '0.0.0.0'
+
+# Requirement dictates that the Origin attribute be INCOMPLETE.
+EXPECTED_ORIGIN = pathattr.Origin.INCOMPLETE
+
+
+@add_bgp_error_metadata(code=CORE_ERROR_CODE, sub_code=1,
+ def_desc='Unknown error occurred related to core.')
+class BgpCoreError(BGPSException):
+ """Base exception related to all tables and peer management.
+ """
+ pass
+
+
+class CoreService(Factory, Activity):
+ """A service that maintains eBGP/iBGP sessions with BGP peers.
+
+ Two instances of this class don't share any BGP state with each
+ other. Manages peers, tables for various address-families, etc.
+ """
+
+ protocol = BgpProtocol
+
+ def __init__(self, common_conf, neighbors_conf, vrfs_conf):
+ self._common_config = common_conf
+ self._neighbors_conf = neighbors_conf
+ self._vrfs_conf = vrfs_conf
+
+ Activity.__init__(self, name='core_service')
+
+ self._signal_bus = BgpSignalBus()
+ self._init_signal_listeners()
+
+ self._rt_mgr = RouteTargetManager(self, neighbors_conf, vrfs_conf)
+
+ self._table_manager = core_managers.TableCoreManager(
+ self, common_conf
+ )
+
+ self._importmap_manager = core_managers.ImportMapManager()
+
+ # Autonomous system number of this BGP speaker.
+ self._asn = self._common_config.local_as
+
+ self._peer_manager = core_managers.PeerManager(
+ self,
+ self._neighbors_conf,
+ )
+
+ # Initialize sink for flexinet-peers
+ self._sinks = set()
+
+ self._conf_manager = core_managers.ConfigurationManager(
+ self, common_conf, vrfs_conf, neighbors_conf
+ )
+
+ # Register Flexinet peer sink
+ from ryu.services.protocols.bgp.net_ctrl import NET_CONTROLLER
+
+ self.register_flexinet_sink(NET_CONTROLLER)
+
+ # State per route family
+ # Key: RouteFamily
+ # Value: BgpInstanceRf
+ self.rf_state = {}
+
+ # Protocol factories for pro-active and re-active bgp-sessions.
+ self.client_factory = None
+ self.server_factory = None
+
+ # Key: RD:Next_Hop
+ # Value: label
+ self._next_hop_label = {}
+
+ # BgpProcessor instance (initialized during start)
+ self._bgp_processor = None
+
+ def _init_signal_listeners(self):
+ self._signal_bus.register_listener(
+ BgpSignalBus.BGP_DEST_CHANGED,
+ lambda _, dest: self.enqueue_for_bgp_processing(dest)
+ )
+ self._signal_bus.register_listener(
+ BgpSignalBus.BGP_VRF_REMOVED,
+ lambda _, route_dist: self.on_vrf_removed(route_dist)
+ )
+ self._signal_bus.register_listener(
+ BgpSignalBus.BGP_VRF_ADDED,
+ lambda _, vrf_conf: self.on_vrf_added(vrf_conf)
+ )
+ self._signal_bus.register_listener(
+ BgpSignalBus.BGP_VRF_STATS_CONFIG_CHANGED,
+ lambda _, vrf_conf: self.on_stats_config_change(vrf_conf)
+ )
+
+ @property
+ def router_id(self):
+ return self._common_config.router_id
+
+ @property
+ def global_interested_rts(self):
+ return self._rt_mgr.global_interested_rts
+
+ @property
+ def asn(self):
+ return self._asn
+
+ @property
+ def table_manager(self):
+ return self._table_manager
+
+ @property
+ def importmap_manager(self):
+ return self._importmap_manager
+
+ @property
+ def peer_manager(self):
+ return self._peer_manager
+
+ @property
+ def rt_manager(self):
+ return self._rt_mgr
+
+ @property
+ def signal_bus(self):
+ return self._signal_bus
+
+ def enqueue_for_bgp_processing(self, dest):
+ return self._bgp_processor.enqueue(dest)
+
+ def on_vrf_removed(self, route_dist):
+ # Remove stats timer linked with this vrf.
+ vrf_stats_timer = self._timers.get(route_dist)
+ if vrf_stats_timer:
+ vrf_stats_timer.stop()
+ del self._timers[route_dist]
+
+ def on_vrf_added(self, vrf_conf):
+ # Setup statistics timer.
+ rd = vrf_conf.route_dist
+ rf = vrf_conf.route_family
+ vrf_table = self._table_manager.get_vrf_table(rd, rf)
+ vrf_stats_timer = self._create_timer(
+ rd,
+ stats.log,
+ stats_source=vrf_table.get_stats_summary_dict
+ )
+
+ # Start statistics timer if applicable.
+ if vrf_conf.stats_log_enabled:
+ vrf_stats_timer.start(vrf_conf.stats_time)
+
+ def on_stats_config_change(self, vrf_conf):
+ vrf_stats_timer = self._timers.get(
+ vrf_conf.route_dist
+ )
+ vrf_stats_timer.stop()
+ vrf_stats_timer.start(vrf_conf.stats_time)
+
+ def _run(self, *args, **kwargs):
+ from ryu.services.protocols.bgp.processor import BgpProcessor
+ # Initialize bgp processor.
+ self._bgp_processor = BgpProcessor(self)
+ # Start BgpProcessor in a separate thread.
+ processor_thread = self._spawn_activity(self._bgp_processor)
+
+ # Pro-actively try to establish bgp-session with peers.
+ for peer in self._peer_manager.iterpeers:
+ self._spawn_activity(peer, self)
+
+ # Reactively establish bgp-session with peer by listening on
+ # server port for connection requests.
+ server_addr = (CORE_IP, self._common_config.bgp_server_port)
+ server_thread = self._listen_tcp(server_addr, self.start_protocol)
+
+ server_thread.wait()
+ processor_thread.wait()
+
+ #=========================================================================
+ # RTC address family related utilities
+ #=========================================================================
+
+ def update_rtfilters(self):
+ """Updates RT filters for each peer.
+
+ Should be called whenever the set of RT NLRIs changes.
+ Currently only used by `Processor` to update the RT filters after it
+ has processed an RT destination. If the RT filter has changed for a
+ peer, we call the RT filter change handler.
+ """
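+ # Illustrative example of the set arithmetic used below (the route
+ # targets are hypothetical values):
+ #   pre_rt_filter  = {'65000:100', '65000:200'}
+ #   curr_rt_filter = {'65000:200', '65000:300'}
+ #   old_rts = pre - curr = {'65000:100'}  -> withdraw related paths
+ #   new_rts = curr - pre = {'65000:300'}  -> advertise related paths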
+ # Update RT filter for all peers
+ # TODO(PH): Check if getting this map can be optimized (if expensive)
+ new_peer_to_rtfilter_map = self._compute_rtfilter_map()
+
+ # If we have new best path for RT NLRI, we have to update peer RT
+ # filters and take appropriate action of sending them NLRIs for other
+ # address-families as per new RT filter if necessary.
+ for peer in self._peer_manager.iterpeers:
+ pre_rt_filter = self._rt_mgr.peer_to_rtfilter_map.get(peer, set())
+ curr_rt_filter = new_peer_to_rtfilter_map.get(peer, set())
+
+ old_rts = pre_rt_filter - curr_rt_filter
+ new_rts = curr_rt_filter - pre_rt_filter
+ # If interested RTs for a peer changes
+ if new_rts or old_rts:
+ LOG.debug('RT Filter for peer %s updated: '
+ 'Added RTs %s, Removed RTs %s' %
+ (peer.ip_address, new_rts, old_rts))
+ self._on_update_rt_filter(peer, new_rts, old_rts)
+ # Update to new RT filters
+ self._peer_manager.set_peer_to_rtfilter_map(new_peer_to_rtfilter_map)
+ self._rt_mgr.peer_to_rtfilter_map = new_peer_to_rtfilter_map
+ LOG.debug('Updated RT filters: %s' %
+ (str(self._rt_mgr.peer_to_rtfilter_map)))
+ # Update interested RTs i.e. RTs on the path that will be installed
+ # into global tables
+ self._rt_mgr.update_interested_rts()
+
+ def _on_update_rt_filter(self, peer, new_rts, old_rts):
+ """Handles update of peer RT filter.
+
+ Parameters:
+ - `peer`: (Peer) whose RT filter has changed.
+ - `new_rts`: (set) of new RTs that peer is interested in.
+ - `old_rts`: (set) of RTs that the peer is no longer interested in.
+ """
+ for table in self._table_manager._global_tables.itervalues():
+ if table.route_family == nlri.RF_RTC_UC:
+ continue
+ self._spawn('rt_filter_chg_%s' % peer,
+ self._rt_mgr.on_rt_filter_chg_sync_peer,
+ peer, new_rts, old_rts, table)
+ LOG.debug('RT Filter change handler launched for route_family %s'
+ % table.route_family)
+
+ def _compute_rtfilter_map(self):
+ """Returns neighbor's RT filter (permit/allow filter based on RT).
+
+ Walks the RT filter tree and computes the current RT filter for each
+ peer that has advertised RT NLRIs.
+ Returns:
+ dict mapping each peer to the `set` of RTs that the neighbor is
+ interested in.
+ """
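+ # Shape of the returned mapping, with hypothetical values:
+ #   {<Peer 192.168.1.2>: set(['65000:100', '65000:200']),
+ #    <Peer 192.168.1.3>: set(['65000:300'])}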
+ rtfilter_map = {}
+
+ def get_neigh_filter(neigh):
+ neigh_filter = rtfilter_map.get(neigh)
+ # Lazy creation of neighbor RT filter
+ if neigh_filter is None:
+ neigh_filter = set()
+ rtfilter_map[neigh] = neigh_filter
+ return neigh_filter
+
+ # Check if we have to use all paths or just best path
+ if self._common_config.max_path_ext_rtfilter_all:
+ # We have to look at all paths for a RtDest
+ for rtcdest in self._table_manager.get_rtc_table().itervalues():
+ known_path_list = rtcdest.known_path_list
+ for path in known_path_list:
+ neigh = path.source
+
+ # We ignore NC
+ if neigh is None:
+ continue
+
+ neigh_filter = get_neigh_filter(neigh)
+ neigh_filter.add(path.nlri.route_target)
+ else:
+ # We iterate over all destination of the RTC table and for iBGP
+ # peers we use all known paths' RTs for RT filter and for eBGP
+ # peers we only consider best-paths' RTs for RT filter
+ for rtcdest in self._table_manager.get_rtc_table().itervalues():
+ path = rtcdest.best_path
+ # If this destination does not have any path, we continue
+ if not path:
+ continue
+
+ neigh = path.source
+ # Consider only eBGP peers and ignore NC
+ if neigh and neigh.is_ebgp_peer():
+ # For eBGP peers we use only best-path to learn RT filter
+ neigh_filter = get_neigh_filter(neigh)
+ neigh_filter.add(path.nlri.route_target)
+ else:
+ # For iBGP peers we use all known paths to learn RT filter
+ known_path_list = rtcdest.known_path_list
+ for path in known_path_list:
+ neigh = path.source
+ # We ignore NC, and eBGP peers
+ if neigh and not neigh.is_ebgp_peer():
+ neigh_filter = get_neigh_filter(neigh)
+ neigh_filter.add(path.nlri.route_target)
+
+ return rtfilter_map
+
+ #=========================================================================
+ # Peer or Neighbor related handles/utilities.
+ #=========================================================================
+ def register_flexinet_sink(self, sink):
+ self._sinks.add(sink)
+
+ def unregister_flexinet_sink(self, sink):
+ self._sinks.remove(sink)
+
+ def update_flexinet_peers(self, path, route_disc):
+ for sink in self._sinks:
+ out_route = FlexinetOutgoingRoute(path, route_disc)
+ sink.enque_outgoing_msg(out_route)
+
+ def on_peer_added(self, peer):
+ if self.started:
+ self._spawn_activity(
+ peer, self.start_protocol
+ )
+
+ # We need to handle new RTC_AS
+ if peer.rtc_as != self.asn:
+ self._spawn(
+ 'NEW_RTC_AS_HANDLER %s' % peer.rtc_as,
+ self._rt_mgr.update_rtc_as_set
+ )
+
+ def on_peer_removed(self, peer):
+ if peer.rtc_as != self.asn:
+ self._spawn(
+ 'OLD_RTC_AS_HANDLER %s' % peer.rtc_as,
+ self._rt_mgr.update_rtc_as_set
+ )
+
+ def build_protocol(self, socket):
+ assert socket
+ # Check if it's a reactive connection or a pro-active connection
+ _, remote_port = socket.getpeername()
+ is_reactive_conn = True
+ if remote_port == STD_BGP_SERVER_PORT_NUM:
+ is_reactive_conn = False
+
+ bgp_protocol = self.protocol(
+ socket,
+ self._signal_bus,
+ is_reactive_conn=is_reactive_conn
+ )
+ return bgp_protocol
+
+ def start_protocol(self, socket):
+ """Handler of new connection requests on bgp server port.
+
+ Checks if new connection request is valid and starts new instance of
+ protocol.
+ """
+ assert socket
+ peer_addr, peer_port = socket.getpeername()
+ peer = self._peer_manager.get_by_addr(peer_addr)
+ bgp_proto = self.build_protocol(socket)
+
+ # We reject this connection request from peer:
+ # 1) If we have connection initiated by a peer that is not in our
+ # configuration.
+ # 2) If this neighbor is not enabled according to configuration.
+ if not peer or not peer.enabled:
+ LOG.debug('Closed connection to %s:%s as it is not a recognized'
+ ' peer.' % (peer_addr, peer_port))
+ # Send connection rejected notification as per RFC
+ code = exceptions.ConnRejected.CODE
+ subcode = exceptions.ConnRejected.SUB_CODE
+ bgp_proto.send_notification(code, subcode)
+ elif not (peer.in_idle() or peer.in_active() or peer.in_connect()):
+ LOG.debug('Closing connection to %s:%s as we have a connection'
+ ' in a state other than IDLE, CONNECT or ACTIVE,'
+ ' i.e. connection collision resolution' %
+ (peer_addr, peer_port))
+ # Send Connection Collision Resolution notification as per RFC.
+ code = exceptions.CollisionResolution.CODE
+ subcode = exceptions.CollisionResolution.SUB_CODE
+ bgp_proto.send_notification(code, subcode)
+ else:
+ self._spawn_activity(bgp_proto, peer)
diff --git a/ryu/services/protocols/bgp/core_manager.py b/ryu/services/protocols/bgp/core_manager.py
new file mode 100644
index 00000000..5a4caf67
--- /dev/null
+++ b/ryu/services/protocols/bgp/core_manager.py
@@ -0,0 +1,72 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Core Manager module dedicated for providing CORE_MANAGER singleton
+"""
+from ryu.services.protocols.bgp.base import Activity
+from ryu.services.protocols.bgp.base import ActivityException
+from ryu.services.protocols.bgp.rtconf.neighbors import NeighborsConf
+from ryu.services.protocols.bgp.rtconf.vrfs import VrfsConf
+
+
+class _CoreManager(Activity):
+ """Core service manager.
+ """
+
+ def __init__(self):
+ self._common_conf = None
+ self._neighbors_conf = None
+ self._vrfs_conf = None
+ self._core_service = None
+ super(_CoreManager, self).__init__()
+
+ def _run(self, *args, **kwargs):
+ self._common_conf = kwargs.pop('common_conf')
+ self._neighbors_conf = NeighborsConf()
+ self._vrfs_conf = VrfsConf()
+ from ryu.services.protocols.bgp.core import CoreService
+ self._core_service = CoreService(self._common_conf,
+ self._neighbors_conf,
+ self._vrfs_conf)
+ core_activity = self._spawn_activity(self._core_service)
+ core_activity.wait()
+
+ def get_core_service(self):
+ self._check_started()
+ return self._core_service
+
+ def _check_started(self):
+ if not self.started:
+ raise ActivityException('Cannot access any property before '
+ 'activity has started')
+
+ @property
+ def common_conf(self):
+ self._check_started()
+ return self._common_conf
+
+ @property
+ def neighbors_conf(self):
+ self._check_started()
+ return self._neighbors_conf
+
+ @property
+ def vrfs_conf(self):
+ self._check_started()
+ return self._vrfs_conf
+
+# _CoreManager instance that manages core bgp service and configuration data.
+CORE_MANAGER = _CoreManager()
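+
+# Rough usage sketch (assumptions: Activity.start() forwards its keyword
+# arguments to _run(), `common_conf` is an already-built CommonConf, and
+# start() blocks until the core service exits, so callers would normally
+# spawn it in its own green thread via ryu.lib.hub):
+#
+#     hub.spawn(CORE_MANAGER.start, common_conf=common_conf)
+#     ...
+#     core_service = CORE_MANAGER.get_core_service()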
diff --git a/ryu/services/protocols/bgp/core_managers/__init__.py b/ryu/services/protocols/bgp/core_managers/__init__.py
new file mode 100644
index 00000000..883de2d2
--- /dev/null
+++ b/ryu/services/protocols/bgp/core_managers/__init__.py
@@ -0,0 +1,22 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from configuration_manager import ConfigurationManager
+from import_map_manager import ImportMapManager
+from peer_manager import PeerManager
+from table_manager import TableCoreManager
+__all__ = ['ImportMapManager', 'TableCoreManager', 'PeerManager',
+ 'ConfigurationManager']
diff --git a/ryu/services/protocols/bgp/core_managers/configuration_manager.py b/ryu/services/protocols/bgp/core_managers/configuration_manager.py
new file mode 100644
index 00000000..ffac9bcd
--- /dev/null
+++ b/ryu/services/protocols/bgp/core_managers/configuration_manager.py
@@ -0,0 +1,125 @@
+from ryu.services.protocols.bgp.rtconf.base import ConfWithStats
+from ryu.services.protocols.bgp.rtconf.common import CommonConfListener
+from ryu.services.protocols.bgp.rtconf.neighbors import NeighborsConfListener
+from ryu.services.protocols.bgp.rtconf import vrfs
+from ryu.services.protocols.bgp.rtconf.vrfs import VrfConf
+from ryu.services.protocols.bgp.rtconf.vrfs import VrfsConfListener
+
+import logging
+
+LOG = logging.getLogger('bgpspeaker.core_managers.table_mixin')
+
+
+class ConfigurationManager(CommonConfListener, VrfsConfListener,
+ NeighborsConfListener):
+ def __init__(self, core_service, common_conf, vrfs_conf, neighbors_conf):
+ self._signal_bus = core_service.signal_bus
+ self._common_config = common_conf
+ self._peer_manager = core_service.peer_manager
+ self._table_manager = core_service.table_manager
+ self._rt_manager = core_service.rt_manager
+ CommonConfListener.__init__(self, common_conf)
+ VrfsConfListener.__init__(self, vrfs_conf)
+ NeighborsConfListener.__init__(self, neighbors_conf)
+
+ def on_update_common_conf(self, evt):
+ raise NotImplementedError()
+
+ def on_add_neighbor_conf(self, evt):
+ neigh_conf = evt.value
+ self._peer_manager.add_peer(neigh_conf, self._common_config)
+
+ def on_remove_neighbor_conf(self, evt):
+ neigh_conf = evt.value
+ self._peer_manager.remove_peer(neigh_conf)
+
+ def on_chg_vrf_conf(self, evt):
+ evt_value = evt.value
+ vrf_conf = evt.src
+ new_imp_rts, removed_imp_rts, import_maps, re_export, re_import = \
+ evt_value
+ route_family = vrf_conf.route_family
+ vrf_table = self._table_manager.get_vrf_table(
+ vrf_conf.route_dist, route_family
+ )
+ assert vrf_table
+
+ # If we have new import RTs we have to update RTC table and make route
+ # refresh request to peers not participating in RT address-family
+ self._table_manager.update_vrf_table_links(
+ vrf_table, new_imp_rts, removed_imp_rts
+ )
+
+ # If other properties of VRF changed we re-install local paths.
+ if re_export:
+ self._table_manager.re_install_net_ctrl_paths(vrf_table)
+
+ # We have to withdraw paths that do not have any RT of
+ # interest
+ vrf_table.clean_uninteresting_paths()
+ if import_maps is not None:
+ vrf_table.init_import_maps(import_maps)
+ changed_dests = vrf_table.apply_import_maps()
+ for dest in changed_dests:
+ self._signal_bus.dest_changed(dest)
+
+ # import new rts
+ if re_import:
+ LOG.debug(
+ "RE-importing prefixes from VPN table to VRF %s"
+ % repr(vrf_table)
+ )
+ self._table_manager.import_all_vpn_paths_to_vrf(vrf_table)
+ else:
+ self._table_manager.import_all_vpn_paths_to_vrf(
+ vrf_table, new_imp_rts
+ )
+
+ # Update local/global RT NLRIs
+ self._rt_manager.update_local_rt_nlris()
+
+ def on_remove_vrf_conf(self, evt):
+ """Removes VRF table associated with given `vrf_conf`.
+
+ Cleans up other links to this table as well.
+ """
+ vrf_conf = evt.value
+ # Detach VrfConf change listener.
+ vrf_conf.remove_listener(VrfConf.VRF_CHG_EVT, self.on_chg_vrf_conf)
+
+ self._table_manager.remove_vrf_by_vrf_conf(vrf_conf)
+
+ # Update local RT NLRIs
+ self._rt_manager.update_local_rt_nlris()
+
+ self._signal_bus.vrf_removed(vrf_conf.route_dist)
+
+ def on_add_vrf_conf(self, evt):
+ """Event handler for new VrfConf.
+
+ Creates a VrfTable to store routing information related to new Vrf.
+ Also arranges for related paths to be imported to this VrfTable.
+ """
+ vrf_conf = evt.value
+ route_family = vrf_conf.route_family
+ assert route_family in vrfs.SUPPORTED_VRF_RF
+ # Create VRF table with given configuration.
+ vrf_table = self._table_manager.create_and_link_vrf_table(vrf_conf)
+
+ # Attach VrfConf change listeners.
+ vrf_conf.add_listener(ConfWithStats.UPDATE_STATS_LOG_ENABLED_EVT,
+ self.on_stats_config_change)
+ vrf_conf.add_listener(ConfWithStats.UPDATE_STATS_TIME_EVT,
+ self.on_stats_config_change)
+ vrf_conf.add_listener(VrfConf.VRF_CHG_EVT, self.on_chg_vrf_conf)
+
+ # Import paths from VPN table that match this VRF/VPN.
+ self._table_manager.import_all_vpn_paths_to_vrf(vrf_table)
+
+ # Update local RT NLRIs
+ self._rt_manager.update_local_rt_nlris()
+ self._signal_bus.vrf_added(vrf_conf)
+
+ def on_stats_config_change(self, evt):
+ vrf_conf = evt.src
+ self._signal_bus.stats_config_changed(vrf_conf)
diff --git a/ryu/services/protocols/bgp/core_managers/import_map_manager.py b/ryu/services/protocols/bgp/core_managers/import_map_manager.py
new file mode 100644
index 00000000..98fcb8de
--- /dev/null
+++ b/ryu/services/protocols/bgp/core_managers/import_map_manager.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ryu.services.protocols.bgp.info_base.vrf import VrfRtImportMap
+from ryu.services.protocols.bgp.info_base.vrf4 import Vrf4NlriImportMap
+from ryu.services.protocols.bgp.info_base.vrf6 import Vrf6NlriImportMap
+
+
+class ImportMapManager(object):
+
+ def __init__(self):
+ self._import_maps_by_name = {}
+
+ def create_vpnv4_nlri_import_map(self, name, value):
+ self._create_import_map_factory(name, value, Vrf4NlriImportMap)
+
+ def create_vpnv6_nlri_import_map(self, name, value):
+ self._create_import_map_factory(name, value, Vrf6NlriImportMap)
+
+ def create_rt_import_map(self, name, value):
+ self._create_import_map_factory(name, value, VrfRtImportMap)
+
+ def _create_import_map_factory(self, name, value, cls):
+ if self._import_maps_by_name.get(name) is not None:
+ raise ImportMapAlreadyExistsError()
+ self._import_maps_by_name[name] = cls(value)
+
+ def get_import_map_by_name(self, name):
+ return self._import_maps_by_name.get(name)
+
+
+class ImportMapAlreadyExistsError(Exception):
+ pass
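+
+# Rough usage sketch (the `value` formats are assumptions: an RT string for
+# RT import maps and a prefix string for NLRI import maps):
+#
+#     manager = ImportMapManager()
+#     manager.create_rt_import_map('only-rt-100', '65000:100')
+#     manager.create_vpnv4_nlri_import_map('only-ten-net', '10.0.0.0/8')
+#     imap = manager.get_import_map_by_name('only-rt-100')
+#     # Re-using a name raises ImportMapAlreadyExistsError.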
diff --git a/ryu/services/protocols/bgp/core_managers/peer_manager.py b/ryu/services/protocols/bgp/core_managers/peer_manager.py
new file mode 100644
index 00000000..1b923862
--- /dev/null
+++ b/ryu/services/protocols/bgp/core_managers/peer_manager.py
@@ -0,0 +1,305 @@
+import logging
+
+from ryu.services.protocols.bgp.base import SUPPORTED_GLOBAL_RF
+from ryu.services.protocols.bgp.model import OutgoingRoute
+from ryu.services.protocols.bgp.peer import Peer
+from ryu.services.protocols.bgp.protocols.bgp import pathattr
+from ryu.services.protocols.bgp.protocols.bgp import nlri
+from ryu.services.protocols.bgp.utils.bgp \
+ import clone_path_and_update_med_for_target_neighbor
+
+LOG = logging.getLogger('bgpspeaker.core_managers.peer_manager')
+
+
+class PeerManager(object):
+ def __init__(
+ self, core_service, neighbors_conf,
+ ):
+ self._core_service = core_service
+ self._signal_bus = core_service.signal_bus
+ self._table_manager = core_service.table_manager
+ self._rt_manager = core_service.rt_manager
+ self._peers = {}
+
+ # Peer to RTFilter map
+ # Key: Peer instance
+ # Value: set of RTs that constitute RT filter for this peer
+ self._peer_to_rtfilter_map = {}
+ self._neighbors_conf = neighbors_conf
+
+ @property
+ def iterpeers(self):
+ return self._peers.itervalues()
+
+ def set_peer_to_rtfilter_map(self, new_map):
+ self._peer_to_rtfilter_map = new_map
+
+ def add_peer(self, neigh_conf, common_conf):
+ peer = Peer(common_conf, neigh_conf, self._core_service,
+ self._signal_bus, self)
+ self._peers[neigh_conf.ip_address] = peer
+ self._core_service.on_peer_added(peer)
+
+ def remove_peer(self, neigh_conf):
+ neigh_ip_address = neigh_conf.ip_address
+ peer = self._peers.get(neigh_ip_address)
+ peer.stop()
+ del self._peers[neigh_ip_address]
+ self._core_service.on_peer_removed(peer)
+
+ def get_by_addr(self, addr):
+ return self._peers.get(addr)
+
+ def on_peer_down(self, peer):
+ """Peer down handler.
+
+ Cleans up the paths in global tables that were received from this peer.
+ """
+ LOG.debug('Cleaning obsolete paths whose source/version: %s/%s' %
+ (peer.ip_address, peer.version_num))
+ # Launch clean-up for each global tables.
+ self._table_manager.clean_stale_routes(peer)
+
+ def _get_non_rtc_peers(self):
+ non_rtc_peer_list = set()
+ for peer in self._peers.itervalues():
+ if (peer.in_established() and
+ not peer.is_mbgp_cap_valid(nlri.RF_RTC_UC)):
+ non_rtc_peer_list.add(peer)
+ return non_rtc_peer_list
+
+ def curr_peer_rtfilter(self, peer):
+ return self._peer_to_rtfilter_map.get(peer)
+
+ def get_peers_in_established(self):
+ """Returns list of peers in established state."""
+ est_peers = []
+ for peer in self._peers.itervalues():
+ if peer.in_established():
+ est_peers.append(peer)
+ return est_peers
+
+ def resend_sent(self, route_family, peer):
+ """For given `peer` re-send sent paths.
+
+ Parameters:
+ - `route-family`: (RouteFamily) of the sent paths to re-send
+ - `peer`: (Peer) peer for which we need to re-send sent paths
+ """
+ if peer not in self._peers.values():
+ raise ValueError('Could not find given peer (%s)' % peer)
+
+ if route_family not in SUPPORTED_GLOBAL_RF:
+ raise ValueError(
+ 'Given route family (%s) is not supported.' % route_family
+ )
+
+ # Iterate over the global table for given afi, safi and enqueue
+ # out-going routes.
+ table = self._table_manager.get_global_table_by_route_family(
+ route_family
+ )
+
+ for destination in table.itervalues():
+ # Check if this destination's sent-routes include this peer, i.e.
+ # check if this destination was advertised to the peer, and enqueue
+ # the path only if it was. If the current best-path has not been
+ # advertised before, it might already have an OutgoingRoute queued
+ # to be sent to the peer.
+ sent_routes = destination.sent_routes
+ if sent_routes is None or len(sent_routes) == 0:
+ continue
+ for sent_route in sent_routes:
+ if sent_route.sent_peer == peer:
+ # Update MED: if MED was previously set per neighbor, or
+ # wasn't set at all, it could have changed by now and we
+ # may need to set a new value.
+ p = sent_route.path
+ if p.med_set_by_target_neighbor\
+ or p.get_pattr(pathattr.Med.ATTR_NAME) is None:
+ sent_route.path = \
+ clone_path_and_update_med_for_target_neighbor(
+ sent_route.path, peer.med
+ )
+
+ ogr = OutgoingRoute(sent_route.path,
+ for_route_refresh=True)
+ peer.enque_outgoing_msg(ogr)
+
+ def req_rr_to_non_rtc_peers(self, route_family):
+ """Makes refresh request to all peers for given address family.
+
+ Skips making the request to peers that have a valid RTC capability.
+ """
+ assert route_family != nlri.RF_RTC_UC
+ for peer in self._peers.itervalues():
+ # First check if peer is in established state
+ if (peer.in_established() and
+ # Check if peer has valid capability for given address
+ # family
+ peer.is_mbgp_cap_valid(route_family) and
+ # Check if peer has valid capability for RTC
+ not peer.is_mbgp_cap_valid(nlri.RF_RTC_UC)):
+ peer.request_route_refresh(route_family)
+
+ def make_route_refresh_request(self, peer_ip, *route_families):
+ """Request route-refresh for peer with `peer_ip` for given
+ `route_families`.
+
+ Will make a route-refresh request for a given `route_family` only if
+ such capability is supported and the peer is in ESTABLISHED state;
+ otherwise such requests are ignored. Raises an appropriate error in
+ other cases. If `peer_ip` equals 'all', makes a refresh request to all
+ valid peers.
+ """
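+ # Usage sketch with hypothetical values (assumes nlri.RF_IPv4_VPN is a
+ # member of SUPPORTED_GLOBAL_RF):
+ #
+ #     peer_manager.make_route_refresh_request('all', nlri.RF_IPv4_VPN)
+ #     peer_manager.make_route_refresh_request('192.168.1.2',
+ #                                             nlri.RF_IPv4_VPN)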
+ LOG.debug('Route refresh requested for peer %s and route families %s'
+ % (peer_ip, route_families))
+ if not SUPPORTED_GLOBAL_RF.intersection(route_families):
+ raise ValueError('Given route family(s) %s is not supported.' %
+ route_families)
+
+ peer_list = []
+ # If route-refresh is requested for all peers.
+ if peer_ip == 'all':
+ peer_list.extend(self.get_peers_in_established())
+ else:
+ given_peer = self._peers.get(peer_ip)
+ if not given_peer:
+ raise ValueError('Invalid/unrecognized peer %s' % peer_ip)
+ if not given_peer.in_established():
+ raise ValueError('Peer currently does not have an'
+ ' established session.')
+ peer_list.append(given_peer)
+
+ # Make route refresh request to valid peers.
+ for peer in peer_list:
+ peer.request_route_refresh(*route_families)
+
+ return True
+
+ def comm_all_rt_nlris(self, peer):
+ """Shares/communicates current best rt_nlri paths with this peer.
+
+ Can be used to send initial updates after we have established session
+ with `peer`, with which the RTC capability is valid. Takes into account
+ the peer's RTC_AS setting and filters out all RT NLRIs whose origin AS
+ does not match this setting.
+ """
+ # First check if for this peer mpbgp-rtc is valid.
+ if not peer.is_mbgp_cap_valid(nlri.RF_RTC_UC):
+ return
+
+ neigh_conf = self._neighbors_conf.get_neighbor_conf(peer.ip_address)
+ peer_rtc_as = neigh_conf.rtc_as
+ # Iterate over all RT_NLRI destinations and communicate qualifying RT NLRIs
+ rtc_table = self._table_manager.get_rtc_table()
+ for dest in rtc_table.itervalues():
+ best_path = dest.best_path
+ # Ignore a destination that currently does not have best path
+ if not best_path:
+ continue
+
+ # If this is a local path
+ if best_path.source is None:
+ # Check RT NLRI's origin AS matches peer RTC_AS setting
+ origin_as = best_path.nlri.origin_as
+ if origin_as == peer_rtc_as:
+ peer.communicate_path(best_path)
+ else:
+ # Communicate all remote RT NLRIs
+ peer.communicate_path(best_path)
+
+ # Also communicate EOR as per RFC
+ peer.enque_end_of_rib(nlri.RF_RTC_UC)
+
+ def comm_all_best_paths(self, peer):
+ """Shares/communicates current best paths with this peer.
+
+ Can be used to send initial updates after we have established session
+ with `peer`.
+ """
+ LOG.debug('Communicating current best path for all afi/safi except'
+ ' 1/132')
+ # We will enqueue the best path from all global destinations.
+ for route_family, table in self._table_manager.global_tables.iteritems():
+ if route_family == nlri.RF_RTC_UC:
+ continue
+ if peer.is_mbgp_cap_valid(route_family):
+ for dest in table.itervalues():
+ if dest.best_path:
+ peer.communicate_path(dest.best_path)
+
+ def comm_new_best_to_bgp_peers(self, new_best_path):
+ """Communicates/enqueues given best path to be sent to all qualifying
+ bgp peers.
+
+ If this path came from iBGP peers, it is not sent to other iBGP peers.
+ If this path has a community attribute, and the setting for recognizing
+ well-known attributes is enabled, we act as per [RFC 1997] and queue the
+ outgoing route only to qualifying BGP peers.
+ """
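+ # For reference, the well-known community values defined by RFC 1997:
+ #   NO_EXPORT           = 0xFFFFFF01
+ #   NO_ADVERTISE        = 0xFFFFFF02
+ #   NO_EXPORT_SUBCONFED = 0xFFFFFF03
+ # Only NO_ADVERTISE is acted upon below; paths carrying it are not
+ # advertised to any peer.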
+ # Filter based on standard community
+ # If new best path has community attribute, it should be taken into
+ # account when sending UPDATE to peers.
+ comm_attr = new_best_path.get_pattr(pathattr.Community.ATTR_NAME)
+ if comm_attr:
+ comm_attr_na = comm_attr.has_comm_attr(
+ pathattr.Community.NO_ADVERTISE
+ )
+ # If the NO_ADVERTISE attribute is present, we do not send
+ # the UPDATE to any peers
+ if comm_attr_na:
+ LOG.debug('New best path has community attr. NO_ADVERTISE = %s'
+ '. Hence not advertising to any peer' % comm_attr_na)
+ return
+
+ qualified_peers = self._collect_peers_of_interest(
+ new_best_path
+ )
+
+ # Distribute new best-path to qualified peers.
+ for peer in qualified_peers:
+ peer.communicate_path(new_best_path)
+
+ def _collect_peers_of_interest(self, new_best_path):
+ """Collect all peers that qualify for sharing a path with given RTs.
+ """
+ path_rts = new_best_path.get_rts()
+ qualified_peers = set(self._peers.values())
+
+ # Filter out peers based on RTC_AS setting if path is for RT_NLRI
+ qualified_peers = self._rt_manager.filter_by_origin_as(
+ new_best_path, qualified_peers
+ )
+
+ # We continue to filter out qualified peer based on path RTs
+ # If new best path has RTs, we need to share this UPDATE with
+ # qualifying peers
+ if path_rts:
+ # We add Default_RTC_NLRI to path RTs so that we can send it to
+ # peers that have expressed interest in all paths
+ path_rts.append(nlri.RtNlri.DEFAULT_RT)
+ # All peers that do not have RTC capability qualify
+ qualified_peers = set(self._get_non_rtc_peers())
+ # Peers that have RTC capability and have common RT with the path
+ # also qualify
+ peer_to_rtfilter_map = self._peer_to_rtfilter_map
+ for peer, rt_filter in peer_to_rtfilter_map.iteritems():
+ # Ignore the Network Controller (it's not a BGP peer)
+ if peer is None:
+ continue
+
+ if rt_filter is None:
+ qualified_peers.add(peer)
+ elif rt_filter.intersection(path_rts):
+ qualified_peers.add(peer)
+
+ return qualified_peers
+
+ def schedule_rr_to_non_rtc_peers(self):
+ for route_family in SUPPORTED_GLOBAL_RF:
+ # Since we are dealing with peers that do not support RTC,
+ # ignore this address family
+ if route_family == nlri.RF_RTC_UC:
+ continue
+
+ self.req_rr_to_non_rtc_peers(route_family)
diff --git a/ryu/services/protocols/bgp/core_managers/table_manager.py b/ryu/services/protocols/bgp/core_managers/table_manager.py
new file mode 100644
index 00000000..ea29f445
--- /dev/null
+++ b/ryu/services/protocols/bgp/core_managers/table_manager.py
@@ -0,0 +1,500 @@
+import logging
+
+from ryu.services.protocols.bgp.base import SUPPORTED_GLOBAL_RF
+from ryu.services.protocols.bgp.info_base.rtc import RtcTable
+from ryu.services.protocols.bgp.info_base.vpnv4 import Vpnv4Path
+from ryu.services.protocols.bgp.info_base.vpnv4 import Vpnv4Table
+from ryu.services.protocols.bgp.info_base.vpnv6 import Vpnv6Path
+from ryu.services.protocols.bgp.info_base.vpnv6 import Vpnv6Table
+from ryu.services.protocols.bgp.info_base.vrf4 import Vrf4Table
+from ryu.services.protocols.bgp.info_base.vrf6 import Vrf6Table
+from ryu.services.protocols.bgp.rtconf import vrfs
+from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_IPV4
+from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_IPV6
+from ryu.services.protocols.bgp.protocols.bgp import nlri
+from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4
+from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4_prefix
+from ryu.services.protocols.bgp.utils.validation import is_valid_ipv6
+from ryu.services.protocols.bgp.utils.validation import is_valid_ipv6_prefix
+
+
+LOG = logging.getLogger('bgpspeaker.core_managers.table_mixin')
+
+
+class TableCoreManager(object):
+ """Methods performing core operations on tables."""
+
+ def __init__(self, core_service, common_conf):
+
+ self._tables = {}
+ self._rt_mgr = core_service.rt_manager
+ self._signal_bus = core_service.signal_bus
+
+ # (VRF) Tables to which the routes with a given route target
+ # should be imported.
+ #
+ # Key: RouteTarget
+ # Value: Set of tables.
+ self._tables_for_rt = {}
+
+ # Global/Default tables, keyed by RouteFamily.
+ self._global_tables = {}
+
+ self._core_service = core_service
+ self._signal_bus = self._core_service.signal_bus
+
+ # VPN label range
+ self._asbr_label_range = common_conf.label_range
+
+ self._next_vpnv4_label = int(self._asbr_label_range[0])
+
+ self._next_hop_label = {}
+
+ @property
+ def global_tables(self):
+ return self._global_tables
+
+ def remove_vrf_by_vrf_conf(self, vrf_conf):
+
+ route_family = vrf_conf.route_family
+ assert route_family in (vrfs.VRF_RF_IPV4, vrfs.VRF_RF_IPV6)
+ table_id = (vrf_conf.route_dist, route_family)
+
+ vrf_table = self._tables.pop(table_id)
+
+ self._remove_links_to_vrf_table(vrf_table)
+
+ # Withdraw the best-path whose source was NC since it may have been
+ # exported to VPN table.
+ for destination in vrf_table.itervalues():
+ best_path = destination.best_path
+ if best_path and best_path.source is None:
+ vpn_clone = best_path.clone_to_vpn(vrf_conf.route_dist,
+ for_withdrawal=True)
+ self.learn_path(vpn_clone)
+ LOG.debug('VRF with RD %s marked for removal' % vrf_conf.route_dist)
+
+ def import_all_vpn_paths_to_vrf(self, vrf_table, import_rts=None):
+ """Imports Vpnv4/6 paths from Global/VPN table into given Vrfv4/6
+ table.
+ :param vrf_table: Vrf table to which we import
+ :type vrf_table: VrfTable
+ :param import_rts: import RTs to override default import_rts of
+ vrf table for this import
+ :type import_rts: set of strings
+
+
+ Checks if we have any path RT common with VRF table's import RT.
+ """
+ rfs = (Vrf4Table.ROUTE_FAMILY, Vrf6Table.ROUTE_FAMILY)
+ assert vrf_table.route_family in rfs, 'Invalid VRF table.'
+
+ if vrf_table.route_family == Vrf4Table.ROUTE_FAMILY:
+ vpn_table = self.get_vpn4_table()
+ else:
+ vpn_table = self.get_vpn6_table()
+
+ vrf_table.import_vpn_paths_from_table(vpn_table, import_rts)
+
+ def learn_path(self, path):
+ """Inserts `path` into correct global table.
+
+ Since the set of known paths to the `Destination` has changed, we queue
+ it for further processing.
+ """
+ # Get VPN/Global table
+ table = self.get_global_table_by_route_family(path.route_family)
+ gpath_dest = table.insert(path)
+ # Since destination was updated, we enqueue it for processing.
+ self._signal_bus.dest_changed(gpath_dest)
+
+ def remember_sent_route(self, sent_route):
+ """Records `sent_route` inside proper table.
+
+ This forms the Adj-RIB-Out record of `sent_route`.
+ """
+ route_family = sent_route.path.route_family
+ table = self.get_global_table_by_route_family(route_family)
+ table.insert_sent_route(sent_route)
+
+ def on_interesting_rts_change(self, new_global_rts, removed_global_rts):
+ """Update global tables as interested RTs changed.
+
+ Adds RT NLRIs for `new_global_rts` and removes those for
+ `removed_global_rts`. Does not check whether they are already present.
+ Schedules a refresh request to peers that do not participate in the RTC
+ address-family.
+ """
+ # We add new RT NLRI and request RR for other peers.
+ if new_global_rts:
+ LOG.debug(
+ 'Sending route_refresh to all neighbors that'
+ ' did not negotiate RTC capability.'
+ )
+
+ pm = self._core_service.peer_manager
+ pm.schedule_rr_to_non_rtc_peers()
+ if removed_global_rts:
+ LOG.debug(
+ 'Cleaning up global tables as some interested RTs were removed'
+ )
+ self._clean_global_uninteresting_paths()
+
+ def get_global_table_by_route_family(self, route_family):
+ if route_family not in SUPPORTED_GLOBAL_RF:
+ raise ValueError(
+ 'Given route family: %s currently not supported' % route_family
+ )
+
+ global_table = None
+ if route_family == nlri.RF_IPv4_VPN:
+ global_table = self.get_vpn4_table()
+
+ elif route_family == nlri.RF_IPv6_VPN:
+ global_table = self.get_vpn6_table()
+
+ elif route_family == nlri.RF_RTC_UC:
+ global_table = self.get_rtc_table()
+
+ return global_table
+
+ def get_vrf_table(self, vrf_rd, vrf_rf):
+ assert vrf_rd is not None
+ return self._tables.get((vrf_rd, vrf_rf))
+
+ def get_vrf_tables(self, vrf_rf=None):
+ vrf_tables = {}
+ for (scope_id, table_id), table in self._tables.items():
+ if scope_id is None:
+ continue
+ if vrf_rf is not None and table_id != vrf_rf:
+ continue
+ vrf_tables[(scope_id, table_id)] = table
+ return vrf_tables
+
+ def get_vpn6_table(self):
+ """Returns global VPNv6 table.
+
+ Creates the table if it does not exist.
+ """
+ vpn_table = self._global_tables.get(nlri.RF_IPv6_VPN)
+ # Lazy initialize the table.
+ if not vpn_table:
+ vpn_table = Vpnv6Table(self._core_service, self._signal_bus)
+ self._global_tables[nlri.RF_IPv6_VPN] = vpn_table
+ self._tables[(None, nlri.RF_IPv6_VPN)] = vpn_table
+
+ return vpn_table
+
+ def get_vpn4_table(self):
+ """Returns global VPNv4 table.
+
+ Creates the table if it does not exist.
+ """
+ vpn_table = self._global_tables.get(nlri.RF_IPv4_VPN)
+ # Lazy initialize the table.
+ if not vpn_table:
+ vpn_table = Vpnv4Table(self._core_service, self._signal_bus)
+ self._global_tables[nlri.RF_IPv4_VPN] = vpn_table
+ self._tables[(None, nlri.RF_IPv4_VPN)] = vpn_table
+
+ return vpn_table
+
+ def get_rtc_table(self):
+ """Returns global RTC table.
+
+ Creates the table if it does not exist.
+ """
+ rtc_table = self._global_tables.get(nlri.RF_RTC_UC)
+ # Lazy initialization of the table.
+ if not rtc_table:
+ rtc_table = RtcTable(self._core_service, self._signal_bus)
+ self._global_tables[nlri.RF_RTC_UC] = rtc_table
+ self._tables[(None, nlri.RF_RTC_UC)] = rtc_table
+ return rtc_table
+
+ def get_next_vpnv4_label(self):
+ # Get next available label
+ lbl = self._next_vpnv4_label
+ # Check if label is within max. range allowed.
+ if lbl > int(self._asbr_label_range[1]):
+ # Currently we log error message if we exceed configured range.
+ message = 'Have reached max label range'
+ LOG.error(message)
+ raise ValueError(message)
+ # Increment label by 1 as next label.
+ self._next_vpnv4_label += 1
+ return lbl
+
+ def get_nexthop_label(self, label_key):
+ return self._next_hop_label.get(label_key, None)
+
+ def set_nexthop_label(self, key, value):
+ self._next_hop_label[key] = value
+
+ def update_vrf_table_links(self, vrf_table, new_imp_rts,
+ removed_imp_rts):
+ """Update mapping from RT to VRF table."""
+ assert vrf_table
+ if new_imp_rts:
+ self._link_vrf_table(vrf_table, new_imp_rts)
+ if removed_imp_rts:
+ self._remove_links_to_vrf_table_for_rts(vrf_table,
+ removed_imp_rts)
+
+ def re_install_net_ctrl_paths(self, vrf_table):
+ """Re-installs paths from NC with current BGP policy.
+
+ Iterates over known paths from NC installed in `vrf_table` and
+ adds new path with path attributes as per current VRF configuration.
+ """
+ assert vrf_table
+ for dest in vrf_table.itervalues():
+ for path in dest.known_path_list:
+ if path.source is None:
+ vrf_table.insert_vrf_path(
+ path.nlri,
+ path.nexthop,
+ gen_lbl=True
+ )
+ LOG.debug('Re-installed NC paths with current policy for table %s.' %
+ str(vrf_table))
+
+ def _remove_links_to_vrf_table(self, vrf_table):
+ """Removes any links to given `vrf_table`."""
+ assert vrf_table
+ vrf_conf = vrf_table.vrf_conf
+ self._remove_links_to_vrf_table_for_rts(vrf_table,
+ vrf_conf.import_rts)
+
+ def _remove_links_to_vrf_table_for_rts(self, vrf_table, rts):
+ rts_with_no_table = set()
+ affected_tables = set()
+ route_family = vrf_table.route_family
+ for rt in rts:
+ rt_rf_id = rt + ':' + str(route_family)
+ rt_specific_tables = self._tables_for_rt.get(rt_rf_id)
+ # Guard against RTs that have no linked tables (get() returns None).
+ if rt_specific_tables:
+ affected_tables.update(rt_specific_tables)
+ try:
+ rt_specific_tables.remove(vrf_table)
+ except KeyError:
+ LOG.debug('Did not find table listed as interested '
+ 'for its import RT: %s' % rt)
+ if len(rt_specific_tables) == 0:
+ rts_with_no_table.add(rt)
+
+ # Remove records of RT that have no tables associated with it.
+ for rt in rts_with_no_table:
+ rt_rf_id = rt + ':' + str(route_family)
+ del self._tables_for_rt[rt_rf_id]
+
+ def create_and_link_vrf_table(self, vrf_conf):
+ """Factory method to create VRF table for given `vrf_conf`.
+
+ Adds mapping to this table with appropriate scope. Also, adds mapping
+ for import RT of this VRF to created table to facilitate
+ importing/installing of paths from global tables.
+ Returns created table.
+ """
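+ # Key layout of self._tables, as used throughout this class:
+ #   (route_dist, VRF_RF_IPV4 / VRF_RF_IPV6) -> VRF table
+ #   (None, route_family)                    -> global VPN/RTC table
+ # A hypothetical example key: ('65000:100', VRF_RF_IPV4).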
+
+ route_family = vrf_conf.route_family
+ assert route_family in (VRF_RF_IPV4, VRF_RF_IPV6)
+ vrf_table = None
+ if route_family == VRF_RF_IPV4:
+ vrf_table = Vrf4Table(
+ vrf_conf, self._core_service, self._signal_bus
+ )
+ table_id = (vrf_conf.route_dist, route_family)
+ self._tables[table_id] = vrf_table
+
+ elif route_family == VRF_RF_IPV6:
+ vrf_table = Vrf6Table(
+ vrf_conf, self._core_service, self._signal_bus
+ )
+ table_id = (vrf_conf.route_dist, route_family)
+ self._tables[table_id] = vrf_table
+
+ assert vrf_table is not None
+ LOG.debug('Added new VrfTable with rd: %s and add_fmly: %s' %
+ (vrf_conf.route_dist, route_family))
+
+ import_rts = vrf_conf.import_rts
+ # If VRF is configured with import RT, we put this table
+ # in a list corresponding to this RT for easy access.
+ if import_rts:
+ self._link_vrf_table(vrf_table, import_rts)
+
+ return vrf_table
+
+ def _link_vrf_table(self, vrf_table, rt_list):
+ route_family = vrf_table.route_family
+ for rt in rt_list:
+ rt_rf_id = rt + ':' + str(route_family)
+ table_set = self._tables_for_rt.get(rt_rf_id)
+ if table_set is None:
+ table_set = set()
+ self._tables_for_rt[rt_rf_id] = table_set
+ table_set.add(vrf_table)
+ LOG.debug('Added VrfTable %s to import RT table list: %s' %
+ (vrf_table, rt))
+
+ def _clean_global_uninteresting_paths(self):
+ """Marks paths that do not have any route targets of interest
+ for withdrawal.
+
+ Since global tables can have paths with route targets that are not
+ interesting any more, we have to clean these paths so that appropriate
+ withdraws are sent out to NC and other peers. Interesting route targets
+ change as VRFs are modified or as filters that specify which route
+ targets are allowed are updated. This clean-up should only be done when
+ a route target is no longer considered interesting and some paths with
+ that route target were installed in any of the global tables.
+ """
+ uninteresting_dest_count = 0
+ interested_rts = self._rt_mgr.global_interested_rts
+ LOG.debug('Cleaning uninteresting paths. Global interested RTs %s' %
+ interested_rts)
+ for route_family in SUPPORTED_GLOBAL_RF:
+ # TODO(PH): We currently do not install RT_NLRI paths based on
+ # extended path attributes (RT)
+ if route_family == nlri.RF_RTC_UC:
+ continue
+ table = self.get_global_table_by_route_family(route_family)
+ uninteresting_dest_count += \
+ table.clean_uninteresting_paths(interested_rts)
+
+ LOG.debug('Found %s destinations with uninteresting paths.' %
+ str(uninteresting_dest_count))
+
+ def import_single_vpn_path_to_all_vrfs(self, vpn_path, path_rts=None):
+ """Imports *vpn_path* into qualifying VRF tables.
+
+ Import RTs of each VRF table are matched against the RTs of *vpn_path*,
+ and if there are any RTs in common we import the path into that VRF.
+ """
+ assert (vpn_path.route_family in
+ (Vpnv4Path.ROUTE_FAMILY, Vpnv6Path.ROUTE_FAMILY))
+ LOG.debug('Importing path %s to qualifying VRFs' % vpn_path)
+
+ # If this path has no RTs we are done.
+ if not path_rts:
+ LOG.info('Encountered a path with no RTs: %s' % vpn_path)
+ return
+
+ # We match path RTs with all VRFs that are interested in them.
+ interested_tables = set()
+
+ # Get route family of the VRFs into which this VPN path can be imported
+ route_family = nlri.RF_IPv4_UC
+ if vpn_path.route_family != nlri.RF_IPv4_VPN:
+ route_family = nlri.RF_IPv6_UC
+ for rt in path_rts:
+ rt_rf_id = rt + ':' + str(route_family)
+ vrf_rt_tables = self._tables_for_rt.get(rt_rf_id)
+ if vrf_rt_tables:
+ interested_tables.update(vrf_rt_tables)
+
+ if interested_tables:
+ # We iterate over all VRF tables that are interested in the RT
+ # of the given path and import this path into them.
+ route_disc = vpn_path.nlri.route_disc
+ for vrf_table in interested_tables:
+ if not (vpn_path.source is None
+ and route_disc == vrf_table.vrf_conf.route_dist):
+ update_vrf_dest = vrf_table.import_vpn_path(vpn_path)
+ # Queue the destination for further processing.
+ if update_vrf_dest is not None:
+ self._signal_bus.\
+ dest_changed(update_vrf_dest)
+ else:
+ # If we do not have any VRF with import RT that match with path RT
+ LOG.debug('No VRF table found that imports RTs: %s' % path_rts)
+
+ def add_to_vrf(self, route_dist, prefix, next_hop, route_family):
+ """Adds `prefix` to VRF identified by `route_dist` with given
+ `next_hop`.
+
+ Returns assigned VPN label.
+ """
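+ # Usage sketch (hypothetical values; returns the VPN label assigned to
+ # the new path):
+ #
+ #     label = table_manager.add_to_vrf('65000:100', '10.10.0.0/16',
+ #                                      '172.16.0.1', VRF_RF_IPV4)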
+ from ryu.services.protocols.bgp.core import BgpCoreError
+
+ assert route_dist and prefix and next_hop
+ if route_family not in (VRF_RF_IPV4, VRF_RF_IPV6):
+ raise ValueError('Given route_family %s is not supported.' %
+ route_family)
+
+ vrf_table = None
+ table_id = (route_dist, route_family)
+ if route_family == VRF_RF_IPV4:
+ vrf_table = self._tables.get(table_id)
+ if vrf_table is None:
+ raise BgpCoreError(desc='VRF table for RD: %s does not '
+ 'exist.' % route_dist)
+ if not is_valid_ipv4_prefix(prefix) or not is_valid_ipv4(next_hop):
+ raise BgpCoreError(desc='Invalid Ipv4 prefix or nexthop.')
+ prefix = nlri.Ipv4(prefix)
+ elif route_family == VRF_RF_IPV6:
+ vrf_table = self._tables.get(table_id)
+ if vrf_table is None:
+ raise BgpCoreError(desc='VRF table for RD: %s does not '
+ 'exist.' % route_dist)
+ if not is_valid_ipv6_prefix(prefix) or not is_valid_ipv6(next_hop):
+ raise BgpCoreError(desc='Invalid Ipv6 prefix or nexthop.')
+ prefix = nlri.Ipv6(prefix)
+ return vrf_table.insert_vrf_path(
+ prefix, next_hop=next_hop,
+ gen_lbl=True
+ )
+
+ def remove_from_vrf(self, route_dist, prefix, route_family):
+ """Removes `prefix` from VRF identified by `route_dist`.
+
+ Returns assigned VPN label.
+ """
+ from ryu.services.protocols.bgp.core import BgpCoreError
+ # Validate the given route family and prefix.
+ if route_family not in (VRF_RF_IPV4, VRF_RF_IPV6):
+ raise BgpCoreError(desc='Unsupported route family %s' %
+ route_family)
+ val_ipv4 = route_family == VRF_RF_IPV4\
+ and is_valid_ipv4_prefix(prefix)
+ val_ipv6 = route_family == VRF_RF_IPV6\
+ and is_valid_ipv6_prefix(prefix)
+
+ if not val_ipv4 and not val_ipv6:
+ raise BgpCoreError(desc='Invalid prefix or nexthop.')
+
+ table_id = (route_dist, route_family)
+ if route_family == VRF_RF_IPV4:
+ vrf_table = self._tables.get(table_id)
+ if not vrf_table:
+ raise BgpCoreError(desc='Vrf for route distinguisher %s does '
+ 'not exist.' % route_dist)
+ prefix = nlri.Ipv4(prefix)
+ else:
+ vrf_table = self._tables.get(table_id)
+ if not vrf_table:
+ raise BgpCoreError(desc='Vrf for route distinguisher %s does '
+ 'not exist.' % route_dist)
+ prefix = nlri.Ipv6(prefix)
+ # We do not check whether we have a path to the given prefix; we simply
+ # issue a withdrawal. Hence multiple withdrawals have no side effect.
+ return vrf_table.insert_vrf_path(prefix, is_withdraw=True)
+
+ def clean_stale_routes(self, peer, route_family=None):
+ """Removes old routes learned from `peer` from the `route_family` table.
+
+ Each route/path's version number is compared with the `peer`'s current
+ version number.
+ """
+
+ if route_family is not None:
+ if route_family not in SUPPORTED_GLOBAL_RF:
+ raise ValueError('Given route family %s is not supported.' %
+ route_family)
+
+ tables = [self._global_tables.get(route_family)]
+ else:
+ tables = self._global_tables.values()
+ for table in tables:
+ table.cleanup_paths_for_peer(peer)
diff --git a/ryu/services/protocols/bgp/info_base/__init__.py b/ryu/services/protocols/bgp/info_base/__init__.py
new file mode 100644
index 00000000..bfd9d930
--- /dev/null
+++ b/ryu/services/protocols/bgp/info_base/__init__.py
@@ -0,0 +1,3 @@
+"""
+ Package for Information Base of various kind and for different afi/safi.
+"""
diff --git a/ryu/services/protocols/bgp/info_base/base.py b/ryu/services/protocols/bgp/info_base/base.py
new file mode 100644
index 00000000..61385cfe
--- /dev/null
+++ b/ryu/services/protocols/bgp/info_base/base.py
@@ -0,0 +1,795 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ Defines some model classes related to BGP.
+
+ These classes include types used in saving information sent/received over
+ BGP sessions.
+"""
+import abc
+from abc import ABCMeta
+from abc import abstractmethod
+from copy import copy
+import logging
+
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv4_UC
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RtNlri
+from ryu.services.protocols.bgp.protocols.bgp.pathattr import ExtCommunity
+
+from ryu.services.protocols.bgp.base import OrderedDict
+from ryu.services.protocols.bgp.constants import VPN_TABLE
+from ryu.services.protocols.bgp.constants import VRF_TABLE
+from ryu.services.protocols.bgp.model import OutgoingRoute
+from ryu.services.protocols.bgp.processor import BPR_ONLY_PATH
+from ryu.services.protocols.bgp.processor import BPR_UNKNOWN
+
+
+LOG = logging.getLogger('bgpspeaker.info_base.base')
+
+
+class Table(object):
+ """A container for holding information about destination/prefixes.
+
+ Routing information base for a particular afi/safi.
+ This is a base class which should be sub-classed for different route
+ family. A table can be uniquely identified by (Route Family, Scope Id).
+ """
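+ # Minimal subclassing sketch (hypothetical names; concrete tables such as
+ # Vpnv4Table pair a ROUTE_FAMILY with a matching Destination subclass):
+ #
+ #     class MyTable(Table):
+ #         ROUTE_FAMILY = RF_IPv4_UC
+ #
+ #         def _table_key(self, nlri):
+ #             return str(nlri)   # must uniquely identify the NLRI
+ #
+ #         def _create_dest(self, nlri):
+ #             return MyDestination(self, nlri)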
+ __metaclass__ = abc.ABCMeta
+ ROUTE_FAMILY = RF_IPv4_UC
+
+ def __init__(self, scope_id, core_service, signal_bus):
+ self._destinations = dict()
+ # Scope in which this table exists.
+ # If this table represents the VRF, then this could be a VPN ID.
+ # For global/VPN tables this should be None
+ self._scope_id = scope_id
+ self._signal_bus = signal_bus
+ self._core_service = core_service
+
+ @property
+ def route_family(self):
+ return self.__class__.ROUTE_FAMILY
+
+ @property
+ def core_service(self):
+ return self._core_service
+
+ @property
+ def scope_id(self):
+ return self._scope_id
+
+ @abstractmethod
+ def _create_dest(self, nlri):
+ """Creates a destination specific to this table.
+ Returns a destination that stores information about paths to *nlri*.
+ """
+ raise NotImplementedError()
+
+ def itervalues(self):
+ return self._destinations.itervalues()
+
+ def insert(self, path):
+ self._validate_path(path)
+ self._validate_nlri(path.nlri)
+ if path.is_withdraw:
+ updated_dest = self._insert_withdraw(path)
+ else:
+ updated_dest = self._insert_path(path)
+ return updated_dest
+
+ def insert_sent_route(self, sent_route):
+ self._validate_path(sent_route.path)
+ dest = self._get_or_create_dest(sent_route.path.nlri)
+ dest.add_sent_route(sent_route)
+
+ def _insert_path(self, path):
+ """Add new path to destination identified by given prefix.
+ """
+ assert path.is_withdraw is False
+ dest = self._get_or_create_dest(path.nlri)
+ # Add given path to matching Dest.
+ dest.add_new_path(path)
+ # Return updated destination.
+ return dest
+
+ def _insert_withdraw(self, path):
+ """Appends given path to withdraw list of Destination for given prefix.
+ """
+ assert path.is_withdraw is True
+ dest = self._get_or_create_dest(path.nlri)
+ # Add given path to matching destination.
+ dest.add_withdraw(path)
+ # Return updated destination.
+ return dest
+
+ def cleanup_paths_for_peer(self, peer):
+ """Remove old paths whose source is `peer`.
+
+ Old paths have source version number that is less than current peer
+ version number. Also removes sent paths to this peer.
+ """
+ LOG.debug('Cleaning paths from table %s for peer %s' % (self, peer))
+ for dest in self.itervalues():
+ # Remove paths learned from this source
+ paths_deleted = dest.remove_old_paths_from_source(peer)
+ # Remove sent paths to this peer
+ had_sent = dest.remove_sent_route(peer)
+ if had_sent:
+ LOG.debug('Removed sent route %s for %s' % (dest.nlri, peer))
+ # If any paths are removed we enqueue respective destination for
+ # future processing.
+ if paths_deleted:
+ self._signal_bus.dest_changed(dest)
+
+ def clean_uninteresting_paths(self, interested_rts):
+ """Cleans the table of any paths that do not have any RT in common
+ with `interested_rts`.
+ Parameters:
+ - `interested_rts`: (set) of RT that are of interest/that need to
+ be preserved
+ """
+ LOG.debug('Cleaning table %s for given interested RTs %s' %
+ (self, interested_rts))
+ uninteresting_dest_count = 0
+ for dest in self.itervalues():
+ added_withdraw = \
+ dest.withdraw_unintresting_paths(interested_rts)
+ if added_withdraw:
+ self._signal_bus.dest_changed(dest)
+ uninteresting_dest_count += 1
+ return uninteresting_dest_count
+
+ def delete_dest_by_nlri(self, nlri):
+ """Deletes the destination identified by given prefix.
+
+ Returns the deleted destination if a match is found. If no match is
+ found, returns None.
+ """
+ self._validate_nlri(nlri)
+ dest = self._get_dest(nlri)
+ if dest:
+ self._destinations.pop(self._table_key(nlri))
+ return dest
+
+ def delete_dest(self, dest):
+ del self._destinations[self._table_key(dest.nlri)]
+
+ def _validate_nlri(self, nlri):
+ """Validates that *nlri* is of the type that this table stores/supports.
+ """
+ if not nlri or not (nlri.route_family == self.route_family):
+ raise ValueError('Invalid Vpnv4 prefix given.')
+
+ def _validate_path(self, path):
+ """Check if the given path belongs to this table's route family.
+
+ Raises ValueError if the path is not valid for this table.
+ """
+ if not path or not (path.route_family == self.route_family):
+ raise ValueError('Invalid path. Expected instance of'
+ ' Vpnv4 route family path, got %s.' % path)
+
+ def _get_or_create_dest(self, nlri):
+ table_key = self._table_key(nlri)
+ dest = self._destinations.get(table_key)
+ # If destination for given prefix does not exist we create it.
+ if dest is None:
+ dest = self._create_dest(nlri)
+ self._destinations[table_key] = dest
+ return dest
+
+ def _get_dest(self, nlri):
+ table_key = self._table_key(nlri)
+ dest = self._destinations.get(table_key)
+ return dest
+
+ def is_for_vrf(self):
+ """Returns true if this table instance represents a VRF.
+ """
+ return self.scope_id is not None
+
+ def __str__(self):
+ return 'Table(scope_id: %s, rf: %s)' % (self.scope_id,
+ self.route_family)
+
+ @abstractmethod
+ def _table_key(self, nlri):
+ """Return a key that will uniquely identify this NLRI inside
+ this table.
+ """
+ raise NotImplementedError()
+
+
+class NonVrfPathProcessingMixin(object):
+ """Mixin reacting to best-path selection algorithm on main table
+ level. Intended to use with "Destination" subclasses.
+ Applies to most of Destinations except for VrfDest
+ because they are processed at VRF level, so different logic applies.
+ """
+
+ def _best_path_lost(self):
+ self._best_path = None
+
+ if self._sent_routes:
+ # We have to send update-withdraw to all peers to whom old best
+ # path was sent.
+ for sent_route in self._sent_routes.values():
+ sent_path = sent_route.path
+ withdraw_clone = sent_path.clone(for_withdrawal=True)
+ outgoing_route = OutgoingRoute(withdraw_clone)
+ sent_route.sent_peer.enque_outgoing_msg(outgoing_route)
+ LOG.debug('Sending withdrawal to %s for %s' %
+ (sent_route.sent_peer, outgoing_route))
+
+ # Have to clear sent_route list for this destination as
+ # best path is removed.
+ self._sent_routes = {}
+
+ def _new_best_path(self, new_best_path):
+ old_best_path = self._best_path
+ self._best_path = new_best_path
+ LOG.debug('New best path selected for destination %s' % (self))
+
+ # If old best path was withdrawn
+ if (old_best_path and old_best_path not in self._known_path_list
+ and self._sent_routes):
+ # Have to clear sent_route list for this destination as
+ # best path is removed.
+ self._sent_routes = {}
+
+ # Communicate that we have new best path to all qualifying
+ # bgp-peers.
+ pm = self._core_service.peer_manager
+ pm.comm_new_best_to_bgp_peers(new_best_path)
+
+
+class Destination(object):
+ """State about a particular destination.
+
+    For example, an IP prefix. This is the data structure that hangs off of
+    a routing information base *Table*.
+ """
+
+ __metaclass__ = abc.ABCMeta
+ ROUTE_FAMILY = RF_IPv4_UC
+
+ def __init__(self, table, nlri):
+ # Validate arguments.
+ if table.route_family != self.__class__.ROUTE_FAMILY:
+ raise ValueError('Table and destination route family '
+ 'do not match.')
+
+ # Back-pointer to the table that contains this destination.
+ self._table = table
+
+ self._core_service = table.core_service
+
+ self._nlri = nlri
+
+ # List of all known processed paths,
+ self._known_path_list = []
+
+ # List of new un-processed paths.
+ self._new_path_list = []
+
+        # Pointer to best-path. One of the known paths.
+ self._best_path = None
+
+ # Reason current best path was chosen as best path.
+ self._best_path_reason = None
+
+ # List of withdrawn paths.
+ self._withdraw_list = []
+
+ # List of SentRoute objects. This is the Adj-Rib-Out for this
+ # destination. (key/value: peer/sent_route)
+ self._sent_routes = {}
+
+ # This is an (optional) list of paths that were created as a
+ # result of exporting this route to other tables.
+ # self.exported_paths = None
+
+ # Automatically generated
+ #
+ # On work queue for BGP processor.
+ # self.next_dest_to_process
+ # self.prev_dest_to_process
+
+ @property
+ def route_family(self):
+ return self.__class__.ROUTE_FAMILY
+
+ @property
+ def nlri(self):
+ return self._nlri
+
+ @property
+ def best_path(self):
+ return self._best_path
+
+ @property
+ def best_path_reason(self):
+ return self._best_path_reason
+
+ @property
+ def known_path_list(self):
+ return self._known_path_list[:]
+
+ @property
+ def sent_routes(self):
+ return self._sent_routes.values()
+
+ def add_new_path(self, new_path):
+ self._validate_path(new_path)
+ self._new_path_list.append(new_path)
+
+ def add_withdraw(self, withdraw):
+ self._validate_path(withdraw)
+ self._withdraw_list.append(withdraw)
+
+ def add_sent_route(self, sent_route):
+ self._sent_routes[sent_route.sent_peer] = sent_route
+
+ def remove_sent_route(self, peer):
+ if self.was_sent_to(peer):
+ del self._sent_routes[peer]
+ return True
+ return False
+
+ def was_sent_to(self, peer):
+ if peer in self._sent_routes.keys():
+ return True
+ return False
+
+ def _process(self):
+ """Calculate best path for this destination.
+
+        A destination is processed when the known paths to this destination
+        have changed: we might have new paths or withdrawals of previously
+        known paths. Removes withdrawn paths from, and adds newly learned
+        paths to, the known path list. Uses the bgp best-path calculation
+        algorithm on the new list of known paths to choose the new best-path
+        and communicates it to the core service.
+ """
+ LOG.debug('Processing destination: %s', self)
+ new_best_path, reason = self._process_paths()
+ self._best_path_reason = reason
+
+ if self._best_path == new_best_path:
+ return
+
+ if new_best_path is None:
+ # we lost best path
+ assert not self._known_path_list, repr(self._known_path_list)
+ return self._best_path_lost()
+ else:
+ return self._new_best_path(new_best_path)
+
+ @abstractmethod
+ def _best_path_lost(self):
+ raise NotImplementedError()
+
+ @abstractmethod
+ def _new_best_path(self, new_best_path):
+ raise NotImplementedError()
+
+ @classmethod
+ def _validate_path(cls, path):
+ if not path or path.route_family != cls.ROUTE_FAMILY:
+ raise ValueError(
+ 'Invalid path. Expected %s path got %s' %
+ (cls.ROUTE_FAMILY, path)
+ )
+
+ def process(self):
+ self._process()
+ if not self._known_path_list and not self._best_path:
+ self._remove_dest_from_table()
+
+ def _remove_dest_from_table(self):
+ self._table.delete_dest(self)
+
+ def remove_old_paths_from_source(self, source):
+ """Removes known old paths from *source*.
+
+        Returns the list of paths that were found to be old and were
+        removed/deleted (empty if there were none).
+ """
+ assert(source and hasattr(source, 'version_num'))
+ removed_paths = []
+        # Iterate over the paths in reverse order so that deleting entries
+        # does not disturb the indices that are still to be visited.
+ source_ver_num = source.version_num
+ for path_idx in range(len(self._known_path_list) - 1, -1, -1):
+ path = self._known_path_list[path_idx]
+ if (path.source == source and
+ path.source_version_num < source_ver_num):
+                # Stale path from this peer (predates its current version);
+                # remove it.
+ del(self._known_path_list[path_idx])
+ removed_paths.append(path)
+ return removed_paths
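+
+    # Illustrative example (hypothetical peer state): if `peer.version_num`
+    # has moved from 2 to 3 after a session flap, then
+    #
+    #   stale = dest.remove_old_paths_from_source(peer)
+    #
+    # removes from the known list every path whose source is `peer` and whose
+    # source_version_num is still 2, and returns those paths.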
+
+ def withdraw_if_sent_to(self, peer):
+ """Sends a withdraw for this destination to given `peer`.
+
+        Checks the records to see whether we indeed advertised this
+        destination to the given peer and, if so, creates a withdraw for the
+        advertised route and sends it to the peer.
+ Parameter:
+ - `peer`: (Peer) peer to send withdraw to
+ """
+        from ryu.services.protocols.bgp.peer import Peer
+        if not isinstance(peer, Peer):
+            raise TypeError('Currently we only support sending withdrawals'
+                            ' to an instance of Peer')
+ sent_route = self._sent_routes.pop(peer, None)
+ if not sent_route:
+ return False
+
+ sent_path = sent_route.path
+ withdraw_clone = sent_path.clone(for_withdrawal=True)
+ outgoing_route = OutgoingRoute(withdraw_clone)
+ sent_route.sent_peer.enque_outgoing_msg(outgoing_route)
+ return True
+
+ def _process_paths(self):
+ """Calculates best-path among known paths for this destination.
+
+ Returns:
+         - A (best path, reason for selection) tuple.
+
+ Modifies destination's state related to stored paths. Removes withdrawn
+ paths from known paths. Also, adds new paths to known paths.
+ """
+ # First remove the withdrawn paths.
+ # Note: If we want to support multiple paths per destination we may
+ # have to maintain sent-routes per path.
+ self._remove_withdrawals()
+
+ # Have to select best-path from available paths and new paths.
+ # If we do not have any paths, then we no longer have best path.
+ if not self._known_path_list and len(self._new_path_list) == 1:
+ # If we do not have any old but one new path
+ # it becomes best path.
+ self._known_path_list.append(self._new_path_list[0])
+ del(self._new_path_list[0])
+ return self._known_path_list[0], BPR_ONLY_PATH
+
+ # If we have a new version of old/known path we use it and delete old
+ # one.
+ self._remove_old_paths()
+
+ # Collect all new paths into known paths.
+ self._known_path_list.extend(self._new_path_list)
+
+ # Clear new paths as we copied them.
+ del(self._new_path_list[:])
+
+ # If we do not have any paths to this destination, then we do not have
+ # new best path.
+ if not self._known_path_list:
+ return None, BPR_UNKNOWN
+
+ # Compute new best path
+ current_best_path, reason = self._compute_best_known_path()
+ return current_best_path, reason
+
+ def _remove_withdrawals(self):
+ """Removes withdrawn paths.
+
+ Note:
+        We may have a disproportionate number of withdrawals compared to
+        known paths, since not all paths get installed into the table due to
+        bgp policy, yet we can still receive withdrawals for such paths
+        (withdrawals may not be stopped by the same policies).
+ """
+
+ LOG.debug('Removing %s withdrawals' % len(self._withdraw_list))
+
+ # If we have no withdrawals, we have nothing to do.
+ if not self._withdraw_list:
+ return
+
+        # If we have some withdrawals and no known paths, it is safe to
+        # delete these withdrawals.
+ if not self._known_path_list:
+ LOG.debug('Found %s withdrawals for path(s) that did not get'
+ ' installed.' % len(self._withdraw_list))
+ del(self._withdraw_list[:])
+ return
+
+ # If we have some known paths and some withdrawals, we find matches and
+ # delete them first.
+ matches = set()
+ w_matches = set()
+ # Match all withdrawals from destination paths.
+ for withdraw in self._withdraw_list:
+ match = None
+ for path in self._known_path_list:
+                # We have a match if the sources are the same.
+ if path.source == withdraw.source:
+ match = path
+ matches.add(path)
+ w_matches.add(withdraw)
+ # One withdraw can remove only one path.
+ break
+            # We do not have any match for this withdraw.
+ if not match:
+                LOG.debug('No matching path for withdraw found, maybe the '
+                          'path was not installed into the table: %s' %
+                          withdraw)
+ # If we have partial match.
+ if len(matches) != len(self._withdraw_list):
+ LOG.debug('Did not find match for some withdrawals. Number of '
+ 'matches(%s), number of withdrawals (%s)' %
+ (len(matches), len(self._withdraw_list)))
+
+ # Clear matching paths and withdrawals.
+ for match in matches:
+ self._known_path_list.remove(match)
+ for w_match in w_matches:
+ self._withdraw_list.remove(w_match)
+
+ def _remove_old_paths(self):
+ """Identifies which of known paths are old and removes them.
+
+ Known paths will no longer have paths whose new version is present in
+ new paths.
+ """
+ new_paths = self._new_path_list
+ known_paths = self._known_path_list
+ for new_path in new_paths:
+ old_paths = []
+ for path in known_paths:
+                # Here we only check whether the source is the same and do
+                # not compare path version numbers, as new paths are an
+                # implicit withdrawal of old paths and a RouteRefresh (as
+                # opposed to an EnhancedRouteRefresh) delivers the same paths
+                # again.
+ if new_path.source == path.source:
+ old_paths.append(path)
+ break
+
+ for old_path in old_paths:
+ known_paths.remove(old_path)
+ LOG.debug('Implicit withdrawal of old path, since we have'
+ ' learned new path from same source: %s' % old_path)
+
+ def _compute_best_known_path(self):
+ """Computes the best path among known paths.
+
+ Returns current best path among `known_paths`.
+ """
+ if not self._known_path_list:
+ from ryu.services.protocols.bgp.processor import BgpProcessorError
+            raise BgpProcessorError(desc='Need at least one known path to'
+ ' compute best path')
+
+        # We pick the first path as the current best path. This helps break a
+        # tie between two new paths learned in one cycle for which the
+        # best-path calculation steps would otherwise end in a tie.
+ current_best_path = self._known_path_list[0]
+ best_path_reason = BPR_ONLY_PATH
+ for next_path in self._known_path_list[1:]:
+ from ryu.services.protocols.bgp.processor import compute_best_path
+ # Compare next path with current best path.
+ new_best_path, reason = \
+ compute_best_path(self._core_service.asn, current_best_path,
+ next_path)
+ best_path_reason = reason
+ if new_best_path is not None:
+ current_best_path = new_best_path
+
+ return current_best_path, best_path_reason
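+
+    # Note on the loop above: compute_best_path() (see processor.py) returns
+    # the winning path (or None, in which case the current best is kept)
+    # together with the reason. Since the first known path seeds
+    # current_best_path, ties resolve in favour of the earlier-learned path.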
+
+ def withdraw_unintresting_paths(self, interested_rts):
+ """Withdraws paths that are no longer interesting.
+
+ For all known paths that do not have any route target in common with
+ given `interested_rts` we add a corresponding withdraw.
+
+ Returns True if we added any withdraws.
+ """
+ add_withdraws = False
+ for path in self._known_path_list:
+ if not path.has_rts_in(interested_rts):
+ self.withdraw_path(path)
+ add_withdraws = True
+ return add_withdraws
+
+ def withdraw_path(self, path):
+ if path not in self.known_path_list:
+ raise ValueError("Path not known, no need to withdraw")
+ withdraw = path.clone(for_withdrawal=True)
+ self._withdraw_list.append(withdraw)
+
+ def to_dict(self):
+ return {'table': str(self._table),
+ 'nlri': str(self._nlri),
+ 'paths': self._known_path_list[:],
+ 'withdraws': self._get_num_withdraws()}
+
+ def __str__(self):
+ return ('Destination(table: %s, nlri: %s, paths: %s, withdraws: %s,'
+ ' new paths: %s)' % (self._table, str(self._nlri),
+ len(self._known_path_list),
+ len(self._withdraw_list),
+ len(self._new_path_list)))
+
+ def _get_num_valid_paths(self):
+ return len(self._known_path_list)
+
+ def _get_num_withdraws(self):
+ return len(self._withdraw_list)
+
+
+class Path(object):
+ """Represents a way of reaching an IP destination.
+
+ Also contains other meta-data given to us by a specific source (such as a
+ peer).
+ """
+ __metaclass__ = ABCMeta
+ __slots__ = ('_source', '_path_attr_map', '_nlri', '_source_version_num',
+ '_exported_from', '_nexthop', 'next_path', 'prev_path',
+ '_is_withdraw', 'med_set_by_target_neighbor')
+ ROUTE_FAMILY = RF_IPv4_UC
+
+ def __init__(self, source, nlri, src_ver_num, pattrs=None, nexthop=None,
+ is_withdraw=False, med_set_by_target_neighbor=False):
+ """Initializes Ipv4 path.
+
+        If this path is not a withdraw, then path attributes and a nexthop
+        should both be provided.
+ Parameters:
+ - `source`: (Peer/str) source of this path.
+        - `nlri`: Nlri instance matching this class's route family.
+ - `src_ver_num`: (int) version number of *source* when this path
+ was learned.
+ - `pattrs`: (OrderedDict) various path attributes for this path.
+ - `nexthop`: (str) nexthop advertised for this path.
+ - `is_withdraw`: (bool) True if this represents a withdrawal.
+ """
+ self.med_set_by_target_neighbor = med_set_by_target_neighbor
+ if nlri.route_family != self.__class__.ROUTE_FAMILY:
+ raise ValueError('NLRI and Path route families do not'
+ ' match (%s, %s).' %
+ (nlri.route_family, self.__class__.ROUTE_FAMILY))
+
+        # Currently paths injected directly into a VRF have only one source:
+        # src_peer can be None to denote the NC, otherwise it has to be an
+        # instance of Peer. Paths can also be exported from one VRF and then
+        # imported into another VRF; in such cases the source is denoted by
+        # the string VPN_TABLE.
+ if not (source is None or
+ hasattr(source, 'version_num') or
+ source in (VRF_TABLE, VPN_TABLE)):
+ raise ValueError('Invalid or Unsupported source for path: %s' %
+ source)
+
+        # If this path is not a withdraw path, then it should have path
+        # attributes and a nexthop.
+        if not is_withdraw and not (pattrs and nexthop):
+            raise ValueError('Need to provide nexthop and pattrs '
+                             'for a path that is not a withdraw.')
+
+ # The entity (peer) that gave us this path.
+ self._source = source
+
+        # Path attributes of this path.
+ if pattrs:
+ self._path_attr_map = copy(pattrs)
+ else:
+ self._path_attr_map = OrderedDict()
+
+ # NLRI that this path represents.
+ self._nlri = nlri
+
+ # If given nlri is withdrawn.
+ self._is_withdraw = is_withdraw
+
+ # @see Source.version_num
+ self._source_version_num = src_ver_num
+
+ self._nexthop = nexthop
+
+ # Automatically generated.
+ #
+ # self.next_path
+ # self.prev_path
+
+ # The Destination from which this path was exported, if any.
+ self._exported_from = None
+
+ @property
+ def source_version_num(self):
+ return self._source_version_num
+
+ @property
+ def source(self):
+ return self._source
+
+ @property
+ def route_family(self):
+ return self.__class__.ROUTE_FAMILY
+
+ @property
+ def nlri(self):
+ return self._nlri
+
+ @property
+ def is_withdraw(self):
+ return self._is_withdraw
+
+ @property
+ def pathattr_map(self):
+ return copy(self._path_attr_map)
+
+ @property
+ def nexthop(self):
+ return self._nexthop
+
+ def get_pattr(self, pattr_type, default=None):
+ """Returns path attribute of given type.
+
+ Returns None if we do not attribute of type *pattr_type*.
+ """
+ return self._path_attr_map.get(pattr_type, default)
+
+ def clone(self, for_withdrawal=False):
+ pathattrs = None
+ if not for_withdrawal:
+ pathattrs = self.pathattr_map
+ clone = self.__class__(
+ self.source,
+ self.nlri,
+ self.source_version_num,
+ pattrs=pathattrs,
+ nexthop=self.nexthop,
+ is_withdraw=for_withdrawal
+ )
+ return clone
+
+ def get_rts(self):
+ extcomm_attr = self._path_attr_map.get(ExtCommunity.ATTR_NAME)
+ if extcomm_attr is None:
+ rts = []
+ else:
+ rts = extcomm_attr.rt_list[:]
+ return rts
+
+ def has_rts_in(self, interested_rts):
+ """Returns True if this `Path` has any `ExtCommunity` attribute
+ route target common with `interested_rts`.
+ """
+ assert isinstance(interested_rts, set)
+ curr_rts = self.get_rts()
+ # Add default RT to path RTs so that we match interest for peers who
+ # advertised default RT
+ curr_rts.append(RtNlri.DEFAULT_RT)
+
+ return not interested_rts.isdisjoint(curr_rts)
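+
+    # Illustrative example (RT string format is assumed):
+    #
+    #   path.get_rts()                        # e.g. ['65000:100']
+    #   path.has_rts_in(set(['65000:200']))   # -> False for that path
+    #   path.has_rts_in(set(['65000:100']))   # -> True
+    #
+    # Because RtNlri.DEFAULT_RT is appended to the path's RTs, an
+    # `interested_rts` set containing the default RT matches every path.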
+
+ def __str__(self):
+ return (
+ 'Path(source: %s, nlri: %s, source ver#: %s, '
+ 'path attrs.: %s, nexthop: %s, is_withdraw: %s)' %
+ (
+ self._source, self._nlri, self._source_version_num,
+ self._path_attr_map, self._nexthop, self._is_withdraw
+ )
+ )
+
+ def __repr__(self):
+ return ('Path(%s, %s, %s, %s, %s, %s)' % (
+ self._source, self._nlri, self._source_version_num,
+ self._path_attr_map, self._nexthop, self._is_withdraw))
diff --git a/ryu/services/protocols/bgp/info_base/rtc.py b/ryu/services/protocols/bgp/info_base/rtc.py
new file mode 100644
index 00000000..fd65b13e
--- /dev/null
+++ b/ryu/services/protocols/bgp/info_base/rtc.py
@@ -0,0 +1,73 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Defines data types and models required specifically for RTC support.
+"""
+
+import logging
+
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_RTC_UC
+
+from ryu.services.protocols.bgp.info_base.base import Destination
+from ryu.services.protocols.bgp.info_base.base import NonVrfPathProcessingMixin
+from ryu.services.protocols.bgp.info_base.base import Path
+from ryu.services.protocols.bgp.info_base.base import Table
+
+LOG = logging.getLogger('bgpspeaker.info_base.rtc')
+
+
+class RtcTable(Table):
+ """Global table to store RT membership information.
+
+    Uses `RtcDest` to store destination information for each known RT
+    NLRI path.
+ """
+ ROUTE_FAMILY = RF_RTC_UC
+
+ def __init__(self, core_service, signal_bus):
+ Table.__init__(self, None, core_service, signal_bus)
+
+ def _table_key(self, rtc_nlri):
+ """Return a key that will uniquely identify this RT NLRI inside
+ this table.
+ """
+ return str(rtc_nlri.origin_as) + ':' + rtc_nlri.route_target
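+
+    # Illustrative example (field values are assumed): an RT NLRI with
+    # origin_as 65000 and route_target '65000:100' is keyed as
+    # '65000:65000:100'.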
+
+ def _create_dest(self, nlri):
+ return RtcDest(self, nlri)
+
+ def __str__(self):
+ return 'RtcTable(scope_id: %s, rf: %s)' % (self.scope_id,
+ self.route_family)
+
+
+class RtcDest(Destination, NonVrfPathProcessingMixin):
+ ROUTE_FAMILY = RF_RTC_UC
+
+ def _new_best_path(self, new_best_path):
+ NonVrfPathProcessingMixin._new_best_path(self, new_best_path)
+
+ def _best_path_lost(self):
+ NonVrfPathProcessingMixin._best_path_lost(self)
+
+
+class RtcPath(Path):
+ ROUTE_FAMILY = RF_RTC_UC
+
+ def __init__(self, source, nlri, src_ver_num, pattrs=None,
+ nexthop='0.0.0.0', is_withdraw=False,
+ med_set_by_target_neighbor=False):
+ Path.__init__(self, source, nlri, src_ver_num, pattrs, nexthop,
+ is_withdraw, med_set_by_target_neighbor)
diff --git a/ryu/services/protocols/bgp/info_base/vpn.py b/ryu/services/protocols/bgp/info_base/vpn.py
new file mode 100644
index 00000000..3b0c81f2
--- /dev/null
+++ b/ryu/services/protocols/bgp/info_base/vpn.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Defines base data types and models required specifically for VPN support.
+"""
+
+import abc
+import logging
+
+from ryu.services.protocols.bgp.info_base.base import Destination
+from ryu.services.protocols.bgp.info_base.base import NonVrfPathProcessingMixin
+from ryu.services.protocols.bgp.info_base.base import Path
+from ryu.services.protocols.bgp.info_base.base import Table
+
+LOG = logging.getLogger('bgpspeaker.info_base.vpn')
+
+
+class VpnTable(Table):
+ """Global table to store VPNv4 routing information.
+
+ Uses `VpnvXDest` to store destination information for each known vpnvX
+ paths.
+ """
+ ROUTE_FAMILY = None
+ VPN_DEST_CLASS = None
+
+ def __init__(self, core_service, signal_bus):
+ super(VpnTable, self).__init__(None, core_service, signal_bus)
+
+ def _table_key(self, vpn_nlri):
+ """Return a key that will uniquely identify this vpnvX NLRI inside
+ this table.
+ """
+ return vpn_nlri.route_disc + ':' + vpn_nlri.prefix
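+
+    # Illustrative example (field values are assumed): a VPN NLRI with
+    # route_disc '65000:1' and prefix '10.1.1.0/24' is keyed as
+    # '65000:1:10.1.1.0/24'.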
+
+ def _create_dest(self, nlri):
+ return self.VPN_DEST_CLASS(self, nlri)
+
+ def __str__(self):
+ return '%s(scope_id: %s, rf: %s)' % (
+ self.__class__.__name__, self.scope_id, self.route_family
+ )
+
+
+class VpnPath(Path):
+ __metaclass__ = abc.ABCMeta
+ ROUTE_FAMILY = None
+ VRF_PATH_CLASS = None
+ NLRI_CLASS = None
+
+ def clone_to_vrf(self, is_withdraw=False):
+ vrf_nlri = self.NLRI_CLASS(self._nlri.prefix)
+
+ pathattrs = None
+ if not is_withdraw:
+ pathattrs = self.pathattr_map
+
+ vrf_path = self.VRF_PATH_CLASS(
+ self.VRF_PATH_CLASS.create_puid(
+ self._nlri.route_disc,
+ self._nlri.prefix
+ ),
+ self.source, vrf_nlri,
+ self.source_version_num,
+ pattrs=pathattrs,
+ nexthop=self.nexthop,
+ is_withdraw=is_withdraw,
+ label_list=self._nlri.label_list)
+ return vrf_path
+
+
+class VpnDest(Destination, NonVrfPathProcessingMixin):
+ """Base class for VPN destinations."""
+
+ __metaclass__ = abc.ABCMeta
+
+ def _best_path_lost(self):
+ old_best_path = self._best_path
+ NonVrfPathProcessingMixin._best_path_lost(self)
+
+ # Best-path might have been imported into VRF tables, we have to
+ # withdraw from them, if the source is a peer.
+ if old_best_path:
+ withdraw_clone = old_best_path.clone(for_withdrawal=True)
+ tm = self._core_service.table_manager
+ tm.import_single_vpn_path_to_all_vrfs(
+ withdraw_clone, path_rts=old_best_path.get_rts()
+ )
+
+ def _new_best_path(self, best_path):
+ NonVrfPathProcessingMixin._new_best_path(self, best_path)
+
+ # Extranet feature requires that we import new best path into VRFs.
+ tm = self._core_service.table_manager
+ tm.import_single_vpn_path_to_all_vrfs(
+ self._best_path, self._best_path.get_rts())
diff --git a/ryu/services/protocols/bgp/info_base/vpnv4.py b/ryu/services/protocols/bgp/info_base/vpnv4.py
new file mode 100644
index 00000000..0785801f
--- /dev/null
+++ b/ryu/services/protocols/bgp/info_base/vpnv4.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Defines data types and models required specifically for VPNv4 support.
+"""
+
+import logging
+
+from ryu.services.protocols.bgp.protocols.bgp.nlri import Ipv4
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv4_VPN
+
+from ryu.services.protocols.bgp.info_base.vpn import VpnDest
+from ryu.services.protocols.bgp.info_base.vpn import VpnPath
+from ryu.services.protocols.bgp.info_base.vpn import VpnTable
+
+LOG = logging.getLogger('bgpspeaker.info_base.vpnv4')
+
+
+class Vpnv4Dest(VpnDest):
+ """VPNv4 Destination
+
+ Store IPv4 Paths.
+ """
+ ROUTE_FAMILY = RF_IPv4_VPN
+
+
+class Vpnv4Table(VpnTable):
+ """Global table to store VPNv4 routing information.
+
+    Uses `Vpnv4Dest` to store destination information for each known vpnv4
+    path.
+ """
+ ROUTE_FAMILY = RF_IPv4_VPN
+ VPN_DEST_CLASS = Vpnv4Dest
+
+
+class Vpnv4Path(VpnPath):
+ """Represents a way of reaching an VPNv4 destination."""
+ ROUTE_FAMILY = RF_IPv4_VPN
+    VRF_PATH_CLASS = None  # defined in __init__ to avoid a cyclic import
+ NLRI_CLASS = Ipv4
+
+ def __init__(self, *args, **kwargs):
+ super(Vpnv4Path, self).__init__(*args, **kwargs)
+        from ryu.services.protocols.bgp.info_base.vrf4 import Vrf4Path
+ self.VRF_PATH_CLASS = Vrf4Path
diff --git a/ryu/services/protocols/bgp/info_base/vpnv6.py b/ryu/services/protocols/bgp/info_base/vpnv6.py
new file mode 100644
index 00000000..1ed11dbf
--- /dev/null
+++ b/ryu/services/protocols/bgp/info_base/vpnv6.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Defines data types and models required specifically for VPNv6 support.
+"""
+
+import logging
+
+from ryu.services.protocols.bgp.protocols.bgp.nlri import Ipv6
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv6_VPN
+
+from ryu.services.protocols.bgp.info_base.vpn import VpnDest
+from ryu.services.protocols.bgp.info_base.vpn import VpnPath
+from ryu.services.protocols.bgp.info_base.vpn import VpnTable
+
+LOG = logging.getLogger('bgpspeaker.info_base.vpnv6')
+
+
+class Vpnv6Dest(VpnDest):
+ """VPNv6 destination
+
+ Stores IPv6 paths.
+ """
+ ROUTE_FAMILY = RF_IPv6_VPN
+
+
+class Vpnv6Table(VpnTable):
+ """Global table to store VPNv6 routing information
+
+    Uses `Vpnv6Dest` to store destination information for each known vpnv6
+    path.
+ """
+ ROUTE_FAMILY = RF_IPv6_VPN
+ VPN_DEST_CLASS = Vpnv6Dest
+
+
+class Vpnv6Path(VpnPath):
+ """Represents a way of reaching an VPNv4 destination."""
+ ROUTE_FAMILY = RF_IPv6_VPN
+    VRF_PATH_CLASS = None  # defined in __init__ to avoid a cyclic import
+ NLRI_CLASS = Ipv6
+
+ def __init__(self, *args, **kwargs):
+ super(Vpnv6Path, self).__init__(*args, **kwargs)
+        from ryu.services.protocols.bgp.info_base.vrf6 import Vrf6Path
+ self.VRF_PATH_CLASS = Vrf6Path
diff --git a/ryu/services/protocols/bgp/info_base/vrf.py b/ryu/services/protocols/bgp/info_base/vrf.py
new file mode 100644
index 00000000..b2618a9d
--- /dev/null
+++ b/ryu/services/protocols/bgp/info_base/vrf.py
@@ -0,0 +1,530 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Defines base data types and models required specifically for VRF support.
+"""
+
+import abc
+import logging
+
+from ryu.services.protocols.bgp.base import OrderedDict
+from ryu.services.protocols.bgp.constants import VPN_TABLE
+from ryu.services.protocols.bgp.constants import VRF_TABLE
+from ryu.services.protocols.bgp.info_base.base import Destination
+from ryu.services.protocols.bgp.info_base.base import Path
+from ryu.services.protocols.bgp.info_base.base import Table
+from ryu.services.protocols.bgp.utils.stats import LOCAL_ROUTES
+from ryu.services.protocols.bgp.utils.stats import REMOTE_ROUTES
+from ryu.services.protocols.bgp.utils.stats import RESOURCE_ID
+from ryu.services.protocols.bgp.utils.stats import RESOURCE_NAME
+from ryu.services.protocols.bgp.protocols.bgp import pathattr
+
+LOG = logging.getLogger('bgpspeaker.info_base.vrf')
+
+
+class VrfTable(Table):
+ """Virtual Routing and Forwarding information base.
+ Keeps destination imported to given vrf in represents.
+ """
+
+ __metaclass__ = abc.ABCMeta
+ ROUTE_FAMILY = None
+ VPN_ROUTE_FAMILY = None
+ NLRI_CLASS = None
+ VRF_PATH_CLASS = None
+ VRF_DEST_CLASS = None
+
+ def __init__(self, vrf_conf, core_service, signal_bus):
+ Table.__init__(self, vrf_conf.route_dist, core_service, signal_bus)
+ self._vrf_conf = vrf_conf
+ self._import_maps = []
+ self.init_import_maps(vrf_conf.import_maps)
+
+ def init_import_maps(self, import_maps):
+ LOG.debug(
+ "Initializing import maps (%s) for %s" % (import_maps, repr(self))
+ )
+ del self._import_maps[:]
+ importmap_manager = self._core_service.importmap_manager
+ for name in import_maps:
+ import_map = importmap_manager.get_import_map_by_name(name)
+ if import_map is None:
+ raise KeyError('No import map with name %s' % name)
+ self._import_maps.append(import_map)
+
+ @property
+ def import_rts(self):
+ return self._vrf_conf.import_rts
+
+ @property
+ def vrf_conf(self):
+ return self._vrf_conf
+
+ def _table_key(self, nlri):
+ """Return a key that will uniquely identify this NLRI inside
+ this table.
+ """
+ return str(nlri)
+
+ def _create_dest(self, nlri):
+ return self.VRF_DEST_CLASS(self, nlri)
+
+ def append_import_map(self, import_map):
+ self._import_maps.append(import_map)
+
+ def remove_import_map(self, import_map):
+ self._import_maps.remove(import_map)
+
+ def get_stats_summary_dict(self):
+ """Returns count of local and remote paths."""
+
+ remote_route_count = 0
+ local_route_count = 0
+ for dest in self.itervalues():
+ for path in dest.known_path_list:
+ if (hasattr(path.source, 'version_num')
+ or path.source == VPN_TABLE):
+ remote_route_count += 1
+ else:
+ local_route_count += 1
+ return {RESOURCE_ID: self._vrf_conf.id,
+ RESOURCE_NAME: self._vrf_conf.name,
+ REMOTE_ROUTES: remote_route_count,
+ LOCAL_ROUTES: local_route_count}
+
+ def import_vpn_paths_from_table(self, vpn_table, import_rts=None):
+ for vpn_dest in vpn_table.itervalues():
+ vpn_path = vpn_dest.best_path
+ if not vpn_path:
+ continue
+
+ if import_rts is None:
+ import_rts = set(self.import_rts)
+ else:
+ import_rts = set(import_rts)
+
+ path_rts = vpn_path.get_rts()
+ if import_rts.intersection(path_rts):
+ # TODO(PH): When (re-)implementing extranet, check what should
+ # be the label reported back to NC for local paths coming from
+ # other VRFs.
+ self.import_vpn_path(vpn_path)
+
+ def import_vpn_path(self, vpn_path):
+ """Imports `vpnv(4|6)_path` into `vrf(4|6)_table`.
+
+ :Parameters:
+ - `vpn_path`: (Path) VPN path that will be cloned and imported
+ into VRF.
+ Note: Does not do any checking if this import is valid.
+ """
+ assert vpn_path.route_family == self.VPN_ROUTE_FAMILY
+        # If the source of the given VPN path is the NC, we import it into
+        # the given VRF table because of an extranet setting. Hence we
+        # identify the source of such extranet prefixes as VRF_TABLE, else
+        # VPN_TABLE.
+ source = vpn_path.source
+ if not source:
+ source = VRF_TABLE
+
+ vrf_nlri = self.NLRI_CLASS(vpn_path.nlri.prefix)
+
+ vpn_nlri = vpn_path.nlri
+ puid = self.VRF_PATH_CLASS.create_puid(vpn_nlri.route_disc,
+ vpn_nlri.prefix)
+ vrf_path = self.VRF_PATH_CLASS(
+ puid,
+ source,
+ vrf_nlri,
+ vpn_path.source_version_num,
+ pattrs=vpn_path.pathattr_map,
+ nexthop=vpn_path.nexthop,
+ is_withdraw=vpn_path.is_withdraw,
+ label_list=vpn_path.nlri.label_list
+ )
+ if self._is_vrf_path_already_in_table(vrf_path):
+ return None
+
+ if self._is_vrf_path_filtered_out_by_import_maps(vrf_path):
+ return None
+ else:
+ vrf_dest = self.insert(vrf_path)
+ self._signal_bus.dest_changed(vrf_dest)
+
+ def _is_vrf_path_filtered_out_by_import_maps(self, vrf_path):
+ for import_map in self._import_maps:
+ if import_map.match(vrf_path):
+ return True
+
+ return False
+
+ def _is_vrf_path_already_in_table(self, vrf_path):
+ dest = self._get_dest(vrf_path.nlri)
+ if dest is None:
+ return False
+ return vrf_path in dest.known_path_list
+
+ def apply_import_maps(self):
+ changed_dests = []
+ for dest in self.itervalues():
+ assert isinstance(dest, VrfDest)
+ for import_map in self._import_maps:
+ for path in dest.known_path_list:
+ if import_map.match(path):
+ dest.withdraw_path(path)
+ changed_dests.append(dest)
+ return changed_dests
+
+ def insert_vrf_path(self, ip_nlri, next_hop=None,
+ gen_lbl=False, is_withdraw=False):
+ assert ip_nlri
+ pattrs = None
+ label_list = []
+ vrf_conf = self.vrf_conf
+ if not is_withdraw:
+ # Create a dictionary for path-attrs.
+ pattrs = OrderedDict()
+
+ # MpReachNlri and/or MpUnReachNlri attribute info. is contained
+ # in the path. Hence we do not add these attributes here.
+ from ryu.services.protocols.bgp.core import EXPECTED_ORIGIN
+
+ pattrs[pathattr.Origin.ATTR_NAME] = \
+ pathattr.Origin(EXPECTED_ORIGIN)
+ pattrs[pathattr.AsPath.ATTR_NAME] = pathattr.AsPath([])
+ pattrs[pathattr.ExtCommunity.ATTR_NAME] = pathattr.ExtCommunity(
+ rt_list=vrf_conf.export_rts, soo_list=vrf_conf.soo_list)
+ if vrf_conf.multi_exit_disc:
+ pattrs[pathattr.Med.ATTR_NAME] = pathattr.Med(
+ vrf_conf.multi_exit_disc
+ )
+
+ table_manager = self._core_service.table_manager
+ if gen_lbl and next_hop:
+            # Label-per-next_hop demands that we use a different label for
+            # each next_hop. Here connected interfaces are advertised per
+            # VRF.
+ label_key = (vrf_conf.route_dist, next_hop)
+ nh_label = table_manager.get_nexthop_label(label_key)
+ if not nh_label:
+ nh_label = table_manager.get_next_vpnv4_label()
+ table_manager.set_nexthop_label(label_key, nh_label)
+ label_list.append(nh_label)
+
+ elif gen_lbl:
+ # If we do not have next_hop, get a new label.
+ label_list.append(table_manager.get_next_vpnv4_label())
+
+ puid = self.VRF_PATH_CLASS.create_puid(
+ vrf_conf.route_dist, ip_nlri.prefix
+ )
+ path = self.VRF_PATH_CLASS(
+ puid, None, ip_nlri, 0, pattrs=pattrs,
+ nexthop=next_hop, label_list=label_list,
+ is_withdraw=is_withdraw
+ )
+
+ # Insert the path into VRF table, get affected destination so that we
+ # can process it further.
+ eff_dest = self.insert(path)
+ # Enqueue the eff_dest for further processing.
+ self._signal_bus.dest_changed(eff_dest)
+ return label_list
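+
+    # Illustrative call on a concrete subclass such as Vrf4Table (prefix and
+    # next-hop values are assumed):
+    #
+    #   labels = vrf4_table.insert_vrf_path(Ipv4('10.1.1.0/24'),
+    #                                       next_hop='172.16.0.1',
+    #                                       gen_lbl=True)
+    #
+    # A (route_dist, next_hop) pair re-uses the label previously allocated
+    # for it; a new next_hop gets a fresh vpnv4 label from the table manager.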
+
+ def clean_uninteresting_paths(self, interested_rts=None):
+ if interested_rts is None:
+ interested_rts = set(self.vrf_conf.import_rts)
+ return super(VrfTable, self).clean_uninteresting_paths(interested_rts)
+
+
+class VrfDest(Destination):
+ """Base class for VRF destination."""
+ __metaclass__ = abc.ABCMeta
+
+ def __init__(self, table, nlri):
+ super(VrfDest, self).__init__(table, nlri)
+ self._route_disc = self._table.vrf_conf.route_dist
+
+ def _best_path_lost(self):
+ # Have to send update messages for withdraw of best-path to Network
+ # controller or Global table.
+ old_best_path = self._best_path
+ self._best_path = None
+
+ if old_best_path is None:
+ return
+
+ if old_best_path.source is not None:
+ # Send update-withdraw msg. to Sink. Create withdraw path
+ # out of old best path and queue it into flexinet sinks.
+ old_best_path = old_best_path.clone(for_withdrawal=True)
+ self._core_service.update_flexinet_peers(old_best_path,
+ self._route_disc)
+ else:
+ # Create withdraw-path out of old best path.
+ gpath = old_best_path.clone_to_vpn(self._route_disc,
+ for_withdrawal=True)
+ # Insert withdraw into global table and enqueue the destination
+ # for further processing.
+ tm = self._core_service.table_manager
+ tm.learn_path(gpath)
+
+ def _new_best_path(self, best_path):
+ LOG.debug('New best path selected for destination %s' % (self))
+
+ old_best_path = self._best_path
+ assert (best_path != old_best_path)
+ self._best_path = best_path
+ # Distribute new best-path to flexinet-peers.
+ if best_path.source is not None:
+            # Since a route-refresh just causes the version number to go up
+            # and this changes the best-path, we check whether the new
+            # best-path is really different from the old best-path in a way
+            # that warrants sending an update to flexinet peers.
+
+ def really_diff():
+ old_labels = old_best_path.label_list
+ new_labels = best_path.label_list
+ return old_best_path.nexthop != best_path.nexthop \
+ or set(old_labels) != set(new_labels)
+
+ if not old_best_path or (old_best_path and really_diff()):
+ # Create OutgoingRoute and queue it into NC sink.
+ self._core_service.update_flexinet_peers(
+ best_path, self._route_disc
+ )
+ else:
+ # If NC is source, we create new path and insert into global
+ # table.
+ gpath = best_path.clone_to_vpn(self._route_disc)
+ tm = self._core_service.table_manager
+ tm.learn_path(gpath)
+ LOG.debug('VRF table %s has new best path: %s' %
+ (self._route_disc, self.best_path))
+
+ def _remove_withdrawals(self):
+ """Removes withdrawn paths.
+
+ Note:
+        We may have a disproportionate number of withdrawals compared to
+        known paths, since not all paths get installed into the table due to
+        bgp policy, yet we can still receive withdrawals for such paths
+        (withdrawals may not be stopped by the same policies).
+ """
+
+ LOG.debug('Removing %s withdrawals' % len(self._withdraw_list))
+
+        # If we have no withdrawals, we have nothing to do.
+ if not self._withdraw_list:
+ return
+
+        # If we have some withdrawals and no known paths, it is safe to
+        # delete these withdrawals.
+ if not self._known_path_list:
+ LOG.debug('Found %s withdrawals for path(s) that did not get'
+ ' installed.' % len(self._withdraw_list))
+ del (self._withdraw_list[:])
+ return
+
+ # If we have some known paths and some withdrawals, we find matches and
+ # delete them first.
+ matches = []
+ w_matches = []
+ # Match all withdrawals from destination paths.
+ for withdraw in self._withdraw_list:
+ match = None
+ for path in self._known_path_list:
+                # We have a match if the puids are the same.
+ if path.puid == withdraw.puid:
+ match = path
+ matches.append(path)
+ w_matches.append(withdraw)
+ # One withdraw can remove only one path.
+ break
+            # We do not have any match for this withdraw.
+ if not match:
+                LOG.debug('No matching path for withdraw found, maybe the '
+                          'path was not installed into the table: %s' %
+                          withdraw)
+ # If we have partial match.
+ if len(matches) != len(self._withdraw_list):
+ LOG.debug('Did not find match for some withdrawals. Number of '
+ 'matches(%s), number of withdrawals (%s)' %
+ (len(matches), len(self._withdraw_list)))
+
+ # Clear matching paths and withdrawals.
+ for match in matches:
+ self._known_path_list.remove(match)
+ for w_match in w_matches:
+ self._withdraw_list.remove(w_match)
+
+ def _remove_old_paths(self):
+ """Identifies which of known paths are old and removes them.
+
+ Known paths will no longer have paths whose new version is present in
+ new paths.
+ """
+ new_paths = self._new_path_list
+ known_paths = self._known_path_list
+ for new_path in new_paths:
+ old_paths = []
+ for path in known_paths:
+                # Here we only check whether the puid is the same and do not
+                # compare path version numbers, as new paths are an implicit
+                # withdrawal of old paths and a RouteRefresh (as opposed to
+                # an EnhancedRouteRefresh) delivers the same paths again.
+ if (new_path.puid == path.puid):
+ old_paths.append(path)
+ break
+
+ for old_path in old_paths:
+ known_paths.remove(old_path)
+ LOG.debug('Implicit withdrawal of old path, since we have'
+ ' learned new path from same source: %s' % old_path)
+
+ def _validate_path(self, path):
+ if not path or not hasattr(path, 'label_list'):
+            raise ValueError('Invalid value of path. Expected a type '
+                             'with attribute label_list, got %s' % path)
+
+
+class VrfPath(Path):
+ """Represents a way of reaching an IP destination with a VPN.
+ """
+ __slots__ = ('_label_list', '_puid')
+ __metaclass__ = abc.ABCMeta
+
+ ROUTE_FAMILY = None
+ VPN_PATH_CLASS = None
+ VPN_NLRI_CLASS = None
+
+ def __init__(self, puid, source, nlri, src_ver_num,
+ pattrs=None, nexthop=None,
+ is_withdraw=False, label_list=None):
+ """Initializes a Vrf path.
+
+ Parameters:
+ - `puid`: (str) path ID, identifies VPN path from which this
+ VRF path was imported.
+ - `label_list`: (list) List of labels for this path.
+ Note: other parameters are as documented in super class.
+ """
+ Path.__init__(self, source, nlri, src_ver_num, pattrs, nexthop,
+ is_withdraw)
+ if label_list is None:
+ label_list = []
+ self._label_list = label_list
+ self._puid = puid
+
+ @property
+ def puid(self):
+ return self._puid
+
+ @property
+ def origin_rd(self):
+ tokens = self.puid.split(':')
+ return tokens[0] + ':' + tokens[1]
+
+ @property
+ def label_list(self):
+ return self._label_list[:]
+
+ @staticmethod
+ def create_puid(route_disc, ip_prefix):
+ assert route_disc and ip_prefix
+ return route_disc + ':' + ip_prefix
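+
+    # Illustrative example (values are assumed):
+    #
+    #   puid = VrfPath.create_puid('65000:1', '10.1.1.0/24')
+    #   # -> '65000:1:10.1.1.0/24'; origin_rd on such a path gives '65000:1'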
+
+ def clone(self, for_withdrawal=False):
+ pathattrs = None
+ if not for_withdrawal:
+ pathattrs = self.pathattr_map
+
+ clone = self.__class__(
+ self.puid,
+ self._source,
+ self.nlri,
+ self.source_version_num,
+ pattrs=pathattrs,
+ nexthop=self.nexthop,
+ is_withdraw=for_withdrawal,
+ label_list=self.label_list
+ )
+ return clone
+
+ def clone_to_vpn(self, route_disc, for_withdrawal=False):
+ vpn_nlri = self.VPN_NLRI_CLASS(
+ self.label_list, route_disc, self._nlri.prefix
+ )
+
+ pathattrs = None
+ if not for_withdrawal:
+ pathattrs = self.pathattr_map
+ vpnv_path = self.VPN_PATH_CLASS(
+ self.source, vpn_nlri,
+ self.source_version_num,
+ pattrs=pathattrs,
+ nexthop=self.nexthop,
+ is_withdraw=for_withdrawal
+ )
+ return vpnv_path
+
+ def __eq__(self, b_path):
+ if not isinstance(b_path, self.__class__):
+ return False
+ if not self.route_family == b_path.route_family:
+ return False
+ if not self.puid == b_path.puid:
+ return False
+ if not self.label_list == b_path.label_list:
+ return False
+ if not self.nexthop == b_path.nexthop:
+ return False
+ if not self.pathattr_map == b_path.pathattr_map:
+ return False
+
+ return True
+
+
+class ImportMap(object):
+ def match(self, vrf_path):
+ raise NotImplementedError()
+
+
+class VrfNlriImportMap(ImportMap):
+ VRF_PATH_CLASS = None
+ NLRI_CLASS = None
+
+ def __init__(self, prefix):
+ assert self.VRF_PATH_CLASS is not None
+ assert self.NLRI_CLASS is not None
+ self._nlri = self.NLRI_CLASS(prefix)
+
+ def match(self, vrf_path):
+ if vrf_path.route_family != self.VRF_PATH_CLASS.ROUTE_FAMILY:
+            LOG.error(
+                "vrf_path's route_family doesn't match the import map's"
+                " route_family. Applied to the wrong table?")
+ return False
+
+ return vrf_path.nlri == self._nlri
+
+
+class VrfRtImportMap(ImportMap):
+ def __init__(self, rt):
+ self._rt = rt
+
+ def match(self, vrf_path):
+ extcomm = vrf_path.pathattr_map.get('extcommunity')
+ return extcomm is not None and self._rt in extcomm.rt_list
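+
+
+# Illustrative usage sketch (RT string format is assumed): an import map
+# built as VrfRtImportMap('65000:100') matches any vrf_path whose
+# 'extcommunity' path attribute lists '65000:100' in its rt_list.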
diff --git a/ryu/services/protocols/bgp/info_base/vrf4.py b/ryu/services/protocols/bgp/info_base/vrf4.py
new file mode 100644
index 00000000..4e2d7f2f
--- /dev/null
+++ b/ryu/services/protocols/bgp/info_base/vrf4.py
@@ -0,0 +1,60 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Defines data types and models required specifically for VRF (for IPv4)
+ support. Represents data structures for VRF not VPN/global.
+ (Inside VRF you have IPv4 prefixes and inside VPN you have VPNv4 prefixes)
+"""
+
+import logging
+
+from ryu.services.protocols.bgp.protocols.bgp.nlri import Ipv4
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv4_UC
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv4_VPN
+from ryu.services.protocols.bgp.protocols.bgp.nlri import Vpnv4
+
+from ryu.services.protocols.bgp.info_base.vpnv4 import Vpnv4Path
+from ryu.services.protocols.bgp.info_base.vrf import VrfDest
+from ryu.services.protocols.bgp.info_base.vrf import VrfNlriImportMap
+from ryu.services.protocols.bgp.info_base.vrf import VrfPath
+from ryu.services.protocols.bgp.info_base.vrf import VrfTable
+
+LOG = logging.getLogger('bgpspeaker.info_base.vrf4')
+
+
+class Vrf4Path(VrfPath):
+ """Represents a way of reaching an IP destination with a VPN."""
+ ROUTE_FAMILY = RF_IPv4_UC
+ VPN_PATH_CLASS = Vpnv4Path
+ VPN_NLRI_CLASS = Vpnv4
+
+
+class Vrf4Dest(VrfDest):
+ ROUTE_FAMILY = RF_IPv4_UC
+
+
+class Vrf4Table(VrfTable):
+ """Virtual Routing and Forwarding information base for IPv4."""
+ ROUTE_FAMILY = RF_IPv4_UC
+ VPN_ROUTE_FAMILY = RF_IPv4_VPN
+ NLRI_CLASS = Ipv4
+ VRF_PATH_CLASS = Vrf4Path
+ VRF_DEST_CLASS = Vrf4Dest
+
+
+class Vrf4NlriImportMap(VrfNlriImportMap):
+ VRF_PATH_CLASS = Vrf4Path
+ NLRI_CLASS = Ipv4
diff --git a/ryu/services/protocols/bgp/info_base/vrf6.py b/ryu/services/protocols/bgp/info_base/vrf6.py
new file mode 100644
index 00000000..c011d79f
--- /dev/null
+++ b/ryu/services/protocols/bgp/info_base/vrf6.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Defines data types and models required specifically for VRF (for IPv6)
+ support. Represents data structures for VRF not VPN/global.
+    (Inside VRF you have IPv6 prefixes and inside VPN you have VPNv6 prefixes)
+"""
+
+import logging
+
+from ryu.services.protocols.bgp.protocols.bgp.nlri import Ipv6
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv6_UC
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv6_VPN
+from ryu.services.protocols.bgp.protocols.bgp.nlri import Vpnv6
+
+from ryu.services.protocols.bgp.info_base.vpnv6 import Vpnv6Path
+from ryu.services.protocols.bgp.info_base.vrf import VrfDest
+from ryu.services.protocols.bgp.info_base.vrf import VrfNlriImportMap
+from ryu.services.protocols.bgp.info_base.vrf import VrfPath
+from ryu.services.protocols.bgp.info_base.vrf import VrfTable
+
+LOG = logging.getLogger('bgpspeaker.info_base.vrf6')
+
+
+class Vrf6Path(VrfPath):
+ """Represents a way of reaching an IP destination with a VPN."""
+ ROUTE_FAMILY = RF_IPv6_UC
+ VPN_PATH_CLASS = Vpnv6Path
+ VPN_NLRI_CLASS = Vpnv6
+
+
+class Vrf6Dest(VrfDest):
+ """Destination for IPv6 VRFs."""
+ ROUTE_FAMILY = RF_IPv6_UC
+
+
+class Vrf6Table(VrfTable):
+ """Virtual Routing and Forwarding information base for IPv6."""
+ ROUTE_FAMILY = RF_IPv6_UC
+ VPN_ROUTE_FAMILY = RF_IPv6_VPN
+ NLRI_CLASS = Ipv6
+ VRF_PATH_CLASS = Vrf6Path
+ VRF_DEST_CLASS = Vrf6Dest
+
+
+class Vrf6NlriImportMap(VrfNlriImportMap):
+ VRF_PATH_CLASS = Vrf6Path
+ NLRI_CLASS = Ipv6
diff --git a/ryu/services/protocols/bgp/model.py b/ryu/services/protocols/bgp/model.py
new file mode 100644
index 00000000..f7e55f16
--- /dev/null
+++ b/ryu/services/protocols/bgp/model.py
@@ -0,0 +1,148 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+    Defines some model classes related to BGP.
+
+    These classes include types used to save information sent/received over
+    BGP sessions.
+"""
+import logging
+
+
+LOG = logging.getLogger('bgpspeaker.model')
+
+
+class Counter(object):
+ """Simple counter for keeping count of several keys."""
+ def __init__(self):
+ self._counters = {}
+
+ def incr(self, counter_name, incr_by=1):
+ self._counters[counter_name] = \
+ self._counters.get(counter_name, 0) + incr_by
+
+ def get_count(self, counter_name):
+ return self._counters.get(counter_name, 0)
+
+ def get_counters(self):
+ return self._counters.copy()
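+
+    # Minimal usage sketch (illustrative only, counter names are made up):
+    #
+    #   c = Counter()
+    #   c.incr('total_notifications')
+    #   c.incr('total_notifications', incr_by=4)
+    #   c.get_count('total_notifications')   # -> 5
+    #   c.get_counters()                     # -> {'total_notifications': 5}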
+
+
+class OutgoingRoute(object):
+ """Holds state about a route that is queued for being sent to a given sink.
+ """
+
+ __slots__ = ('_path', '_for_route_refresh',
+ 'sink', 'next_outgoing_route', 'prev_outgoing_route',
+ 'next_sink_out_route', 'prev_sink_out_route')
+
+ def __init__(self, path, for_route_refresh=False):
+ assert(path)
+
+ self.sink = None
+
+ self._path = path
+
+        # Is this update in response to a route-refresh request?
+        # If so, no sent-route is queued for the destination for this update.
+ self._for_route_refresh = for_route_refresh
+
+ # Automatically generated, for list off of Destination.
+ #
+ # self.next_outgoing_route
+ # self.prev_outgoing_route
+
+ # Automatically generated for list off of sink.
+ #
+ # self.next_sink_out_route
+ # self.prev_sink_out_route
+
+ @property
+ def path(self):
+ return self._path
+
+ @property
+ def for_route_refresh(self):
+ return self._for_route_refresh
+
+ def __str__(self):
+ return ('OutgoingRoute(path: %s, for_route_refresh: %s)' %
+ (self.path, self.for_route_refresh))
+
+
+class FlexinetOutgoingRoute(object):
+ """Holds state about a route that is queued for being sent to a given sink.
+
+    In this case the sink is a flexinet peer and the route information comes
+    from a VRF which holds Ipv4(v6) NLRIs.
+ """
+
+ __slots__ = ('_path', 'sink', 'next_outgoing_route', 'prev_outgoing_route',
+ 'next_sink_out_route', 'prev_sink_out_route', '_route_disc')
+
+ def __init__(self, path, route_disc):
+        from ryu.services.protocols.bgp.info_base.vrf4 import Vrf4Path
+        from ryu.services.protocols.bgp.info_base.vrf6 import Vrf6Path
+ assert path.route_family in (Vrf4Path.ROUTE_FAMILY,
+ Vrf6Path.ROUTE_FAMILY)
+
+ self.sink = None
+ self._path = path
+ self._route_disc = route_disc
+
+ # Automatically generated, for list off of Destination.
+ #
+ # self.next_outgoing_route
+ # self.prev_outgoing_route
+
+ # Automatically generated for list off of sink.
+ #
+ # self.next_sink_out_route
+ # self.prev_sink_out_route
+
+ @property
+ def path(self):
+ return self._path
+
+ @property
+ def route_disc(self):
+ return self._route_disc
+
+ def __str__(self):
+ return ('FlexinetOutgoingRoute(path: %s, route_disc: %s)' %
+ (self.path, self.route_disc))
+
+
+class SentRoute(object):
+ """Holds the information that has been sent to one or more sinks
+ about a particular BGP destination.
+ """
+
+ def __init__(self, path, peer):
+ assert(path and hasattr(peer, 'version_num'))
+
+ self.path = path
+
+ # Peer to which this path was sent.
+ self._sent_peer = peer
+
+ # Automatically generated.
+ #
+ # self.next_sent_route
+ # self.prev_sent_route
+
+ @property
+ def sent_peer(self):
+ return self._sent_peer
diff --git a/ryu/services/protocols/bgp/net_ctrl.py b/ryu/services/protocols/bgp/net_ctrl.py
new file mode 100644
index 00000000..5acccebe
--- /dev/null
+++ b/ryu/services/protocols/bgp/net_ctrl.py
@@ -0,0 +1,397 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Network Controller interface to BGP.
+
+ The network controller w.r.t. BGPS for the APGW Automation project is named
+ APGW Agent and Route Server.
+"""
+import logging
+import socket
+import traceback
+
+from ryu.services.protocols.bgp import api
+from ryu.services.protocols.bgp.api.base import ApiException
+from ryu.services.protocols.bgp.api.base import NEXT_HOP
+from ryu.services.protocols.bgp.api.base import ORIGIN_RD
+from ryu.services.protocols.bgp.api.base import PREFIX
+from ryu.services.protocols.bgp.api.base import ROUTE_DISTINGUISHER
+from ryu.services.protocols.bgp.api.base import VPN_LABEL
+from ryu.services.protocols.bgp.base import Activity
+from ryu.services.protocols.bgp.base import add_bgp_error_metadata
+from ryu.services.protocols.bgp.base import BGPSException
+from ryu.services.protocols.bgp.base import FlexinetPeer
+from ryu.services.protocols.bgp.base import NET_CTRL_ERROR_CODE
+from ryu.services.protocols.bgp.constants import VRF_TABLE
+from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF
+from ryu.services.protocols.bgp.rtconf.vrfs import VrfConf
+from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4
+
+
+# Logger instance for this module.
+LOG = logging.getLogger('bgpspeaker.net_ctrl')
+
+# Network controller service socket constants.
+NC_RPC_BIND_IP = 'apgw_rpc_bind_ip'
+NC_RPC_BIND_PORT = 'apgw_rpc_bind_port'
+
+# Notification symbols
+NOTF_ADD_REMOTE_PREFX = 'prefix.add_remote'
+NOTF_DELETE_REMOTE_PREFX = 'prefix.delete_remote'
+NOTF_ADD_LOCAL_PREFX = 'prefix.add_local'
+NOTF_DELETE_LOCAL_PREFX = 'prefix.delete_local'
+NOTF_LOG = 'logging'
+
+# MessagePackRPC message type constants
+RPC_MSG_REQUEST = 0
+RPC_MSG_RESPONSE = 1
+RPC_MSG_NOTIFY = 2
+
+#
+# Indexes for various RPC message types.
+#
+RPC_IDX_MSG_TYP = 0
+RPC_IDX_MSG_ID = 1
+RPC_IDX_REQ_SYM = 2
+RPC_IDX_REQ_PARAM = 3
+RPC_IDX_RES_ERR = 2
+RPC_IDX_RES_RST = 3
+RPC_IDX_NTF_SYM = 1
+RPC_IDX_NTF_PARAM = 2
+
+# RPC socket receive buffer size in bytes.
+RPC_SOCK_BUFF_SIZE = 4096
+
+
+@add_bgp_error_metadata(code=NET_CTRL_ERROR_CODE,
+ sub_code=1,
+ def_desc='Unknown Network controller exception')
+class NetworkControllerError(BGPSException):
+ """Common base class for exceptions related to RPC calls.
+ """
+ pass
+
+
+class RpcSession(Activity):
+ """Provides message-pack RPC abstraction for one session.
+
+    It contains a message-pack packer, un-packer, a message ID sequence
+    and utilities that use these. It also takes care of socket communication
+    with the RPC peer.
+ """
+
+ def __init__(self, socket, outgoing_msg_sink_iter):
+ super(RpcSession, self).__init__()
+ import msgpack
+
+ self._packer = msgpack.Packer()
+ self._unpacker = msgpack.Unpacker()
+ self._next_msgid = 0
+ self._socket = socket
+ self._outgoing_msg_sink_iter = outgoing_msg_sink_iter
+
+ def stop(self):
+ super(RpcSession, self).stop()
+ LOG.critical(
+ 'RPC Session to %s stopped', str(self._socket.getpeername())
+ )
+
+ def _run(self):
+ # Process outgoing messages in new thread.
+ green_out = self._spawn('net_ctrl._process_outgoing',
+ self._process_outgoing_msg,
+ self._outgoing_msg_sink_iter)
+ # Process incoming messages in new thread.
+ green_in = self._spawn('net_ctrl._process_incoming',
+ self._process_incoming_msgs)
+ LOG.critical(
+ 'RPC Session to %s started', str(self._socket.getpeername())
+ )
+ green_in.wait()
+ green_out.wait()
+
+ def _next_msg_id(self):
+ this_id = self._next_msgid
+ self._next_msgid += 1
+ return this_id
+
+ def create_request(self, method, params):
+ msgid = self._next_msg_id()
+ return self._packer.pack([RPC_MSG_REQUEST, msgid, method, params])
+
+ def create_error_response(self, msgid, error):
+ if error is None:
+ raise NetworkControllerError(desc='Creating error without body!')
+ return self._packer.pack([RPC_MSG_RESPONSE, msgid, error, None])
+
+ def create_success_response(self, msgid, result):
+ if result is None:
+ raise NetworkControllerError(desc='Creating response without '
+ 'body!')
+ return self._packer.pack([RPC_MSG_RESPONSE, msgid, None, result])
+
+ def create_notification(self, method, params):
+ return self._packer.pack([RPC_MSG_NOTIFY, method, params])
+
+ def feed_and_get_messages(self, data):
+ self._unpacker.feed(data)
+ messages = []
+ for msg in self._unpacker:
+ messages.append(msg)
+ return messages
+
+ def feed_and_get_first_message(self, data):
+ self._unpacker.feed(data)
+ for msg in self._unpacker:
+ return msg
+
+ def send_notification(self, method, params):
+ rpc_msg = self.create_notification(method, params)
+ return self._sendall(rpc_msg)
+
+ def _process_incoming_msgs(self):
+ LOG.debug('NetworkController started processing incoming messages')
+ assert self._socket
+
+ while True:
+ # Wait for request/response/notification from peer.
+ msg_buff = self._recv()
+ if not msg_buff:
+ LOG.info('Peer %r disconnected.' % self._socket)
+ break
+ messages = self.feed_and_get_messages(msg_buff)
+ for msg in messages:
+ if msg[0] == RPC_MSG_REQUEST:
+ try:
+ result = _handle_request(msg)
+ _send_success_response(self, self._socket, msg, result)
+ except BGPSException as e:
+ _send_error_response(self, self._socket, msg,
+ e.message)
+ elif msg[0] == RPC_MSG_RESPONSE:
+ _handle_response(msg)
+ elif msg[0] == RPC_MSG_NOTIFY:
+ _handle_notification(msg)
+ else:
+ LOG.error('Invalid message type: %r' % msg)
+ self.pause(0)
+
+ def _process_outgoing_msg(self, sink_iter):
+ """For every message we construct a corresponding RPC message to be
+ sent over the given socket inside given RPC session.
+
+ This function should be launched in a new green thread as
+ it loops forever.
+ """
+ LOG.debug('NetworkController processing outgoing request list.')
+ # TODO(Team): handle un-expected exception breaking the loop in
+ # graceful manner. Discuss this with other component developers.
+ # TODO(PH): We should try not to send routes from a bgp peer that is not
+ # in established state.
+ from ryu.services.protocols.bgp.model import FlexinetOutgoingRoute
+ while True:
+ # sink iter is Sink instance and next is blocking so this isn't
+ # active wait.
+ for outgoing_msg in sink_iter:
+ if isinstance(outgoing_msg, FlexinetOutgoingRoute):
+ rpc_msg = _create_prefix_notif(outgoing_msg, self)
+ else:
+ raise NotImplementedError(
+ 'Unhandled outgoing message'
+ ' of type %s' %
+ outgoing_msg.__class__)
+ if rpc_msg:
+ self._sendall(rpc_msg)
+ self.pause(0)
+
+ def _recv(self):
+ return self._sock_wrap(self._socket.recv)(RPC_SOCK_BUFF_SIZE)
+
+ def _sendall(self, msg):
+ return self._sock_wrap(self._socket.sendall)(msg)
+
+ def _sock_wrap(self, func):
+ def wrapper(*args, **kwargs):
+ try:
+ ret = func(*args, **kwargs)
+ except socket.error:
+ LOG.error(traceback.format_exc())
+ self._socket_error()
+ return
+ return ret
+
+ return wrapper
+
+ def _socket_error(self):
+ if self.started:
+ self.stop()
+
+
+def _create_prefix_notif(outgoing_msg, rpc_session):
+ """Constructs prefix notification with data from given outgoing message.
+
+ Given RPC session is used to create RPC notification message.
+ """
+ assert(outgoing_msg)
+ path = outgoing_msg.path
+ assert(path)
+ vpn_nlri = path.nlri
+
+ rpc_msg = None
+ assert path.source is not None
+ if path.source != VRF_TABLE:
+ # Extract relevant info for update-add/update-delete.
+ params = [{ROUTE_DISTINGUISHER: outgoing_msg.route_disc,
+ PREFIX: vpn_nlri.prefix,
+ NEXT_HOP: path.nexthop,
+ VPN_LABEL: path.label_list[0],
+ VRF_RF: VrfConf.rf_2_vrf_rf(path.route_family)}]
+ if not path.is_withdraw:
+ # Create notification to NetworkController.
+ rpc_msg = rpc_session.create_notification(NOTF_ADD_REMOTE_PREFX,
+ params)
+ else:
+ # Create update-delete notification to NetworkController.
+ rpc_msg = rpc_session.create_notification(NOTF_DELETE_REMOTE_PREFX,
+ params)
+ else:
+ # Extract relevant info for update-add/update-delete.
+ params = [{ROUTE_DISTINGUISHER: outgoing_msg.route_disc,
+ PREFIX: vpn_nlri.prefix,
+ NEXT_HOP: path.nexthop,
+ VRF_RF: VrfConf.rf_2_vrf_rf(path.route_family),
+ ORIGIN_RD: path.origin_rd}]
+ if not path.is_withdraw:
+ # Create notification to NetworkController.
+ rpc_msg = rpc_session.create_notification(NOTF_ADD_LOCAL_PREFX,
+ params)
+ else:
+ # Create update-delete notification to NetworkController.
+ rpc_msg = rpc_session.create_notification(NOTF_DELETE_LOCAL_PREFX,
+ params)
+
+ return rpc_msg
+
+
+def _validate_rpc_ip(rpc_server_ip):
+ """Validates given ip for use as rpc host bind address.
+ """
+ if not is_valid_ipv4(rpc_server_ip):
+ raise NetworkControllerError(desc='Invalid rpc ip address.')
+ return rpc_server_ip
+
+
+def _validate_rpc_port(port):
+ """Validates give port for use as rpc server port.
+ """
+ if not port:
+ raise NetworkControllerError(desc='Invalid rpc port number.')
+ if not isinstance(port, (int, long)) and isinstance(port, str):
+ port = int(port)
+
+ if port <= 0:
+ raise NetworkControllerError(desc='Invalid rpc port number %s' % port)
+ return port
+
+
+class _NetworkController(FlexinetPeer, Activity):
+ """Network controller peer.
+
+ Provides a MessagePack-RPC interface for flexinet peers, such as the
+ network controller, to peer and hold an RPC session with the BGPS
+ process. This RPC interface provides access to the BGPS API.
+ """
+
+ def __init__(self):
+ FlexinetPeer.__init__(self)
+ Activity.__init__(self, name='NETWORK_CONTROLLER')
+ # Outstanding requests, i.e. requests for which we are yet to receive
+ # response from peer. We currently do not have any requests going out.
+ self._outstanding_reqs = {}
+ self._rpc_session = None
+
+ def _run(self, *args, **kwargs):
+ """Runs RPC server.
+
+ Waits for a peer to connect and starts an rpc session with it.
+ For every new connection a new rpc session is started.
+ """
+ apgw_rpc_bind_ip = _validate_rpc_ip(kwargs.pop(NC_RPC_BIND_IP))
+ apgw_rpc_bind_port = _validate_rpc_port(kwargs.pop(NC_RPC_BIND_PORT))
+
+ sock_addr = (apgw_rpc_bind_ip, apgw_rpc_bind_port)
+ LOG.debug('NetworkController started listening for connections...')
+
+ server_thread = self._listen_tcp(sock_addr, self._start_rpc_session)
+ self.pause(0)
+ server_thread.wait()
+
+ def _start_rpc_session(self, socket):
+ """Starts a new RPC session with given connection.
+ """
+ if self._rpc_session and self._rpc_session.started:
+ self._rpc_session.stop()
+
+ self._rpc_session = RpcSession(socket, self)
+ self._rpc_session.start()
+
+ def send_rpc_notification(self, method, params):
+ if (self.started and self._rpc_session is not None and
+ self._rpc_session.started):
+ return self._rpc_session.send_notification(method, params)
+
+
+def _handle_response(response):
+ raise NotImplementedError('BGPS does not send requests, hence it should'
+ ' not receive any response. Response: %s' % response)
+
+
+def _handle_notification(notification):
+ LOG.debug('Notification from NetworkController<<: %s %s' %
+ (notification[RPC_IDX_NTF_SYM], notification[RPC_IDX_NTF_PARAM]))
+ operation, params = notification[1], notification[2]
+ return api.base.call(operation, **params[0])
+
+
+def _handle_request(request):
+ LOG.debug('Request from NetworkController<<: %s %s' %
+ (request[RPC_IDX_REQ_SYM], request[RPC_IDX_REQ_PARAM]))
+ operation, params = request[2], request[3]
+ kwargs = {}
+ if len(params) > 0:
+ kwargs = params[0]
+ try:
+ return api.base.call(operation, **kwargs)
+ except TypeError:
+ LOG.error(traceback.format_exc())
+ raise ApiException(desc='Invalid type for RPC parameter.')
+
+
+def _send_success_response(rpc_session, socket, request, result):
+ response = rpc_session.create_success_response(request[RPC_IDX_MSG_ID],
+ result)
+ socket.sendall(response)
+
+
+def _send_error_response(rpc_session, socket, request, emsg):
+ response = rpc_session.create_error_response(request[RPC_IDX_MSG_ID],
+ str(emsg))
+ socket.sendall(response)
+
+
+# Network controller singleton
+NET_CONTROLLER = _NetworkController()
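+
+# Illustrative sketch (editor's note, not part of the original patch): the
+# singleton above is an Activity, and _run() pops the bind address from its
+# keyword arguments. Assuming Activity.start() forwards keyword arguments to
+# _run(), as the signature above suggests, starting the RPC server would look
+# roughly like:
+#
+#   NET_CONTROLLER.start(**{NC_RPC_BIND_IP: '127.0.0.1',
+#                           NC_RPC_BIND_PORT: 50002})
+#
+# The address and port values are examples only.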
diff --git a/ryu/services/protocols/bgp/operator/command.py b/ryu/services/protocols/bgp/operator/command.py
new file mode 100644
index 00000000..bd384f0c
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/command.py
@@ -0,0 +1,269 @@
+from collections import namedtuple
+import json
+import logging
+import pprint
+import re
+
+(STATUS_OK, STATUS_ERROR) = xrange(2)
+
+CommandsResponse = namedtuple('CommandsResponse', ['status', 'value'])
+
+LOG = logging.getLogger('bgpspeaker.operator.command')
+
+
+def default_help_formatter(quick_helps):
+ """Apply default formatting for help messages
+
+ :param quick_helps: list of tuples containing help info
+ """
+ ret = ''
+ for line in quick_helps:
+ cmd_path, param_hlp, cmd_hlp = line
+ ret += ' '.join(cmd_path) + ' '
+ if param_hlp:
+ ret += param_hlp + ' '
+ ret += '- ' + cmd_hlp + '\n'
+ return ret
+
+
+class Command(object):
+ """Command class is used as a node in tree of commands.
+
+ Each command can do some action or have some sub-commands, just like in IOS
+ Command with it's sub-commands form tree.
+ Each command can have one or more parameters. Parameters have to be
+ distinguishable from sub-commands.
+ One can inject dependency into command Cmd(api=my_object).
+ This dependency will be injected to every sub-command. And can be used
+ to interact with model/data etc.
+ Example of path in command tree `show count all`.
+ """
+
+ help_msg = ''
+ param_help_msg = None
+ command = ''
+ cli_resp_line_template = '{0}: {1}\n\n'
+
+ def __init__(self, api=None, parent=None,
+ help_formatter=default_help_formatter,
+ resp_formatter_name='cli'):
+ """:param api: object which is saved as self.api
+ and re-injected to every sub-command. You can use it to
+ manipulate your model from inside Commands'
+ :param parent: parent command instance.
+ :param help_formatter: function used to format
+ output of '?'command. Is re-injected to every
+ sub-command as well.
+ :param resp_formatter_name: used to select function to format
+ output of _action. cli_resp_formatter and json_resp_formatter
+ are defined by default, but you can define your own formatters.
+ If you use custom formatter(not cli nor json) remember to
+ implement it for every sub-command.
+ """
+
+ self.resp_formatter_name = resp_formatter_name
+
+ if hasattr(self, resp_formatter_name + '_resp_formatter'):
+ self.resp_formatter = \
+ getattr(self, resp_formatter_name + '_resp_formatter')
+ else:
+ self.resp_formatter = self.cli_resp_formatter
+
+ self.api = api
+ self.parent_cmd = parent
+ self.help_formatter = help_formatter
+ if not hasattr(self, 'subcommands'):
+ self.subcommands = {}
+
+ def __call__(self, params):
+ """You run command by calling it.
+
+ :param params: As params you give list of subcommand names
+ and params to final subcommand. Kind of like in
+ cisco ios cli, ie. show int eth1 / 1, where show is command,
+ int subcommand and eth1 / 1 is param for subcommand.
+ :return: returns tuple of CommandsResponse and class of
+ sub - command on which _action was called. (last sub - command)
+ CommandsResposne.status is action status,
+ and CommandsResponse.value is formatted response.
+ """
+ if len(params) == 0:
+ return self._action_wrapper([])
+
+ first_param = params[0]
+
+ if first_param == '?':
+ return self.question_mark()
+
+ if first_param in self.subcommands:
+ return self._instantiate_subcommand(first_param)(params[1:])
+
+ return self._action_wrapper(params)
+
+ @classmethod
+ def cli_resp_formatter(cls, resp):
+ """Override this method to provide custom formatting of cli response.
+ """
+ if not resp.value:
+ return ''
+
+ if resp.status == STATUS_OK:
+
+ if type(resp.value) in (str, bool, int, float, unicode):
+ return str(resp.value)
+
+ ret = ''
+ val = resp.value
+ if not isinstance(val, list):
+ val = [val]
+ for line in val:
+ for k, v in line.iteritems():
+ if isinstance(v, dict):
+ ret += cls.cli_resp_line_template.format(
+ k, '\n' + pprint.pformat(v)
+ )
+ else:
+ ret += cls.cli_resp_line_template.format(k, v)
+ return ret
+ else:
+ return "Error: {0}".format(resp.value)
+
+ @classmethod
+ def json_resp_formatter(cls, resp):
+ """Override this method to provide custom formatting of json response.
+ """
+ return json.dumps(resp.value)
+
+ @classmethod
+ def dict_resp_formatter(cls, resp):
+ return resp.value
+
+ def _action_wrapper(self, params):
+ filter_params = []
+ if '|' in params:
+ ind = params.index('|')
+ new_params = params[:ind]
+ filter_params = params[ind:]
+ params = new_params
+
+ action_resp = self.action(params)
+ if len(filter_params) > 1:
+ # we don't pass '|' around so filter_params[1:]
+ action_resp = self.filter_resp(action_resp, filter_params[1:])
+ action_resp = CommandsResponse(
+ action_resp.status,
+ self.resp_formatter(action_resp)
+ )
+ return action_resp, self.__class__
+
+ def action(self, params):
+ """Override this method to define what command should do.
+
+ :param params: list of text parameters applied to this command.
+ :return: returns CommandsResponse instance.
+ CommandsResposne.status can be STATUS_OK or STATUS_ERROR
+ CommandsResponse.value should be dict or str
+ """
+ return CommandsResponse(STATUS_ERROR, 'Not implemented')
+
+ def filter_resp(self, action_resp, filter_params):
+ """Filter response of action. Used to make printed results more
+ specific
+
+ :param action_resp: named tuple (CommandsResponse)
+ containing response from action.
+ :param filter_params: params used after '|' specific for given filter
+ :return: filtered response.
+ """
+ if action_resp.status == STATUS_OK:
+ try:
+ return CommandsResponse(
+ STATUS_OK,
+ TextFilter.filter(action_resp.value, filter_params)
+ )
+ except FilterError as e:
+ return CommandsResponse(STATUS_ERROR, str(e))
+ else:
+ return action_resp
+
+ def question_mark(self):
+ """Shows help for this command and it's sub-commands.
+ """
+ ret = []
+ if self.param_help_msg or len(self.subcommands) == 0:
+ ret.append(self._quick_help())
+
+ if len(self.subcommands) > 0:
+ for k, _ in sorted(self.subcommands.iteritems()):
+ command_path, param_help, cmd_help = \
+ self._instantiate_subcommand(k)._quick_help(nested=True)
+ if command_path or param_help or cmd_help:
+ ret.append((command_path, param_help, cmd_help))
+
+ return (
+ CommandsResponse(STATUS_OK, self.help_formatter(ret)),
+ self.__class__
+ )
+
+ def _quick_help(self, nested=False):
+ """:param nested: True if help is requested directly for this command
+ and False when help is requested for a list of possible
+ completions.
+ """
+ if nested:
+ return self.command_path(), None, self.help_msg
+ else:
+ return self.command_path(), self.param_help_msg, self.help_msg
+
+ def command_path(self):
+ if self.parent_cmd:
+ return self.parent_cmd.command_path() + [self.command]
+ else:
+ return [self.command]
+
+ def _instantiate_subcommand(self, key):
+ return self.subcommands[key](
+ api=self.api,
+ parent=self,
+ help_formatter=self.help_formatter,
+ resp_formatter_name=self.resp_formatter_name
+ )
+
+
+class TextFilter(object):
+
+ @classmethod
+ def filter(cls, action_resp_value, filter_params):
+ try:
+ action, expected_value = filter_params
+ except ValueError:
+ raise FilterError('Wrong number of filter parameters')
+ if action == 'regexp':
+
+ if isinstance(action_resp_value, list):
+ resp = list(action_resp_value)
+ iterator = enumerate(action_resp_value)
+ else:
+ resp = dict(action_resp_value)
+ iterator = action_resp_value.iteritems()
+
+ remove = []
+
+ for key, value in iterator:
+ if not re.search(expected_value, str(value)):
+ remove.append(key)
+
+ if isinstance(resp, list):
+ resp = [resp[key] for key, value in enumerate(resp)
+ if key not in remove]
+ else:
+ resp = {key: value for key, value in resp.iteritems()
+ if key not in remove}
+
+ return resp
+ else:
+ raise FilterError('Unknown filter')
+
+
+class FilterError(Exception):
+ pass
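+
+
+# Editor's illustrative example (not part of the original patch): a minimal
+# command tree exercising Command, the '|' regexp filter and the default cli
+# formatter. GreetCmd/WorldCmd are hypothetical names used only here.
+if __name__ == '__main__':
+    class WorldCmd(Command):
+        command = 'world'
+        help_msg = 'say hello to the world'
+
+        def action(self, params):
+            return CommandsResponse(STATUS_OK, {'greeting': 'hello world'})
+
+    class GreetCmd(Command):
+        command = 'greet'
+        help_msg = 'greeting commands'
+        subcommands = {'world': WorldCmd}
+
+    resp, cmd_cls = GreetCmd()(['world', '|', 'regexp', 'hello'])
+    print(resp.value)  # prints "greeting: hello world"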
diff --git a/ryu/services/protocols/bgp/operator/commands/clear.py b/ryu/services/protocols/bgp/operator/commands/clear.py
new file mode 100644
index 00000000..be4a9e9d
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/commands/clear.py
@@ -0,0 +1,54 @@
+from ryu.services.protocols.bgp.operator.command import Command
+from ryu.services.protocols.bgp.operator.command import CommandsResponse
+from ryu.services.protocols.bgp.operator.command import STATUS_OK
+from ryu.services.protocols.bgp.operator.commands.responses import \
+ WrongParamResp
+
+
+class BGPCmd(Command):
+ help_msg = ('reset bgp connections; if no afi/safi is given, '
+ 'all supported address-families are reset')
+ param_help_msg = '<peer_ip> [<afi> <safi>]'
+ command = 'bgp'
+
+ def __init__(self, *args, **kwargs):
+ super(BGPCmd, self).__init__(*args, **kwargs)
+
+ self.subcommands = {'all': self.All}
+
+ def action(self, params):
+ if len(params) == 0:
+ return WrongParamResp()
+ peer = afi = safi = None
+ try:
+ peer = params[0]
+ afi = params[1]
+ safi = params[2]
+ except IndexError:
+ pass
+
+ self.api.route_refresh(peer, afi, safi)
+ return CommandsResponse(STATUS_OK, '')
+
+ class All(Command):
+ help_msg = 'reset all connections'
+ param_help_msg = '[<afi=> <safi=>]'
+ command = 'all'
+
+ def action(self, params):
+ peer = afi = safi = None
+ try:
+ afi = params[0]
+ safi = params[1]
+ except IndexError:
+ pass
+
+ self.api.route_refresh(peer, afi, safi)
+ return CommandsResponse(STATUS_OK, '')
+
+
+class ClearCmd(Command):
+ help_msg = 'allows resetting BGP connections'
+ command = 'clear'
+
+ subcommands = {'bgp': BGPCmd}
diff --git a/ryu/services/protocols/bgp/operator/commands/responses.py b/ryu/services/protocols/bgp/operator/commands/responses.py
new file mode 100644
index 00000000..f90dd98e
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/commands/responses.py
@@ -0,0 +1,34 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Defines classes related to incorrect parameters.
+"""
+from ryu.services.protocols.bgp.operator.command import CommandsResponse
+from ryu.services.protocols.bgp.operator.command import STATUS_ERROR
+from ryu.services.protocols.bgp.operator.internal_api import WrongParamError
+
+
+class WrongParamResp(object):
+ def __new__(cls, e=None):
+ return cls.wrong_param_resp_factory(e)
+
+ @staticmethod
+ def wrong_param_resp_factory(e=None):
+ if not e:
+ e = WrongParamError()
+ desc = 'wrong parameters: %s' % str(e)
+
+ return CommandsResponse(STATUS_ERROR, desc)
diff --git a/ryu/services/protocols/bgp/operator/commands/root.py b/ryu/services/protocols/bgp/operator/commands/root.py
new file mode 100644
index 00000000..cf6d5cd4
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/commands/root.py
@@ -0,0 +1,11 @@
+from ryu.services.protocols.bgp.operator.command import Command
+from ryu.services.protocols.bgp.operator.commands.clear import ClearCmd
+from ryu.services.protocols.bgp.operator.commands.set import SetCmd
+from ryu.services.protocols.bgp.operator.commands.show import ShowCmd
+
+
+class RootCmd(Command):
+ subcommands = {
+ 'show': ShowCmd,
+ 'set': SetCmd,
+ 'clear': ClearCmd}
diff --git a/ryu/services/protocols/bgp/operator/commands/set.py b/ryu/services/protocols/bgp/operator/commands/set.py
new file mode 100644
index 00000000..b28a80ac
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/commands/set.py
@@ -0,0 +1,65 @@
+import logging
+
+from ryu.services.protocols.bgp.operator.command import Command
+from ryu.services.protocols.bgp.operator.command import CommandsResponse
+from ryu.services.protocols.bgp.operator.command import STATUS_OK
+from ryu.services.protocols.bgp.operator.commands.responses import \
+ WrongParamResp
+
+
+class LoggingCmd(Command):
+ command = 'logging'
+ help_msg = 'turn on/off logging at current level'
+
+ def __init__(self, *args, **kwargs):
+ super(LoggingCmd, self).__init__(*args, **kwargs)
+ self.subcommands = {
+ 'on': self.On,
+ 'off': self.Off,
+ 'level': self.Level
+ }
+
+ class On(Command):
+ command = 'on'
+ help_msg = 'turn-on the logging at the current level'
+
+ def action(self, params):
+ logging.getLogger('bgpspeaker').addHandler(self.api.log_handler)
+ return CommandsResponse(STATUS_OK, True)
+
+ class Off(Command):
+ command = 'off'
+ help_msg = 'turn-off the logging'
+
+ def action(self, params):
+ logging.getLogger('bgpspeaker').removeHandler(self.api.log_handler)
+ return CommandsResponse(STATUS_OK, True)
+
+ class Level(Command):
+ command = 'level'
+ help_msg = 'set logging level'
+ param_help_msg = '[debug/info/error]'
+
+ def action(self, params):
+ lvls = {
+ 'debug': logging.DEBUG,
+ 'error': logging.ERROR,
+ 'info': logging.INFO
+ }
+ if len(params) == 1 and params[0] in lvls:
+ self.api.log_handler.setLevel(
+ lvls.get(params[0], logging.ERROR)
+ )
+ return CommandsResponse(STATUS_OK, True)
+ else:
+ return WrongParamResp()
+
+
+class SetCmd(Command):
+ help_msg = 'allows setting runtime settings'
+ command = 'set'
+
+ subcommands = {'logging': LoggingCmd}
+
+ def action(self, params):
+ return CommandsResponse(STATUS_OK, True)
diff --git a/ryu/services/protocols/bgp/operator/commands/show/__init__.py b/ryu/services/protocols/bgp/operator/commands/show/__init__.py
new file mode 100644
index 00000000..388a1e7a
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/commands/show/__init__.py
@@ -0,0 +1,56 @@
+from ryu.services.protocols.bgp.operator.command import Command
+from ryu.services.protocols.bgp.operator.command import CommandsResponse
+from ryu.services.protocols.bgp.operator.command import STATUS_OK
+from ryu.services.protocols.bgp.operator.commands.show import count
+from ryu.services.protocols.bgp.operator.commands.show import importmap
+from ryu.services.protocols.bgp.operator.commands.show import memory
+from ryu.services.protocols.bgp.operator.commands.show import neighbor
+from ryu.services.protocols.bgp.operator.commands.show import rib
+from ryu.services.protocols.bgp.operator.commands.show import vrf
+
+
+class ShowCmd(Command):
+ help_msg = 'shows runtime state information'
+ command = 'show'
+
+ def __init__(self, *args, **kwargs):
+ super(ShowCmd, self).__init__(*args, **kwargs)
+ self.subcommands = {
+ 'count': self.Count,
+ 'logging': self.Logging,
+ 'rib': self.Rib,
+ 'vrf': self.Vrf,
+ 'memory': self.Memory,
+ 'neighbor': self.Neighbor,
+ 'importmap': self.Importmap
+ }
+
+ def action(self, params):
+ return CommandsResponse(STATUS_OK, None)
+
+ class Count(count.Count):
+ pass
+
+ class Rib(rib.Rib):
+ pass
+
+ class Vrf(vrf.Vrf):
+ pass
+
+ class Importmap(importmap.Importmap):
+ pass
+
+ class Memory(memory.Memory):
+ pass
+
+ class Neighbor(neighbor.Neighbor):
+ pass
+
+ class Logging(Command):
+ command = 'logging'
+ help_msg = 'shows if logging is on/off and current logging level.'
+
+ def action(self, params):
+ ret = {'logging': self.api.check_logging(),
+ 'level': self.api.check_logging_level()}
+ return CommandsResponse(STATUS_OK, ret)
diff --git a/ryu/services/protocols/bgp/operator/commands/show/count.py b/ryu/services/protocols/bgp/operator/commands/show/count.py
new file mode 100644
index 00000000..f0cd5b89
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/commands/show/count.py
@@ -0,0 +1,53 @@
+import logging
+
+from ryu.services.protocols.bgp.operator.command import Command
+from ryu.services.protocols.bgp.operator.command import CommandsResponse
+from ryu.services.protocols.bgp.operator.command import STATUS_ERROR
+from ryu.services.protocols.bgp.operator.command import STATUS_OK
+from ryu.services.protocols.bgp.operator.commands.responses import \
+ WrongParamResp
+
+LOG = logging.getLogger('bgpspeaker.operator.commands.show.count')
+
+
+class Count(Command):
+ help_msg = 'show counters'
+ param_help_msg = '<vpn-name> <route-family>{ipv4, ipv6}'
+ command = 'count'
+ cli_resp_line_template = 'BGP route count for VPN {0} is {1}\n'
+
+ def __init__(self, *args, **kwargs):
+ super(Count, self).__init__(*args, **kwargs)
+ self.subcommands = {
+ 'all': self.All
+ }
+
+ def action(self, params):
+ if len(params) < 1:
+ return CommandsResponse(STATUS_ERROR, 'Not enough params')
+ else:
+ vrf_name = params[0]
+ if len(params) == 2:
+ vrf_rf = params[1]
+ else:
+ vrf_rf = 'ipv4'
+
+ from ryu.services.protocols.bgp.operator.internal_api import \
+ WrongParamError
+ try:
+ return CommandsResponse(
+ STATUS_OK,
+ self.api.count_single_vrf_routes(vrf_name, vrf_rf)
+ )
+ except WrongParamError as e:
+ return WrongParamResp(e)
+
+ class All(Command):
+ help_msg = 'shows number of routes for all VRFs'
+ command = 'all'
+ cli_resp_line_template = 'BGP route count for VPN {0} is {1}\n'
+
+ def action(self, params):
+ if len(params) > 0:
+ return WrongParamResp()
+ return CommandsResponse(STATUS_OK, self.api.count_all_vrf_routes())
diff --git a/ryu/services/protocols/bgp/operator/commands/show/importmap.py b/ryu/services/protocols/bgp/operator/commands/show/importmap.py
new file mode 100644
index 00000000..d33c6706
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/commands/show/importmap.py
@@ -0,0 +1,42 @@
+from ryu.services.protocols.bgp.operator.command import Command
+from ryu.services.protocols.bgp.operator.command import CommandsResponse
+from ryu.services.protocols.bgp.operator.command import STATUS_ERROR
+from ryu.services.protocols.bgp.operator.command import STATUS_OK
+from ryu.services.protocols.bgp.operator.commands.responses import \
+ WrongParamResp
+
+from ryu.services.protocols.bgp.operator.views.bgp import CoreServiceDetailView
+
+
+class Importmap(Command):
+ help_msg = 'show importmaps'
+ param_help_msg = 'all | <name>'
+ command = 'importmap'
+
+ def __init__(self, *args, **kwargs):
+ super(Importmap, self).__init__(*args, **kwargs)
+
+ def action(self, params):
+ if len(params) != 1:
+ return WrongParamResp()
+
+ core_service = self.api.get_core_service()
+ core_service_view = CoreServiceDetailView(core_service)
+ importmap_manager = core_service_view.rel('importmap_manager')
+ importmaps_view = importmap_manager.rel('importmaps')
+
+ importmap_name = params[0]
+ if importmap_name == 'all':
+ encoded = importmaps_view.encode()
+ else:
+ encoded = importmaps_view.encode().get(importmap_name)
+ if encoded is None:
+ return CommandsResponse(
+ STATUS_ERROR,
+ 'Wrong importmap name.'
+ )
+
+ return CommandsResponse(
+ STATUS_OK,
+ encoded
+ )
diff --git a/ryu/services/protocols/bgp/operator/commands/show/memory.py b/ryu/services/protocols/bgp/operator/commands/show/memory.py
new file mode 100644
index 00000000..c519adff
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/commands/show/memory.py
@@ -0,0 +1,89 @@
+import gc
+import sys
+
+from ryu.services.protocols.bgp.operator.command import Command
+from ryu.services.protocols.bgp.operator.command import CommandsResponse
+from ryu.services.protocols.bgp.operator.command import STATUS_ERROR
+from ryu.services.protocols.bgp.operator.command import STATUS_OK
+
+
+class Memory(Command):
+ help_msg = 'show memory information'
+ command = 'memory'
+
+ def __init__(self, *args, **kwargs):
+ super(Memory, self).__init__(*args, **kwargs)
+ self.subcommands = {
+ 'summary': self.Summary}
+
+ class Summary(Command):
+ help_msg = 'shows total memory used and how it is being used'
+ command = 'summary'
+
+ def action(self, params):
+ count = {}
+ size = {}
+ total_size = 0
+ unreachable = gc.collect()
+ for obj in gc.get_objects():
+ inst_name = type(obj).__name__
+ c = count.get(inst_name, None)
+ if not c:
+ count[inst_name] = 0
+ s = size.get(inst_name, None)
+ if not s:
+ size[inst_name] = 0
+
+ count[inst_name] += 1
+ s = sys.getsizeof(obj)
+ size[inst_name] += s
+ total_size += s
+
+ # Total size in MB
+
+ total_size = total_size / 1000000
+ ret = {
+ 'unreachable': unreachable,
+ 'total': total_size,
+ 'summary': []}
+
+ for class_name, s in size.items():
+ # Calculate size in MB
+ size_mb = s / 1000000
+ # We are only interested in classes which take up more than a MB
+ if size_mb > 0:
+ ret['summary'].append(
+ {
+ 'class': class_name,
+ 'instances': count.get(class_name, None),
+ 'size': size_mb
+ }
+ )
+
+ return CommandsResponse(STATUS_OK, ret)
+
+ @classmethod
+ def cli_resp_formatter(cls, resp):
+ if resp.status == STATUS_ERROR:
+ return Command.cli_resp_formatter(resp)
+ val = resp.value
+ ret = 'Unreachable objects: {0}\n'.format(
+ val.get('unreachable', None)
+ )
+ ret += 'Total memory used (MB): {0}\n'.format(
+ val.get('total', None)
+ )
+ ret += 'Classes with instances that take up more than one MB:\n'
+ ret += '{0:<20s} {1:>16s} {2:>16s}\n'.format(
+ 'Class',
+ '#Instance',
+ 'Size(MB)'
+ )
+
+ for s in val.get('summary', []):
+ ret += '{0:<20s} {1:>16d} {2:>16d}\n'.format(
+ s.get('class', None), s.get('instances', None),
+ s.get('size', None)
+ )
+
+ return ret
diff --git a/ryu/services/protocols/bgp/operator/commands/show/neighbor.py b/ryu/services/protocols/bgp/operator/commands/show/neighbor.py
new file mode 100644
index 00000000..2e8b8715
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/commands/show/neighbor.py
@@ -0,0 +1,135 @@
+import logging
+import pprint
+
+from ryu.services.protocols.bgp.operator.command import Command
+from ryu.services.protocols.bgp.operator.command import CommandsResponse
+from ryu.services.protocols.bgp.operator.command import STATUS_ERROR
+from ryu.services.protocols.bgp.operator.command import STATUS_OK
+from ryu.services.protocols.bgp.operator.commands.responses import \
+ WrongParamResp
+from ryu.services.protocols.bgp.operator.views.bgp import CoreServiceDetailView
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv4_UC
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv4_VPN
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv6_UC
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv6_VPN
+
+LOG = logging.getLogger('bgpspeaker.operator.commands.show.neighbor')
+
+
+class NeighborSummary(Command):
+ help_msg = 'show summarized neighbor information'
+ command = 'summary'
+
+ def action(self, params):
+ requested_peers = []
+ if len(params) > 0:
+ requested_peers = [str(p) for p in params]
+
+ core_service = self.api.get_core_service()
+ core_service_view = CoreServiceDetailView(core_service)
+ peers_view = core_service_view.rel('peer_manager').rel('peers_summary')
+
+ def filter_requested(peer_id, peer_obj):
+ return not requested_peers or peer_id in requested_peers
+
+ peers_view.apply_filter(filter_requested)
+ ret = peers_view.encode()
+ return CommandsResponse(STATUS_OK, ret)
+
+
+class SentRoutes(Command):
+ help_msg = 'paths sent and not withdrawn to given peer'
+ command = 'sent-routes'
+ param_help_msg = '<ip_addr> <addr_family>{vpnv4, vpnv6, ipv4, ipv6, all}'
+
+ def action(self, params):
+ if len(params) != 2:
+ return WrongParamResp()
+ ip_addr, addr_family = params
+
+ if addr_family == 'ipv4':
+ rf = RF_IPv4_UC
+ elif addr_family == 'ipv6':
+ rf = RF_IPv6_UC
+ elif addr_family == 'vpnv4':
+ rf = RF_IPv4_VPN
+ elif addr_family == 'vpnv6':
+ rf = RF_IPv6_VPN
+ elif addr_family == 'all':
+ rf = None
+ else:
+ return WrongParamResp('wrong addr_family name')
+
+ ret = self._retrieve_paths(addr_family, rf, ip_addr).encode()
+ ret = {
+ path['nlri']['formatted_nlri']: path
+ for path in ret
+ }
+
+ return CommandsResponse(STATUS_OK, ret)
+
+ def _retrieve_paths(self, addr_family, route_family, ip_addr):
+ global_tables_view = self._retrieve_global_tables_view(
+ addr_family,
+ route_family
+ )
+ sent = global_tables_view.c_rel('destinations').c_rel('sent_routes')
+ sent.apply_filter(
+ lambda route: route.sent_peer.ip_address == ip_addr
+ )
+ paths = sent.c_rel('path')
+ paths.apply_filter(
+ lambda path: not path.is_withdraw
+ )
+ return paths
+
+ def _retrieve_global_tables_view(self, addr_family, route_family):
+ core_service = self.api.get_core_service()
+ core_sv = CoreServiceDetailView(core_service)
+ table_manager_view = core_sv.rel('table_manager')
+ global_tables_view = table_manager_view.rel('global_tables')
+ global_tables_view.apply_filter(
+ lambda k, v: addr_family == 'all' or k == route_family
+ )
+ return global_tables_view
+
+ @classmethod
+ def cli_resp_formatter(cls, resp):
+ if resp.status == STATUS_ERROR:
+ return Command.cli_resp_formatter(resp)
+
+ return '\n{0}'.format(pprint.pformat(resp.value))
+
+
+class ReceivedRoutes(SentRoutes):
+ help_msg = 'paths received and not withdrawn by given peer'
+ command = 'received-routes'
+
+ def _retrieve_paths(self, addr_family, route_family, ip_addr):
+ global_tables_view = self._retrieve_global_tables_view(
+ addr_family,
+ route_family
+ )
+ paths = global_tables_view.c_rel(
+ 'destinations'
+ ).c_rel('known_path_list')
+
+ def path_filter(path):
+ return path.source is not None and \
+ path.source.ip_address == ip_addr and \
+ not path.is_withdraw
+
+ paths.apply_filter(
+ path_filter
+ )
+ return paths
+
+
+class Neighbor(Command):
+ help_msg = 'show neighbor information'
+ command = 'neighbor'
+ subcommands = {
+ 'summary': NeighborSummary,
+ 'sent-routes': SentRoutes,
+ 'received-routes': ReceivedRoutes
+ }
diff --git a/ryu/services/protocols/bgp/operator/commands/show/rib.py b/ryu/services/protocols/bgp/operator/commands/show/rib.py
new file mode 100644
index 00000000..8469eef5
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/commands/show/rib.py
@@ -0,0 +1,65 @@
+from route_formatter_mixin import RouteFormatterMixin
+
+from ryu.services.protocols.bgp.operator.command import Command
+from ryu.services.protocols.bgp.operator.command import CommandsResponse
+from ryu.services.protocols.bgp.operator.command import STATUS_ERROR
+from ryu.services.protocols.bgp.operator.command import STATUS_OK
+
+from ryu.services.protocols.bgp.operator.commands.responses import \
+ WrongParamResp
+
+
+class RibBase(Command, RouteFormatterMixin):
+ supported_families = ['vpnv4', 'rtfilter', 'vpnv6']
+
+
+class Rib(RibBase):
+ help_msg = 'show all routes for address family (only vpnv4 supported)'
+ param_help_msg = '<address-family>'
+ command = 'rib'
+
+ def __init__(self, *args, **kwargs):
+ super(Rib, self).__init__(*args, **kwargs)
+ self.subcommands = {
+ 'all': self.All}
+
+ def action(self, params):
+ if len(params) != 1 or params[0] not in self.supported_families:
+ return WrongParamResp()
+ from ryu.services.protocols.bgp.operator.internal_api \
+ import WrongParamError
+ try:
+ return CommandsResponse(
+ STATUS_OK,
+ self.api.get_single_rib_routes(params[0])
+ )
+ except WrongParamError as e:
+ return WrongParamResp(e)
+
+ @classmethod
+ def cli_resp_formatter(cls, resp):
+ if resp.status == STATUS_ERROR:
+ return RibBase.cli_resp_formatter(resp)
+ return cls._format_family_header() + cls._format_family(resp.value)
+
+ class All(RibBase):
+ help_msg = 'show routes for all RIBs'
+ command = 'all'
+
+ def action(self, params):
+ if len(params) != 0:
+ return WrongParamResp()
+ ret = {}
+ for family in self.supported_families:
+ ret[family] = self.api.get_single_rib_routes(family)
+ return CommandsResponse(STATUS_OK, ret)
+
+ @classmethod
+ def cli_resp_formatter(cls, resp):
+ if resp.status == STATUS_ERROR:
+ return RibBase.cli_resp_formatter(resp)
+ ret = cls._format_family_header()
+ for family, data in resp.value.iteritems():
+ ret += 'Family: {0}\n'.format(family)
+ ret += cls._format_family(data)
+ return ret
diff --git a/ryu/services/protocols/bgp/operator/commands/show/route_formatter_mixin.py b/ryu/services/protocols/bgp/operator/commands/show/route_formatter_mixin.py
new file mode 100644
index 00000000..ff69e069
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/commands/show/route_formatter_mixin.py
@@ -0,0 +1,43 @@
+import StringIO
+
+
+class RouteFormatterMixin(object):
+
+ @classmethod
+ def _format_family_header(cls):
+ ret = ''
+ ret += ('Status codes: * valid, > best\n')
+ ret += ' {0:<3s} {1:<32s} {2:<20s} {3:<10s} {4:<20s} {5:<}\n'.format(
+ '', 'Network', 'Next Hop', 'Reason', 'Metric', 'Path')
+ return ret
+
+ @classmethod
+ def _format_family(cls, dest_list):
+ msg = StringIO.StringIO()
+
+ def _append_path_info(buff, path, is_best, show_prefix):
+ aspath = path.get('aspath')
+ bpr = path.get('bpr')
+ next_hop = path.get('nexthop')
+ med = path.get('metric')
+ # Construct path status string.
+ path_status = '*'
+ if is_best:
+ path_status += '>'
+
+ # Check if we want to show prefix.
+ prefix = ''
+ if show_prefix:
+ prefix = path.get('prefix')
+
+ # Append path info to String buffer.
+ buff.write(' {0:<3s} {1:<32s} {2:<20s} {3:<20s} {4:<10s} {5:<}\n'.
+ format(path_status, prefix, next_hop, bpr, str(med),
+ ', '.join(map(str, aspath))))
+
+ for dist in dest_list:
+ for idx, path in enumerate(dist.get('paths')):
+ _append_path_info(msg, path, path['best'], (idx == 0))
+ ret = msg.getvalue()
+ msg.close()
+ return ret
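+
+
+# Editor's illustrative example (not part of the original patch): the mixin
+# expects the dict layout produced by InternalApi._dst_to_dict(); the values
+# below are made up for demonstration only.
+if __name__ == '__main__':
+    example_dest_list = [
+        {'paths': [{'best': True,
+                    'bpr': 'Only Path',
+                    'prefix': '10.0.0.0/24',
+                    'nexthop': '192.168.1.1',
+                    'metric': 100,
+                    'aspath': [65001, 65002]}]}
+    ]
+    print(RouteFormatterMixin._format_family_header() +
+          RouteFormatterMixin._format_family(example_dest_list))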
diff --git a/ryu/services/protocols/bgp/operator/commands/show/vrf.py b/ryu/services/protocols/bgp/operator/commands/show/vrf.py
new file mode 100644
index 00000000..4b34c096
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/commands/show/vrf.py
@@ -0,0 +1,162 @@
+import logging
+import pprint
+
+from ryu.services.protocols.bgp.operator.command import Command
+from ryu.services.protocols.bgp.operator.command import CommandsResponse
+from ryu.services.protocols.bgp.operator.command import STATUS_ERROR
+from ryu.services.protocols.bgp.operator.command import STATUS_OK
+from ryu.services.protocols.bgp.operator.commands.responses import \
+ WrongParamResp
+from ryu.services.protocols.bgp.operator.views.conf import ConfDetailView
+from ryu.services.protocols.bgp.operator.views.conf import ConfDictView
+from route_formatter_mixin import RouteFormatterMixin
+
+LOG = logging.getLogger('bgpspeaker.operator.commands.show.vrf')
+
+
+class Routes(Command, RouteFormatterMixin):
+ help_msg = 'show routes present for vrf'
+ param_help_msg = '<vpn-name> <route-family>(ipv4, ipv6)'
+ command = 'routes'
+
+ def __init__(self, *args, **kwargs):
+ super(Routes, self).__init__(*args, **kwargs)
+ self.subcommands = {
+ 'all': self.All,
+ }
+
+ def action(self, params):
+ if len(params) != 2:
+ return WrongParamResp()
+ vrf_name = params[0]
+ vrf_rf = params[1]
+ if vrf_rf not in ('ipv4', 'ipv6'):
+ return WrongParamResp('route-family not one of (ipv4, ipv6)')
+
+ from ryu.services.protocols.bgp.operator.internal_api import \
+ WrongParamError
+
+ try:
+ return CommandsResponse(
+ STATUS_OK,
+ self.api.get_single_vrf_routes(vrf_name, vrf_rf)
+ )
+ except WrongParamError as e:
+ return CommandsResponse(
+ STATUS_ERROR,
+ 'wrong parameters: %s' % str(e)
+ )
+
+ @classmethod
+ def cli_resp_formatter(cls, resp):
+ if resp.status == STATUS_ERROR:
+ return super(Routes, cls).cli_resp_formatter(resp)
+ return cls._format_family_header() + cls._format_family(resp.value)
+
+ class All(Command, RouteFormatterMixin):
+ help_msg = 'show routes for all VRFs'
+ command = 'all'
+
+ def action(self, params):
+ if len(params) != 0:
+ return WrongParamResp()
+ return CommandsResponse(
+ STATUS_OK,
+ self.api.get_all_vrf_routes()
+ )
+
+ @classmethod
+ def cli_resp_formatter(cls, resp):
+ if resp.status == STATUS_ERROR:
+ return Command.cli_resp_formatter(resp)
+ ret = cls._format_family_header()
+ for family, data in resp.value.iteritems():
+ ret += 'VPN: {0}\n'.format(family)
+ ret += cls._format_family(data)
+ return ret
+
+
+class CountRoutesMixin(object):
+ def _count_routes(self, vrf_name, vrf_rf):
+ return len(self.api.get_single_vrf_routes(vrf_name, vrf_rf))
+
+
+class Summary(Command, CountRoutesMixin):
+ help_msg = 'show configuration and summary of vrf'
+ param_help_msg = '<rd> <route_family>| all'
+ command = 'summary'
+
+ def __init__(self, *args, **kwargs):
+ super(Summary, self).__init__(*args, **kwargs)
+ self.subcommands = {
+ 'all': self.All
+ }
+
+ def action(self, params):
+ if len(params) == 0:
+ return WrongParamResp('Not enough params')
+
+ vrf_confs = self.api.get_vrfs_conf()
+ if len(params) < 2:
+ vrf_rf = 'ipv4'
+ else:
+ vrf_rf = params[1]
+
+ vrf_key = params[0], vrf_rf
+
+ if vrf_key in vrf_confs:
+ view = ConfDetailView(vrf_confs[vrf_key])
+ encoded = view.encode()
+ encoded['routes_count'] = self._count_routes(params[0], vrf_rf)
+ else:
+ return WrongParamResp('No vrf matched by %s' % str(vrf_key))
+
+ return CommandsResponse(
+ STATUS_OK,
+ encoded
+ )
+
+ @classmethod
+ def cli_resp_formatter(cls, resp):
+ if resp.status == STATUS_ERROR:
+ return Command.cli_resp_formatter(resp)
+ return pprint.pformat(resp.value)
+
+ class All(Command, CountRoutesMixin):
+ command = 'all'
+ help_msg = 'shows all vrfs configurations and summary'
+
+ def action(self, params):
+ vrf_confs = self.api.get_vrfs_conf()
+ view = ConfDictView(vrf_confs)
+ encoded = view.encode()
+ for vrf_key, conf in encoded.iteritems():
+ vrf_name, vrf_rf = vrf_key
+ conf['routes_count'] = self._count_routes(
+ vrf_name,
+ vrf_rf
+ )
+
+ encoded = {str(k): v for k, v in encoded.iteritems()}
+ return CommandsResponse(
+ STATUS_OK,
+ encoded
+ )
+
+ def _count_routes(self, vrf_name, vrf_rf):
+ return len(self.api.get_single_vrf_routes(vrf_name, vrf_rf))
+
+
+class Vrf(Routes):
+ """Main node for vrf related commands. Acts also as Routes node (that's why
+ it inherits from it) for legacy reasons.
+ """
+ help_msg = 'vrf related commands subtree'
+ command = 'vrf'
+
+ def __init__(self, *args, **kwargs):
+ super(Vrf, self).__init__(*args, **kwargs)
+ self.subcommands.update({
+ 'routes': Routes,
+ 'summary': Summary
+ })
diff --git a/ryu/services/protocols/bgp/operator/internal_api.py b/ryu/services/protocols/bgp/operator/internal_api.py
new file mode 100644
index 00000000..fa307b8f
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/internal_api.py
@@ -0,0 +1,157 @@
+import logging
+import traceback
+
+from ryu.services.protocols.bgp.protocols.bgp import nlri
+from ryu.services.protocols.bgp.protocols.bgp.pathattr import AsPath
+from ryu.services.protocols.bgp.protocols.bgp.pathattr import Med
+from ryu.services.protocols.bgp.base import add_bgp_error_metadata
+from ryu.services.protocols.bgp.base import BGPSException
+from ryu.services.protocols.bgp.base import SUPPORTED_GLOBAL_RF
+from ryu.services.protocols.bgp.core_manager import CORE_MANAGER
+
+
+LOG = logging.getLogger('bgpspeaker.operator.internal_api')
+
+INTERNAL_API_ERROR = 100
+INTERNAL_API_SUB_ERROR = 101
+
+
+class InternalApi(object):
+
+ def __init__(self, log_handler=None):
+ self.log_handler = log_handler
+
+ def count_all_vrf_routes(self):
+ vrf_tables = self._get_vrf_tables()
+ ret = {}
+ for key in vrf_tables.keys():
+ vrf_name, vrf_rf = key
+ ret.update(self.count_single_vrf_routes(vrf_name, vrf_rf))
+ return ret
+
+ def count_single_vrf_routes(self, vrf_name, vrf_rf):
+ vrf = self._get_vrf_tables().get((vrf_name, vrf_rf))
+ if vrf is None:
+ raise WrongParamError('wrong vpn key %s' % str((vrf_name, vrf_rf)))
+ vrf_name = vrf_name.encode('ascii', 'ignore')
+
+ route_count = \
+ len([d for d in vrf.itervalues() if d.best_path])
+ return {str((vrf_name, vrf_rf)): route_count}
+
+ def get_vrfs_conf(self):
+ return CORE_MANAGER.vrfs_conf.vrfs_by_rd_rf_id
+
+ def get_all_vrf_routes(self):
+ vrfs = self._get_vrf_tables()
+ ret = {}
+ for (vrf_id, vrf_rf), table in sorted(vrfs.iteritems()):
+ ret[str((vrf_id, vrf_rf))] = self._get_single_vrf_routes(table)
+ return ret
+
+ def get_single_vrf_routes(self, vrf_id, vrf_rf):
+ vrf = self._get_vrf_table(vrf_id, vrf_rf)
+ if not vrf:
+ raise WrongParamError('wrong vpn name %s' % str((vrf_id, vrf_rf)))
+ return [self._dst_to_dict(d) for d in vrf.itervalues()]
+
+ def _get_single_vrf_routes(self, vrf_table):
+ return [self._dst_to_dict(d) for d in vrf_table.itervalues()]
+
+ def _get_vrf_table(self, vrf_name, vrf_rf):
+ return CORE_MANAGER.get_core_service()\
+ .table_manager.get_vrf_table(vrf_name, vrf_rf)
+
+ def _get_vrf_tables(self):
+ return CORE_MANAGER.get_core_service().table_manager.get_vrf_tables()
+
+ def get_single_rib_routes(self, addr_family):
+ rfs = {
+ 'vpnv4': nlri.get_rf(1, 128),
+ 'vpnv6': nlri.get_rf(2, 128),
+ 'rtfilter': nlri.get_rf(1, 132)
+ }
+ if addr_family not in rfs:
+ raise WrongParamError('Unknown or unsupported family')
+
+ rf = rfs.get(addr_family)
+ table_manager = self.get_core_service().table_manager
+ gtable = table_manager.get_global_table_by_route_family(rf)
+ if gtable is not None:
+ return [self._dst_to_dict(dst)
+ for dst in sorted(gtable.itervalues())]
+ else:
+ return []
+
+ def _dst_to_dict(self, dst):
+ ret = {'paths': [],
+ 'prefix': dst.nlri.formatted_nlri_str}
+
+ def _path_to_dict(dst, path):
+ aspath = path.get_pattr(AsPath.ATTR_NAME).path_seg_list
+ if aspath is None or len(aspath) == 0:
+ aspath = ''
+
+ nexthop = path.nexthop
+ # Get the MED path attribute
+ med = path.get_pattr(Med.ATTR_NAME)
+ med = med.value if med else ''
+ # Get best path reason
+ bpr = dst.best_path_reason if path == dst.best_path else ''
+ return {'best': (path == dst.best_path),
+ 'bpr': bpr,
+ 'prefix': path.nlri.formatted_nlri_str,
+ 'nexthop': nexthop,
+ 'metric': med,
+ 'aspath': aspath}
+
+ for path in dst.known_path_list:
+ ret['paths'].append(_path_to_dict(dst, path))
+
+ return ret
+
+ def check_logging(self):
+ if self.log_handler and self._has_log_handler(self.log_handler):
+ return True
+ else:
+ return False
+
+ def check_logging_level(self):
+ return logging.getLevelName(self.log_handler.level)
+
+ def _has_log_handler(self, log_handler):
+ if log_handler in logging.getLogger('bgpspeaker').handlers:
+ return True
+ return False
+
+ def route_refresh(self, peer_ip=None, afi=None, safi=None):
+ if not peer_ip:
+ peer_ip = 'all'
+
+ try:
+ route_families = []
+ if afi is None and safi is None:
+ route_families.extend(SUPPORTED_GLOBAL_RF)
+ else:
+ route_family = nlri.get_rf(afi, safi)
+ if (route_family not in SUPPORTED_GLOBAL_RF):
+ raise WrongParamError('Not supported address-family'
+ ' %s, %s' % (afi, safi))
+ route_families.append(route_family)
+
+ pm = CORE_MANAGER.get_core_service().peer_manager
+ pm.make_route_refresh_request(peer_ip, *route_families)
+ except Exception as e:
+ LOG.error(traceback.format_exc())
+ raise WrongParamError(str(e))
+ return None
+
+ def get_core_service(self):
+ return CORE_MANAGER.get_core_service()
+
+
+@add_bgp_error_metadata(code=INTERNAL_API_ERROR,
+ sub_code=INTERNAL_API_SUB_ERROR,
+ def_desc='Unknown internal api exception.')
+class WrongParamError(BGPSException):
+ pass
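+
+
+# Editor's illustrative sketch (not part of the original patch): InternalApi
+# is the `api` object injected into the operator command tree (see
+# commands/root.py). With a started core service one could run, roughly:
+#
+#   from ryu.services.protocols.bgp.operator.commands.root import RootCmd
+#   resp, _cls = RootCmd(api=InternalApi())(['show', 'rib', 'vpnv4'])
+#
+# Most methods above require CORE_MANAGER to hold a running core service.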
diff --git a/ryu/services/protocols/bgp/operator/views/__init__.py b/ryu/services/protocols/bgp/operator/views/__init__.py
new file mode 100644
index 00000000..e8589ffc
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/views/__init__.py
@@ -0,0 +1 @@
+__author__ = 'yak'
diff --git a/ryu/services/protocols/bgp/operator/views/base.py b/ryu/services/protocols/bgp/operator/views/base.py
new file mode 100644
index 00000000..bf62732f
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/views/base.py
@@ -0,0 +1,302 @@
+"""
+This module's purpose is to enable us to present the internals of objects
+to the operator in a well-defined way. To do this we can define "views"
+on some objects. A view is a definition of how to present an object
+and its relations to other objects, which also have their views defined.
+
+By using views we can avoid making all interesting internal values
+public. They stay private and only the "view" accesses them
+(think of a friend class in C++).
+"""
+import logging
+import types
+
+from ryu.services.protocols.bgp.operator.views import fields
+
+LOG = logging.getLogger('bgpspeaker.operator.views.base')
+
+
+class RdyToFlattenCollection(object):
+ pass
+
+
+class RdyToFlattenList(list, RdyToFlattenCollection):
+ pass
+
+
+class RdyToFlattenDict(dict, RdyToFlattenCollection):
+ pass
+
+
+class OperatorAbstractView(object):
+ """Abstract base class for operator views. It isn't meant to be
+ instantiated.
+ """
+
+ def __init__(self, obj, filter_func=None):
+ """Init
+
+ :param obj: data model for the view; in other words, the object we
+ are creating the view for. In the case of a ListView it should be
+ a list and in the case of a DictView it should be a dict.
+ :param filter_func: function to filter models
+ """
+ self._filter_func = filter_func
+ self._fields = self._collect_fields()
+ self._obj = obj
+
+ @classmethod
+ def _collect_fields(cls):
+ names = [attr for attr in dir(cls)
+ if isinstance(getattr(cls, attr), fields.Field)]
+ return {name: getattr(cls, name) for name in names}
+
+ def combine_related(self, field_name):
+ """Combines related views. In case of DetailView it just returns
+ one-element list containing related view wrapped in
+ CombinedViewsWrapper.
+
+ In case of ListView and DictView it returns a list of related views
+ for every element of model collection also wrapped
+ in CombinedViewsWrapper.
+
+ :param field_name: field name of related view
+ :returns: vectorized form of related views. You can access them
+ as if you had only one view and you will receive flattened list
+ of responses from related views. Look at docstring of
+ CombinedViewsWrapper
+ """
+ raise NotImplementedError()
+
+ def c_rel(self, *args, **kwargs):
+ """Shortcut for combine_related. Look above
+ """
+ return self.combine_related(*args, **kwargs)
+
+ def get_field(self, field_name):
+ """Get value of data field.
+
+ :return: value of data-field of this view
+ """
+ raise NotImplementedError()
+
+ def encode(self):
+ """Representation of view which is using only python standard types.
+
+ :return: dict representation of this views data. However it
+ doesn't have to be a dict. In case of ListView it would
+ return a list. It should return wrapped types
+ for list - RdyToFlattenList, for dict - RdyToFlattenDict
+ """
+ raise NotImplementedError()
+
+ @property
+ def model(self):
+ """Getter for data model being presented by this view. Every view is
+ associatetd with some data model.
+
+ :return: underlaying data of this view
+ """
+ raise NotImplementedError()
+
+ def apply_filter(self, filter_func):
+ """Sets filter function to apply on model
+
+ :param filter_func: function which takes the model and returns it
+ filtered
+ """
+ self._filter_func = filter_func
+
+ def clear_filter(self):
+ self._filter_func = None
+
+
+class OperatorDetailView(OperatorAbstractView):
+ def combine_related(self, field_name):
+ f = self._fields[field_name]
+ return CombinedViewsWrapper([f.retrieve_and_wrap(self._obj)])
+
+ def get_field(self, field_name):
+ f = self._fields[field_name]
+ return f.get(self._obj)
+
+ def encode(self):
+ return {field_name: field.get(self._obj)
+ for field_name, field in self._fields.iteritems()
+ if isinstance(field, fields.DataField)}
+
+ def rel(self, field_name):
+ f = self._fields[field_name]
+ return f.retrieve_and_wrap(self._obj)
+
+ @property
+ def model(self):
+ return self._obj
+
+
+class OperatorListView(OperatorAbstractView):
+ def __init__(self, obj, filter_func=None):
+ assert isinstance(obj, list)
+ obj = RdyToFlattenList(obj)
+ super(OperatorListView, self).__init__(obj, filter_func)
+
+ def combine_related(self, field_name):
+ f = self._fields[field_name]
+ return CombinedViewsWrapper(RdyToFlattenList(
+ map(lambda obj: f.retrieve_and_wrap(obj), self.model)
+ ))
+
+ def get_field(self, field_name):
+ f = self._fields[field_name]
+ return RdyToFlattenList([f.get(obj) for obj in self.model])
+
+ def encode(self):
+ return RdyToFlattenList(
+ [{field_name: field.get(obj)
+ for field_name, field in self._fields.iteritems()
+ if isinstance(field, fields.DataField)}
+ for obj in self.model]
+ )
+
+ @property
+ def model(self):
+ if self._filter_func is not None:
+ return RdyToFlattenList(filter(self._filter_func, self._obj))
+ else:
+ return self._obj
+
+
+class OperatorDictView(OperatorAbstractView):
+ def __init__(self, obj, filter_func=None):
+ assert isinstance(obj, dict)
+ obj = RdyToFlattenDict(obj)
+ super(OperatorDictView, self).__init__(obj, filter_func)
+
+ def combine_related(self, field_name):
+ f = self._fields[field_name]
+ return CombinedViewsWrapper(RdyToFlattenList(
+ map(lambda obj: f.retrieve_and_wrap(obj), self.model.itervalues()))
+ )
+
+ def get_field(self, field_name):
+ f = self._fields[field_name]
+ return RdyToFlattenDict(
+ {key: f.get(obj) for key, obj in self.model.iteritems()}
+ )
+
+ def encode(self):
+ return RdyToFlattenDict(
+ {key: {field_name: field.get(obj)
+ for field_name, field in self._fields.iteritems()
+ if isinstance(field, fields.DataField)}
+ for key, obj in self.model.iteritems()}
+ )
+
+ @property
+ def model(self):
+ if self._filter_func is not None:
+ new_model = RdyToFlattenDict()
+ for k, v in self._obj.iteritems():
+ if self._filter_func(k, v):
+ new_model[k] = v
+ return new_model
+ else:
+ return self._obj
+
+
+class CombinedViewsWrapper(RdyToFlattenList):
+ """List-like wrapper for views. It provides same interface as any other
+ views but enables as to access all views in bulk.
+ It wraps and return responses from all views as a list. Be aware that
+ in case of DictViews wrapped in CombinedViewsWrapper you loose
+ information about dict keys.
+ """
+
+ def __init__(self, obj):
+ super(CombinedViewsWrapper, self).__init__(obj)
+ self._obj = obj
+
+ def combine_related(self, field_name):
+ return CombinedViewsWrapper(
+ list(_flatten(
+ [obj.combine_related(field_name) for obj in self._obj]
+ ))
+ )
+
+ def c_rel(self, *args, **kwargs):
+ return self.combine_related(*args, **kwargs)
+
+ def encode(self):
+ return list(_flatten([obj.encode() for obj in self._obj]))
+
+ def get_field(self, field_name):
+ return list(_flatten([obj.get_field(field_name) for obj in self._obj]))
+
+ @property
+ def model(self):
+ return list(_flatten([obj.model for obj in self._obj]))
+
+ def apply_filter(self, filter_func):
+ for obj in self._obj:
+ obj.apply_filter(filter_func)
+
+ def clear_filter(self):
+ for obj in self._obj:
+ obj.clear_filter()
+
+
+def _flatten(l, max_level=10):
+ """Generator function going deep in tree-like structures
+ (i.e. dicts in dicts or lists in lists etc.) and returning all elements as
+ a flat list. It's flattening only lists and dicts which are subclasses of
+ RdyToFlattenCollection. Regular lists and dicts are treated as a
+ single items.
+
+ :param l: some iterable to be flattened
+ :return: flattened iterator
+ """
+ if max_level >= 0:
+ _iter = l.values() if isinstance(l, types.DictType) else l
+ for el in _iter:
+ if isinstance(el, RdyToFlattenCollection):
+ for sub in _flatten(el, max_level=max_level - 1):
+ yield sub
+ else:
+ yield el
+ else:
+ yield l
+
+
+def _create_collection_view(detail_view_class, name, encode=None,
+ view_class=None):
+ assert issubclass(detail_view_class, OperatorDetailView)
+ class_fields = detail_view_class._collect_fields()
+ if encode is not None:
+ class_fields.update({'encode': encode})
+ return type(name, (view_class,), class_fields)
+
+
+# function creating a DictView class from a DetailView class
+def create_dict_view_class(detail_view_class, name):
+ encode = None
+ if 'encode' in dir(detail_view_class):
+ def encode(self):
+ return RdyToFlattenDict({key: detail_view_class(obj).encode()
+ for key, obj in self.model.iteritems()})
+
+ return _create_collection_view(
+ detail_view_class, name, encode, OperatorDictView
+ )
+
+
+# function creating a ListView class from a DetailView class
+def create_list_view_class(detail_view_class, name):
+ encode = None
+ if 'encode' in dir(detail_view_class):
+ def encode(self):
+ return RdyToFlattenList([detail_view_class(obj).encode()
+ for obj in self.model])
+
+ return _create_collection_view(
+ detail_view_class, name, encode, OperatorListView
+ )
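+
+
+# Usage sketch (illustrative): these factories are how the collection views in
+# views/bgp.py are declared, e.g.
+#   PeerDictView = create_dict_view_class(PeerDetailView, 'PeerDictView')
+# The generated class reuses the detail view's fields but iterates over a dict
+# (or list) of model objects instead of a single one.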
diff --git a/ryu/services/protocols/bgp/operator/views/bgp.py b/ryu/services/protocols/bgp/operator/views/bgp.py
new file mode 100644
index 00000000..6f53d4f4
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/views/bgp.py
@@ -0,0 +1,273 @@
+from ryu.services.protocols.bgp.operator.views.base import \
+ create_dict_view_class
+from ryu.services.protocols.bgp.operator.views.base import \
+ create_list_view_class
+from ryu.services.protocols.bgp.operator.views.base import OperatorDetailView
+from ryu.services.protocols.bgp.operator.views import fields
+
+from ryu.services.protocols.bgp.protocols.bgp import pathattr
+
+
+class CoreServiceDetailView(OperatorDetailView):
+ rf_state = fields.RelatedViewField(
+ 'rf_state',
+ 'bgpspeaker.operator.views.bgp.RfStateDetailView'
+ )
+ importmap_manager = fields.RelatedDictViewField(
+ '_importmap_manager',
+ 'bgpspeaker.operator.views.other.ImportmapManagerDetailView'
+ )
+ table_manager = fields.RelatedViewField(
+ '_table_manager',
+ 'bgpspeaker.operator.views.bgp.TableManagerDetailView'
+ )
+ peer_manager = fields.RelatedViewField(
+ '_peer_manager',
+ 'bgpspeaker.operator.views.bgp.PeerManagerDetailView'
+ )
+ router_id = fields.DataField('router_id')
+
+
+class TableManagerDetailView(OperatorDetailView):
+ tables = fields.RelatedDictViewField(
+ '_tables',
+ 'bgpspeaker.operator.views.bgp.TableDictView'
+ )
+ tables_for_rt = fields.RelatedDictViewField(
+ '_tables_for_rt',
+ 'bgpspeaker.operator.views.bgp.TableDictView'
+ )
+ global_tables = fields.RelatedDictViewField(
+ '_global_tables',
+ 'bgpspeaker.operator.views.bgp.TableDictView'
+ )
+ asbr_label_range = fields.DataField('_asbr_label_range')
+ next_hop_label = fields.DataField('_next_hop_label')
+ next_vpnv4_label = fields.DataField('_next_vpnv4_label')
+
+
+class PeerManagerDetailView(OperatorDetailView):
+ peers = fields.RelatedListViewField(
+ '_peers',
+ 'bgpspeaker.operator.views.bgp.PeerDictView'
+ )
+ peers_summary = fields.RelatedListViewField(
+ '_peers',
+ 'bgpspeaker.operator.views.bgp.PeerDictSummaryView'
+ )
+
+
+class RfStateDetailView(OperatorDetailView):
+ pass
+
+
+class PeerStateDetailView(OperatorDetailView):
+ bgp_state = fields.DataField('_bgp_state')
+ last_error = fields.DataField('_last_bgp_error')
+
+ def encode(self):
+ ret = super(PeerStateDetailView, self).encode()
+ ret.update(self._obj.get_stats_summary_dict())
+ return ret
+
+
+class PeerDetailView(OperatorDetailView):
+ remote_as = fields.DataField('remote_as')
+ ip_address = fields.DataField('ip_address')
+ enabled = fields.DataField('enabled')
+ neigh_conf = fields.RelatedViewField(
+ '_neigh_conf',
+ 'bgpspeaker.operator.views.conf.ConfDetailView'
+ )
+ common_conf = fields.RelatedViewField(
+ '_common_conf',
+ 'bgpspeaker.operator.views.conf.ConfDetailView'
+ )
+ state = fields.RelatedViewField(
+ 'state',
+ 'bgpspeaker.operator.views.bgp.PeerStateDetailView'
+ )
+
+ def encode(self):
+ ret = super(PeerDetailView, self).encode()
+ ret.update({
+ 'stats': self.rel('state').encode(),
+ 'settings': self.rel('neigh_conf').encode()
+ })
+ return ret
+
+
+class PeerDetailSummaryView(PeerDetailView):
+ def encode(self):
+ return {
+ 'conf': self.rel('neigh_conf').encode(),
+ 'info': self.rel('state').encode()
+ }
+
+
+class PeerRfDetailView(OperatorDetailView):
+ rf = fields.DataField('rf')
+ enabled = fields.DataField('enabled')
+ peer = fields.RelatedViewField(
+ 'peer',
+ 'bgpspeaker.operator.views.bgp.PeerDetailView'
+ )
+
+
+class TableDetailView(OperatorDetailView):
+ scope_id = fields.DataField('scope_id')
+ route_family = fields.DataField('route_family')
+ destinations = fields.RelatedDictViewField(
+ '_destinations',
+ 'bgpspeaker.operator.views.bgp.DestinationDictView'
+ )
+
+
+class PathDetailView(OperatorDetailView):
+ source_version_num = fields.DataField('source_version_num')
+ route_family = fields.RelatedViewField(
+ 'route_family', 'bgpspeaker.operator.views.bgp.RouteFamilyView'
+ )
+ nlri = fields.RelatedViewField(
+ 'nlri',
+ 'bgpspeaker.operator.views.bgp.NlriDetailView'
+ )
+ is_withdraw = fields.DataField('is_withdraw')
+ nexthop = fields.DataField('nexthop')
+ pathattr_map = fields.DataField('pathattr_map')
+ source = fields.RelatedViewField(
+ 'source',
+ 'bgpspeaker.operator.views.bgp.PeerDetailView'
+ )
+
+ def encode(self):
+ ret = super(PathDetailView, self).encode()
+ ret['nlri'] = self.rel('nlri').encode()
+ ret['route_family'] = self.rel('route_family').encode()
+ as_path = self.get_field('pathattr_map').get(pathattr.AsPath.ATTR_NAME)
+ origin = self.get_field('pathattr_map').get(pathattr.Origin.ATTR_NAME)
+ metric = self.get_field('pathattr_map').get(pathattr.Med.ATTR_NAME)
+ local_pref = self.get_field('pathattr_map').get(
+ pathattr.LocalPref.ATTR_NAME
+ )
+
+ ret['as_path'] = as_path.value if as_path else None
+ ret['origin'] = origin.value if origin else None
+ ret['metric'] = metric.value if metric else None
+ ret['local_pref'] = local_pref.value if local_pref else None
+ ext = ret['pathattr_map'].get(pathattr.ExtCommunity.ATTR_NAME)
+ del ret['pathattr_map']
+ if ext:
+ ret['rt_list'] = ext.rt_list
+ ret['soo_list'] = ext.soo_list
+ return ret
+
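+# Illustrative shape of PathDetailView.encode() output (values are made up):
+#   {'source_version_num': 1, 'is_withdraw': False, 'nexthop': '192.0.2.1',
+#    'nlri': '1.1.1.1/32', 'route_family': {'afi': 1, 'safi': 128},
+#    'as_path': [[64511]], 'origin': 'igp', 'metric': None, 'local_pref': 100}
+# with 'rt_list' and 'soo_list' added when an ExtCommunity attribute is present.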
+
+class SentRouteDetailView(OperatorDetailView):
+ path = fields.RelatedViewField(
+ 'path',
+ 'bgpspeaker.operator.views.bgp.PathDetailView',
+ )
+ peer = fields.RelatedViewField(
+ '_sent_peer',
+ 'bgpspeaker.operator.views.bgp.PeerDetailView'
+ )
+
+
+class DestinationDetailView(OperatorDetailView):
+ table = fields.RelatedViewField(
+ '_table',
+ 'bgpspeaker.operator.views.bgp.TableDetailView',
+ )
+ best_path = fields.RelatedViewField(
+ 'best_path',
+ 'bgpspeaker.operator.views.bgp.PathDetailView'
+ )
+ known_path_list = fields.RelatedListViewField(
+ 'known_path_list',
+ 'bgpspeaker.operator.views.bgp.PathListView'
+ )
+ new_path_list = fields.RelatedListViewField(
+ '_new_path_list',
+ 'bgpspeaker.operator.views.bgp.PathListView'
+ )
+ withdraw_list = fields.RelatedListViewField(
+ '_withdraw_list',
+ 'bgpspeaker.operator.views.bgp.PathListView'
+ )
+ sent_routes = fields.RelatedListViewField(
+ 'sent_routes',
+ 'bgpspeaker.operator.views.bgp.SentRouteListView'
+ )
+ nlri = fields.DataField('nlri')
+ route_family = fields.DataField('route_family')
+
+
+class IpNlriDetailView(OperatorDetailView):
+ formatted_nlri = fields.DataField('formatted_nlri_str')
+ prefix = fields.DataField('prefix')
+
+
+class VpnNlriDetailView(IpNlriDetailView):
+ labels = fields.DataField('label_list')
+ rd = fields.DataField('route_disc')
+
+
+class NlriDetailView(OperatorDetailView):
+ def __new__(cls, obj, filter_func=None):
+ from ryu.services.protocols.bgp.protocols.bgp.nlri import Vpnv4, Vpnv6
+ from ryu.services.protocols.bgp.protocols.bgp.nlri import Ipv4, Ipv6
+ if isinstance(obj, (Vpnv4, Vpnv6)):
+ return VpnNlriDetailView(obj)
+ elif isinstance(obj, (Ipv4, Ipv6)):
+ return IpNlriDetailView(obj)
+ else:
+ return OperatorDetailView(obj, filter_func)
+
+ def encode(self):
+ return self._obj.formatted_nlri_str
+
+
+class RouteFamilyView(OperatorDetailView):
+ afi = fields.DataField('afi')
+ safi = fields.DataField('safi')
+
+##################################################################
+# Declarations of list and dict views based on detail views above
+##################################################################
+PeerListView = create_list_view_class(PeerDetailView, 'PeerListView')
+PeerDictView = create_dict_view_class(PeerDetailView, 'PeerDictView')
+
+PeerListSummaryView = create_list_view_class(
+ PeerDetailSummaryView,
+ 'PeerListSummaryView'
+)
+
+PeerDictSummaryView = create_dict_view_class(
+ PeerDetailSummaryView,
+ 'PeerDictSummaryView'
+)
+
+TableDictView = create_dict_view_class(TableDetailView, 'TableDictView')
+
+
+DestinationListView = create_list_view_class(
+ DestinationDetailView, 'DestinationListView'
+)
+
+DestinationDictView = create_dict_view_class(
+ DestinationDetailView, 'DestinationDictView'
+)
+
+PathListView = create_list_view_class(PathDetailView, 'PathListView')
+PathDictView = create_dict_view_class(PathDetailView, 'PathDictView')
+
+SentRouteListView = create_list_view_class(
+ SentRouteDetailView,
+ 'SentRouteListView'
+)
+
+SentRouteDictView = create_dict_view_class(
+ SentRouteDetailView,
+ 'SentRouteDictView'
+)
diff --git a/ryu/services/protocols/bgp/operator/views/conf.py b/ryu/services/protocols/bgp/operator/views/conf.py
new file mode 100644
index 00000000..f348fca0
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/views/conf.py
@@ -0,0 +1,14 @@
+from ryu.services.protocols.bgp.operator.views.base import \
+ create_dict_view_class
+from ryu.services.protocols.bgp.operator.views.base import OperatorDetailView
+from ryu.services.protocols.bgp.operator.views import fields
+
+
+class ConfDetailView(OperatorDetailView):
+ settings = fields.DataField('_settings')
+
+ def encode(self):
+ return self.get_field('settings')
+
+
+ConfDictView = create_dict_view_class(ConfDetailView, 'ConfDictView')
diff --git a/ryu/services/protocols/bgp/operator/views/fields.py b/ryu/services/protocols/bgp/operator/views/fields.py
new file mode 100644
index 00000000..ad219549
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/views/fields.py
@@ -0,0 +1,69 @@
+import importlib
+import inspect
+
+
+class Field(object):
+ def __init__(self, field_name):
+ self.field_name = field_name
+
+ def get(self, obj):
+ return getattr(obj, self.field_name)
+
+
+class RelatedViewField(Field):
+ def __init__(self, field_name, operator_view_class):
+ super(RelatedViewField, self).__init__(field_name)
+ self.__operator_view_class = operator_view_class
+
+ @property
+ def _operator_view_class(self):
+ if inspect.isclass(self.__operator_view_class):
+ return self.__operator_view_class
+ elif isinstance(self.__operator_view_class, basestring):
+ try:
+ module_name, class_name =\
+ self.__operator_view_class.rsplit('.', 1)
+ return class_for_name(module_name, class_name)
+            except (AttributeError, ValueError, ImportError):
+                raise WrongOperatorViewClassError(
+                    'There is no "%s" class' % self.__operator_view_class
+                )
+        else:
+            raise WrongOperatorViewClassError(
+                'Wrong type of operator view class: %s'
+                % type(self.__operator_view_class)
+            )
+
+ def retrieve_and_wrap(self, obj):
+ related_obj = self.get(obj)
+ return self.wrap(related_obj)
+
+ def wrap(self, obj):
+ return self._operator_view_class(obj)
+
+
+class RelatedListViewField(RelatedViewField):
+ pass
+
+
+class RelatedDictViewField(RelatedViewField):
+ pass
+
+
+class DataField(Field):
+ pass
+
+
+class OptionalDataField(DataField):
+ def get(self, obj):
+ if hasattr(obj, self.field_name):
+ return getattr(obj, self.field_name)
+ else:
+ return None
+
+
+class WrongOperatorViewClassError(Exception):
+ pass
+
+
+def class_for_name(module_name, class_name):
+ # load the module, will raise ImportError if module cannot be loaded
+ m = importlib.import_module(module_name)
+ # get the class, will raise AttributeError if class cannot be found
+ c = getattr(m, class_name)
+ return c
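+
+
+# Example (illustrative): class_for_name('collections', 'OrderedDict') returns
+# the collections.OrderedDict class. RelatedViewField resolves its dotted-path
+# string this way only when the related view is first accessed, so view
+# modules can reference each other by name without importing at load time.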
diff --git a/ryu/services/protocols/bgp/operator/views/other.py b/ryu/services/protocols/bgp/operator/views/other.py
new file mode 100644
index 00000000..4ffec432
--- /dev/null
+++ b/ryu/services/protocols/bgp/operator/views/other.py
@@ -0,0 +1,34 @@
+from ryu.services.protocols.bgp.operator.views.base import \
+ create_dict_view_class
+from ryu.services.protocols.bgp.operator.views.base import OperatorDetailView
+from ryu.services.protocols.bgp.operator.views import fields
+
+
+class ImportmapManagerDetailView(OperatorDetailView):
+ importmaps = fields.RelatedDictViewField(
+ '_import_maps_by_name',
+ 'bgpspeaker.operator.views.other.ImportmapDictView'
+ )
+
+
+class ImportmapDetailView(OperatorDetailView):
+ nlri = fields.OptionalDataField('_nlri')
+ rt = fields.OptionalDataField('_rt')
+
+ def encode(self):
+ ret = {}
+ nlri = self.get_field('nlri')
+ if nlri is not None:
+ ret.update({'nlri': nlri})
+
+ rt = self.get_field('rt')
+ if rt is not None:
+ ret.update({'rt': rt})
+
+ return ret
+
+
+ImportmapDictView = create_dict_view_class(
+ ImportmapDetailView,
+ 'ImportmapDictView'
+)
diff --git a/ryu/services/protocols/bgp/peer.py b/ryu/services/protocols/bgp/peer.py
new file mode 100644
index 00000000..87838b39
--- /dev/null
+++ b/ryu/services/protocols/bgp/peer.py
@@ -0,0 +1,1481 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ BGP peer related classes and utils.
+"""
+from collections import namedtuple
+import logging
+import socket
+import time
+import traceback
+
+from ryu.services.protocols.bgp.base import Activity
+from ryu.services.protocols.bgp.base import OrderedDict
+from ryu.services.protocols.bgp.base import Sink
+from ryu.services.protocols.bgp.base import Source
+from ryu.services.protocols.bgp.base import SUPPORTED_GLOBAL_RF
+from ryu.services.protocols.bgp import constants as const
+from ryu.services.protocols.bgp.model import OutgoingRoute
+from ryu.services.protocols.bgp.model import SentRoute
+from ryu.services.protocols.bgp.net_ctrl import NET_CONTROLLER
+from ryu.services.protocols.bgp.rtconf.neighbors import NeighborConfListener
+from ryu.services.protocols.bgp.signals.emit import BgpSignalBus
+from ryu.services.protocols.bgp.speaker import BgpProtocol
+from ryu.services.protocols.bgp.utils import bgp as bgp_utils
+from ryu.services.protocols.bgp.utils.evtlet import EventletIOFactory
+from ryu.services.protocols.bgp.utils import stats
+from ryu.services.protocols.bgp.protocols.bgp import exceptions
+from ryu.services.protocols.bgp.protocols.bgp import messages
+from ryu.services.protocols.bgp.protocols.bgp import nlri
+from ryu.services.protocols.bgp.protocols.bgp import pathattr
+
+
+LOG = logging.getLogger('bgpspeaker.peer')
+
+
+def is_valid_state(state):
+ """Returns True if given state is a valid bgp finite state machine state.
+ """
+ return state in const.BGP_FSM_VALID_STATES
+
+
+class PeerRf(object):
+ """State maintained per-RouteFamily for a Peer."""
+
+ def __init__(self, peer, route_family, enabled=False):
+ assert peer and route_family
+
+ self.enabled = enabled
+
+ # Back pointers.
+ self.peer = peer
+ self.rf = route_family
+
+
+PeerCounterNames = namedtuple(
+ 'PeerCounterNames',
+ ('RECV_PREFIXES',
+ 'RECV_UPDATES',
+ 'SENT_UPDATES',
+ 'RECV_NOTIFICATION',
+ 'SENT_NOTIFICATION',
+ 'SENT_REFRESH',
+ 'RECV_REFRESH',
+ 'FSM_ESTB_TRANSITIONS')
+)(
+ 'recv_prefixes',
+ 'recv_updates',
+ 'sent_updates',
+ 'recv_notification',
+ 'sent_notification',
+ 'sent_refresh',
+ 'recv_refresh',
+ 'fms_established_transitions'
+)
+
+
+class PeerState(object):
+    """A BGP neighbor's state. Think of this class as an information and
+    stats container for a Peer.
+    """
+
+ def __init__(self, peer, signal_bus):
+        # Back pointer to the peer whose stats this instance represents.
+ self.peer = peer
+ # Current state of BGP finite state machine.
+ self._bgp_state = const.BGP_FSM_IDLE
+ self._established_time = 0
+ self._last_bgp_error = None
+ self.counters = {
+ 'recv_prefixes': 0,
+ 'recv_updates': 0,
+ 'sent_updates': 0,
+ 'recv_notification': 0,
+ 'sent_notification': 0,
+ 'sent_refresh': 0,
+ 'recv_refresh': 0,
+ 'fms_established_transitions': 0,
+ }
+ self._signal_bus = signal_bus
+
+ #TODO(JK): refactor other counters to use signals also
+ self._signal_bus.register_listener(
+ ('error', 'bgp', self.peer),
+ self._remember_last_bgp_error
+ )
+
+ self._signal_bus.register_listener(
+ BgpSignalBus.BGP_NOTIFICATION_RECEIVED + (self.peer,),
+ lambda _, msg: self.incr(PeerCounterNames.RECV_NOTIFICATION)
+ )
+
+ self._signal_bus.register_listener(
+ BgpSignalBus.BGP_NOTIFICATION_SENT + (self.peer,),
+ lambda _, msg: self.incr(PeerCounterNames.SENT_NOTIFICATION)
+ )
+
+ def _remember_last_bgp_error(self, identifier, data):
+ self._last_bgp_error = {k: v for k, v in data.iteritems()
+ if k != 'peer'}
+
+ @property
+ def recv_prefix(self):
+ # Number of prefixes received from peer.
+ return self.counters[PeerCounterNames.RECV_PREFIXES]
+
+ @property
+ def bgp_state(self):
+ return self._bgp_state
+
+ @bgp_state.setter
+ def bgp_state(self, new_state):
+ old_state = self._bgp_state
+ if old_state == new_state:
+ return
+
+ self._bgp_state = new_state
+ NET_CONTROLLER.send_rpc_notification(
+ 'neighbor.state',
+ {
+ 'ip_address': self.peer.ip_address,
+ 'state': new_state
+ }
+ )
+
+ # transition to Established from another state
+ if new_state == const.BGP_FSM_ESTABLISHED:
+ self.incr(PeerCounterNames.FSM_ESTB_TRANSITIONS)
+ self._established_time = time.time()
+ NET_CONTROLLER.send_rpc_notification(
+ 'neighbor.up', {'ip_address': self.peer.ip_address}
+ )
+ # transition from Established to another state
+ elif old_state == const.BGP_FSM_ESTABLISHED:
+ self._established_time = 0
+ NET_CONTROLLER.send_rpc_notification(
+ 'neighbor.down', {'ip_address': self.peer.ip_address}
+ )
+
+ LOG.debug('Peer %s BGP FSM went from %s to %s' %
+ (self.peer.ip_address, old_state, self.bgp_state))
+
+ def incr(self, counter_name, incr_by=1):
+ if counter_name not in self.counters:
+ raise ValueError('Un-recognized counter name: %s' % counter_name)
+ counter = self.counters.setdefault(counter_name, 0)
+ counter += incr_by
+ self.counters[counter_name] = counter
+
+ def get_count(self, counter_name):
+ if counter_name not in self.counters:
+ raise ValueError('Un-recognized counter name: %s' % counter_name)
+ return self.counters.get(counter_name, 0)
+
+ @property
+    def total_msg_sent(self):
+        """Returns the total number of UPDATE and ROUTE_REFRESH messages
+        sent to this peer.
+        """
+ return (self.get_count(PeerCounterNames.SENT_REFRESH) +
+ self.get_count(PeerCounterNames.SENT_UPDATES))
+
+ @property
+    def total_msg_recv(self):
+        """Returns the total number of UPDATE, NOTIFICATION and ROUTE_REFRESH
+        messages received from this peer.
+ """
+ return (self.get_count(PeerCounterNames.RECV_UPDATES) +
+ self.get_count(PeerCounterNames.RECV_REFRESH) +
+ self.get_count(PeerCounterNames.RECV_NOTIFICATION))
+
+ def get_stats_summary_dict(self):
+ """Returns basic stats.
+
+ Returns a `dict` with various counts and stats, see below.
+ """
+ uptime = time.time() - self._established_time \
+ if self._established_time != 0 else -1
+ return {
+ stats.UPDATE_MSG_IN: self.get_count(PeerCounterNames.RECV_UPDATES),
+ stats.UPDATE_MSG_OUT: self.get_count(
+ PeerCounterNames.SENT_UPDATES
+ ),
+ stats.TOTAL_MSG_IN: self.total_msg_recv,
+ stats.TOTAL_MSG_OUT: self.total_msg_sent,
+ stats.FMS_EST_TRANS: self.get_count(
+ PeerCounterNames.FSM_ESTB_TRANSITIONS
+ ),
+ stats.UPTIME: uptime
+ }
+
+
+class Peer(Source, Sink, NeighborConfListener, Activity):
+ """A BGP neighbor/peer.
+
+    Listens for neighbor configuration changes and handles change events
+    appropriately. If peering is enabled, it tries 'actively'/'pro-actively'
+    to establish a session with the peer. Allows binding of `BgpProtocol`
+    instances for 'passive'/'reactive' establishment of a bgp session with
+    the peer. Maintains the BGP state machine (may not be fully compliant
+    with the RFC). Handles bgp UPDATE messages. Provides a queue of update
+    messages to be sent to the peer.
+ """
+
+ RTC_EOR_TIMER_NAME = 'RTC_EOR_Timer'
+
+ def __init__(self, common_conf, neigh_conf,
+ core_service, signal_bus, peer_manager):
+ peer_activity_name = 'Peer: %s' % neigh_conf.ip_address
+ Activity.__init__(self, name=peer_activity_name)
+ Source.__init__(self, version_num=1)
+ Sink.__init__(self)
+ # Add listener for configuration changes.
+ NeighborConfListener.__init__(self, neigh_conf)
+
+ # Current configuration of this peer.
+ self._neigh_conf = neigh_conf
+ self._common_conf = common_conf
+ self._core_service = core_service
+ self._signal_bus = signal_bus
+ self._peer_manager = peer_manager
+
+ # TODO(PH): revisit maintaining state/stats information.
+ # Peer state.
+ self.state = PeerState(self, self._signal_bus)
+ self._periodic_stats_logger = \
+ self._create_timer('Peer State Summary Stats Timer',
+ stats.log,
+ stats_resource=self._neigh_conf,
+ stats_source=self.state.get_stats_summary_dict)
+ if self._neigh_conf.stats_log_enabled:
+ self._periodic_stats_logger.start(self._neigh_conf.stats_time)
+
+ # State per route family, {RouteFamily: PeerRf,}.
+ self.rf_state = {}
+ # Get vpnv4 route family settings.
+ prf = PeerRf(self, nlri.RF_IPv4_VPN,
+ enabled=self._neigh_conf.cap_mbgp_vpnv4)
+ self.rf_state[nlri.RF_IPv4_VPN] = prf
+ # Get vpnv6 route family settings.
+ prf = PeerRf(self, nlri.RF_IPv6_VPN, self._neigh_conf.cap_mbgp_vpnv6)
+ self.rf_state[nlri.RF_IPv6_VPN] = prf
+
+ # Bound protocol instance
+ self._protocol = None
+
+        # Setting this event restarts the connect loop.
+        # Clearing this event will stop the connect loop.
+ self._connect_retry_event = EventletIOFactory.create_custom_event()
+
+ # Reference to threads related to enhanced refresh timers.
+ self._refresh_stalepath_timer = None
+ self._refresh_max_eor_timer = None
+
+ # Latest valid Open Message
+ self.curr_open_msg = None
+
+ # RTC end-of-rib timer
+ self._rtc_eor_timer = None
+ self._sent_init_non_rtc_update = False
+ self._init_rtc_nlri_path = []
+
+ @property
+ def remote_as(self):
+ return self._neigh_conf.remote_as
+
+ @property
+ def rtc_as(self):
+ return self._neigh_conf.rtc_as
+
+ @property
+ def ip_address(self):
+ return self._neigh_conf.ip_address
+
+ @property
+ def enabled(self):
+ return self._neigh_conf.enabled
+
+ @property
+ def med(self):
+ return self._neigh_conf.multi_exit_disc
+
+ def is_mpbgp_cap_valid(self, route_family):
+        if not self.in_established():
+ raise ValueError('Invalid request: Peer not in established state')
+ return self._protocol.is_mbgp_cap_valid(route_family)
+
+    def is_ebgp_peer(self):
+        """Returns *True* if this is an eBGP peer, else *False*."""
+ return self._common_conf.local_as != self._neigh_conf.remote_as
+
+ def in_established(self):
+ return self.state.bgp_state == const.BGP_FSM_ESTABLISHED
+
+ def in_idle(self):
+ return self.state.bgp_state == const.BGP_FSM_IDLE
+
+ def in_active(self):
+ return self.state.bgp_state == const.BGP_FSM_ACTIVE
+
+ def in_open_sent(self):
+ return self.state.bgp_state == const.BGP_FSM_OPEN_SENT
+
+ def in_open_confirm(self):
+ return self.state.bgp_state == const.BGP_FSM_OPEN_CONFIRM
+
+ def in_connect(self):
+ return self.state.bgp_state == const.BGP_FSM_CONNECT
+
+ def curr_fms_state(self):
+ return self.state.bgp_state
+
+ def is_mbgp_cap_valid(self, route_family):
+ if not self.in_established():
+ return False
+
+ return self._protocol.is_mbgp_cap_valid(route_family)
+
+ def on_chg_stats_time_conf_with_stats(self, evt):
+ # TODO(PH): provide implementation when updating neighbor is needed
+ pass
+
+ def on_chg_stats_enabled_conf_with_stats(self, evt):
+ # TODO(PH): provide implementation when updating neighbor is needed
+ pass
+
+ def on_update_enabled(self, conf_evt):
+ """Implements neighbor configuration change listener.
+ """
+ enabled = conf_evt.value
+ # If we do not have any protocol bound and configuration asks us to
+ # enable this peer, we try to establish connection again.
+ LOG.debug('Peer %s configuration update event, enabled: %s.' %
+ (self, enabled))
+ if enabled:
+ if self._protocol:
+ LOG.error('Tried to enable neighbor that is already enabled')
+ else:
+ # Restart connect loop if not already running.
+ if not self._connect_retry_event.is_set():
+ self._connect_retry_event.set()
+ LOG.debug('Starting connect loop as neighbor is enabled.')
+ else:
+ if self._protocol:
+ # Stopping protocol will eventually trigger connection_lost
+ # handler which will do some clean-up.
+                # But the greenlet that is in charge of the socket may be
+                # killed when we stop the protocol, hence we call
+                # connection_lost here as we triggered the socket to close.
+ self._protocol.send_notification(
+ exceptions.AdminShutdown.CODE,
+ exceptions.AdminShutdown.SUB_CODE
+ )
+ self._protocol.stop()
+ # If this peer is not enabled any-more we stop trying to make any
+ # connection.
+ LOG.debug('Disabling connect-retry as neighbor was disabled (%s)' %
+ (not enabled))
+ self._connect_retry_event.clear()
+
+ def on_update_med(self, conf_evt):
+ LOG.debug('on_update_med fired')
+ if self._protocol is not None and self._protocol.started:
+ negotiated_afs = self._protocol.negotiated_afs
+ for af in negotiated_afs:
+ self._fire_route_refresh(af)
+
+ def __str__(self):
+ return 'Peer(ip: %s, asn: %s)' % (self._neigh_conf.ip_address,
+ self._neigh_conf.remote_as)
+
+ def _run(self, client_factory):
+ LOG.debug('Started peer %s' % self)
+ # Start sink processing in a separate thread
+ self._spawn('peer.process_outgoing', self._process_outgoing_msg_list)
+
+ # Tries actively to establish session.
+ self._connect_loop(client_factory)
+
+ def _send_outgoing_route_refresh_msg(self, rr_msg):
+ """Sends given message `rr_msg` to peer.
+
+ Parameters:
+ - rr_msg: (RouteRefresh) route refresh message to send to peer.
+
+ Update appropriate counters and set appropriate timers.
+ """
+ assert rr_msg.TYPE_CODE == messages.RouteRefresh.TYPE_CODE
+ self._protocol.send(rr_msg)
+ LOG.debug('RouteRefresh %s>> %s' %
+ (self._neigh_conf.ip_address, rr_msg))
+ # Collect update statistics for sent refresh request.
+ if rr_msg.demarcation == 0:
+ self.state.incr(PeerCounterNames.SENT_REFRESH)
+ # If SOR is sent, we set Max. EOR timer if needed.
+ elif (rr_msg.demarcation == 1 and
+ self._common_conf.refresh_max_eor_time != 0):
+ eor_timer = self._common_conf.refresh_max_eor_time
+ # Set timer to send EOR demarcation.
+ self._spawn_after('end-of-rib-timer', eor_timer,
+ self._enqueue_eor_msg, rr_msg)
+ LOG.debug('Enhanced RR max. EOR timer set.')
+
+ def _send_outgoing_route(self, outgoing_route):
+ """Constructs `Update` message from given `outgoing_route` and sends
+ it to peer.
+
+ Also, checks if any policies prevent sending this message.
+ Populates Adj-RIB-out with corresponding `SentRoute`.
+ """
+        # TODO(PH): optimize by sending several prefixes per update.
+ # Construct and send update message.
+ update_msg = self._construct_update(outgoing_route)
+ self._protocol.send(update_msg)
+ # Collect update statistics.
+ self.state.incr(PeerCounterNames.SENT_UPDATES)
+
+        # We have to create a sent_route for every OutgoingRoute which is
+        # neither a withdraw nor sent for a route-refresh msg.
+ if (not outgoing_route.path.is_withdraw and
+ not outgoing_route.for_route_refresh):
+ # Update the destination with new sent route.
+ sent_route = SentRoute(outgoing_route.path, self)
+ tm = self._core_service.table_manager
+ tm.remember_sent_route(sent_route)
+
+ def _process_outgoing_msg_list(self):
+ while True:
+ outgoing_msg = None
+
+ if self._protocol is not None:
+ # We pick the first outgoing msg. available and send it.
+ outgoing_msg = self.outgoing_msg_list.pop_first()
+
+ # If we do not have any outgoing route, we wait.
+ if outgoing_msg is None:
+ self.outgoing_msg_event.clear()
+ self.outgoing_msg_event.wait()
+ continue
+
+ # Check currently supported out-going msgs.
+ assert isinstance(
+ outgoing_msg,
+ (messages.RouteRefresh, messages.Update, OutgoingRoute)
+ ), ('Peer cannot process object: %s in its outgoing queue'
+ % outgoing_msg)
+
+ # Send msg. to peer.
+ if isinstance(outgoing_msg, messages.RouteRefresh):
+ self._send_outgoing_route_refresh_msg(outgoing_msg)
+ elif isinstance(outgoing_msg, OutgoingRoute):
+ self._send_outgoing_route(outgoing_msg)
+
+ # EOR are enqueued as plain Update messages.
+ elif isinstance(outgoing_msg, messages.Update):
+ self._protocol.send(outgoing_msg)
+ LOG.debug('Update %s>> %s' % (self._neigh_conf.ip_address,
+ outgoing_msg))
+ self.state.incr(PeerCounterNames.SENT_UPDATES)
+
+ def request_route_refresh(self, *route_families):
+ """Request route refresh to peer for given `route_families`.
+
+ If no `route_families` are given, we make request for all supported
+ route families with this peer.
+ Parameters:
+ - `route_families`: list of route families to request route
+ refresh for.
+
+ If this peer is currently not in Established state, we raise exception.
+ If any of the `route_families` are invalid we raise exception.
+ """
+ # If this peer has not established session yet
+        if not self.in_established():
+ raise ValueError('Peer not in established state to satisfy'
+ ' this request.')
+
+ skip_validation = False
+        # If the request is made for all supported route_families for the
+        # current session, we collect all route_families valid for it.
+ if len(route_families) == 0:
+ route_families = []
+ # We skip validation of route families that we collect ourselves
+ # below.
+ skip_validation = True
+ for route_family in SUPPORTED_GLOBAL_RF:
+ if self.is_mbgp_cap_valid(route_family):
+ route_families.append(route_family)
+
+ for route_family in route_families:
+ if (skip_validation or
+ ((route_family in SUPPORTED_GLOBAL_RF) and
+ # We ignore request for route_family not valid
+ # for current session.
+ self._protocol.is_mbgp_cap_valid(route_family))):
+ rr_req = messages.RouteRefresh(route_family)
+ self.enque_outgoing_msg(rr_req)
+ LOG.debug('Enqueued Route Refresh message to '
+ 'peer %s for rf: %s' % (self, route_family))
+
+ def enque_end_of_rib(self, route_family):
+ pattr_map = {}
+ # MP_UNREACH_NLRI Attribute.
+ mpunreach_attr = pathattr.MpUnreachNlri(route_family, [])
+ pattr_map[pathattr.MpUnreachNlri.ATTR_NAME] = mpunreach_attr
+ update = messages.Update(pathattr_map=pattr_map)
+ self.enque_outgoing_msg(update)
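+        # Note: an UPDATE whose MP_UNREACH_NLRI names the address family but
+        # carries no routes serves as the End-of-RIB marker for that family
+        # (cf. RFC 4724).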
+
+ def _session_next_hop(self, route_family):
+ """Returns nexthop address relevant to current session
+
+        The nexthop used can depend on the capabilities of the session. If the
+        VPNv6 capability is active and the session runs over an IPv4
+        connection, we have to use an IPv4-mapped IPv6 address. In other cases
+        we can use the connection end point/local ip address.
+ """
+ # By default we use BGPS's interface IP with this peer as next_hop.
+ next_hop = self._neigh_conf.host_bind_ip
+ if route_family == nlri.RF_IPv6_VPN:
+ # Next hop ipv4_mapped ipv6
+ next_hop = nlri.ipv4_mapped_ipv6(next_hop)
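+            # e.g. (illustrative): if the local endpoint is 192.0.2.1, the
+            # VPNv6 next hop becomes the IPv4-mapped address ::ffff:192.0.2.1.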
+
+ return next_hop
+
+ def _construct_update(self, outgoing_route):
+ """Construct update message with Outgoing-routes path attribute
+ appropriately cloned/copied/updated.
+ """
+ # TODO(PH): Investigate how this case needs to be handled for iBGP.
+ update = None
+ path = outgoing_route.path
+ # Get copy of path's path attributes.
+ pathattr_map = path.pathattr_map
+ new_pathattr_map = OrderedDict()
+
+ # If this is withdraw update we copy MpUnReach path-attribute and
+ # create new Update message.
+ if path.is_withdraw:
+ # MP_UNREACH_NLRI Attribute.
+ mpunreach_attr = pathattr.MpUnreachNlri(
+ path.route_family, [path.nlri]
+ )
+ new_pathattr_map[pathattr.MpUnreachNlri.ATTR_NAME] = mpunreach_attr
+ else:
+ # Supported and un-supported/unknown attributes.
+ origin_attr = None
+ aspath_attr = None
+ mpnlri_attr = None
+ extcomm_attr = None
+ community_attr = None
+ localpref_attr = None
+ unkown_opttrans_attrs = None
+
+ # MP_REACH_NLRI Attribute.
+ # By default we use BGPS's interface IP with this peer as next_hop.
+ # TODO(PH): change to use protocol's local address.
+ # next_hop = self._neigh_conf.host_bind_ip
+ next_hop = self._session_next_hop(path.route_family)
+ # If this is a iBGP peer.
+ if not self.is_ebgp_peer() and path.source is not None:
+ # If the path came from a bgp peer and not from NC, according
+ # to RFC 4271 we should not modify next_hop.
+ next_hop = path.nexthop
+ # We construct mpreach-nlri attribute.
+ mpnlri_attr = pathattr.MpReachNlri(
+ path.route_family, next_hop, [path.nlri]
+ )
+
+ # ORIGIN Attribute.
+ # According to RFC this attribute value SHOULD NOT be changed by
+ # any other speaker.
+ origin_attr = pathattr_map.get(pathattr.Origin.ATTR_NAME)
+ assert origin_attr, 'Missing ORIGIN mandatory attribute.'
+
+ # AS_PATH Attribute.
+ # Construct AS-path-attr using paths aspath attr. with local AS as
+ # first item.
+ path_aspath = pathattr_map.get(pathattr.AsPath.ATTR_NAME)
+ assert path_aspath, 'Missing AS_PATH mandatory attribute.'
+ # Deep copy aspath_attr value
+ path_seg_list = path_aspath.path_seg_list
+ # If this is a iBGP peer.
+ if not self.is_ebgp_peer():
+ # When a given BGP speaker advertises the route to an internal
+ # peer, the advertising speaker SHALL NOT modify the AS_PATH
+ # attribute associated with the route.
+ aspath_attr = pathattr.AsPath(path_seg_list)
+ else:
+ # When a given BGP speaker advertises the route to an external
+ # peer, the advertising speaker updates the AS_PATH attribute
+ # as follows:
+ # 1) if the first path segment of the AS_PATH is of type
+ # AS_SEQUENCE, the local system prepends its own AS num as
+ # the last element of the sequence (put it in the left-most
+ # position with respect to the position of octets in the
+ # protocol message). If the act of prepending will cause an
+ # overflow in the AS_PATH segment (i.e., more than 255
+ # ASes), it SHOULD prepend a new segment of type AS_SEQUENCE
+ # and prepend its own AS number to this new segment.
+ #
+ # 2) if the first path segment of the AS_PATH is of type AS_SET
+ # , the local system prepends a new path segment of type
+ # AS_SEQUENCE to the AS_PATH, including its own AS number in
+ # that segment.
+ #
+ # 3) if the AS_PATH is empty, the local system creates a path
+ # segment of type AS_SEQUENCE, places its own AS into that
+ # segment, and places that segment into the AS_PATH.
+ if (len(path_seg_list) > 0 and
+ isinstance(path_seg_list[0], list) and
+ len(path_seg_list[0]) < 255):
+ path_seg_list[0].insert(0, self._core_service.asn)
+ else:
+ path_seg_list.insert(0, [self._core_service.asn])
+ aspath_attr = pathattr.AsPath(path_seg_list)
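+                # e.g. (illustrative): with local AS 64512 and a received
+                # path_seg_list of [[64513, 64514]], the advertised AS_PATH
+                # becomes [[64512, 64513, 64514]]; with an empty path_seg_list
+                # it becomes [[64512]].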
+
+ # MULTI_EXIT_DISC Attribute.
+ # For eBGP session we can send multi-exit-disc if configured.
+ multi_exit_disc = None
+ if self.is_ebgp_peer():
+ multi_exit_disc = pathattr_map.get(pathattr.Med.ATTR_NAME)
+
+ # LOCAL_PREF Attribute.
+ if not self.is_ebgp_peer():
+ # For iBGP peers we are required to send local-pref attribute
+ # for connected or local prefixes. We send default local-pref.
+ localpref_attr = pathattr.LocalPref(100)
+
+ # COMMUNITY Attribute.
+ community_attr = pathattr_map.get(pathattr.Community.ATTR_NAME)
+
+ # EXTENDED COMMUNITY Attribute.
+ # Construct ExtCommunity path-attr based on given.
+ path_extcomm_attr = pathattr_map.get(
+ pathattr.ExtCommunity.ATTR_NAME
+ )
+ if path_extcomm_attr:
+ # SOO list can be configured per VRF and/or per Neighbor.
+                # If NeighborConf has this setting, we add it to the existing
+                # list.
+ soo_list = path_extcomm_attr.soo_list
+ if self._neigh_conf.soo_list:
+ soo_list.extend(self._neigh_conf.soo_list)
+ extcomm_attr = pathattr.ExtCommunity(
+ path_extcomm_attr.rt_list,
+ soo_list
+ )
+
+            # UNKNOWN Attributes.
+ # Get optional transitive path attributes
+ unkown_opttrans_attrs = bgp_utils.get_unknow_opttrans_attr(path)
+
+ # Ordering path attributes according to type as RFC says. We set
+ # MPReachNLRI first as advised by experts as a new trend in BGP
+ # implementation.
+ new_pathattr_map[pathattr.MpReachNlri.ATTR_NAME] = mpnlri_attr
+ new_pathattr_map[pathattr.Origin.ATTR_NAME] = origin_attr
+ new_pathattr_map[pathattr.AsPath.ATTR_NAME] = aspath_attr
+ if multi_exit_disc:
+ new_pathattr_map[pathattr.Med.ATTR_NAME] = multi_exit_disc
+ if localpref_attr:
+ new_pathattr_map[pathattr.LocalPref.ATTR_NAME] = localpref_attr
+ if community_attr:
+ new_pathattr_map[pathattr.Community.ATTR_NAME] = community_attr
+ if extcomm_attr:
+ new_pathattr_map[pathattr.ExtCommunity.ATTR_NAME] =\
+ extcomm_attr
+ if unkown_opttrans_attrs:
+ new_pathattr_map.update(unkown_opttrans_attrs)
+
+ update = messages.Update(pathattr_map=new_pathattr_map)
+ return update
+
+    def _connect_loop(self, client_factory):
+        """In the current greenlet we try to establish connection with peer.
+
+ This greenlet will spin another greenlet to handle incoming data
+ from the peer once connection is established.
+ """
+ # If current configuration allow, enable active session establishment.
+ if self._neigh_conf.enabled:
+ self._connect_retry_event.set()
+
+ while 1:
+ self._connect_retry_event.wait()
+
+            # Reconnecting immediately after closing the connection may not
+            # be well received by some peers (ALU?)
+ self.pause(1.0)
+ if self.state.bgp_state in \
+ (const.BGP_FSM_IDLE, const.BGP_FSM_ACTIVE):
+
+ # Check if we have to stop or retry
+ self.state.bgp_state = const.BGP_FSM_CONNECT
+ # If we have specific host interface to bind to, we will do so
+ # else we will bind to system default.
+ # Use current values.
+ bind_addr = (self._neigh_conf.host_bind_ip,
+ self._neigh_conf.host_bind_port)
+ peer_address = (self._neigh_conf.ip_address,
+ const.STD_BGP_SERVER_PORT_NUM)
+
+ LOG.debug('%s trying to connect to %s' % (self, peer_address))
+ tcp_conn_timeout = self._common_conf.tcp_conn_timeout
+ try:
+ self._connect_tcp(peer_address,
+ client_factory,
+ time_out=tcp_conn_timeout,
+ bind_address=bind_addr)
+ except socket.error:
+ self.state.bgp_state = const.BGP_FSM_ACTIVE
+ LOG.debug('Socket could not be created in time (%s secs),'
+ ' reason %s' % (tcp_conn_timeout,
+ traceback.format_exc()))
+ LOG.info('Will try to reconnect to %s after %s secs: %s' %
+ (self._neigh_conf.ip_address,
+ self._common_conf.bgp_conn_retry_time,
+ self._connect_retry_event.is_set()))
+
+ self.pause(self._common_conf.bgp_conn_retry_time)
+
+ def _set_protocol(self, proto):
+ self._protocol = proto
+
+ # Update state attributes
+ self.state.peer_ip, self.state.peer_port = \
+ self._protocol.get_peername()
+ self.state.local_ip, self.state.local_port = \
+ self._protocol.get_sockname()
+# self.state.bgp_state = self._protocol.state
+ # Stop connect_loop retry timer as we are now connected
+ if self._protocol and self._connect_retry_event.is_set():
+ self._connect_retry_event.clear()
+ LOG.debug('Connect retry event for %s is now set: %s' %
+ (self, self._connect_retry_event.is_set()))
+
+ if self._protocol and self.outgoing_msg_event.is_set():
+ # Start processing sink.
+ self.outgoing_msg_event.set()
+ LOG.debug('Processing of outgoing msg. started for %s.' % self)
+
+ def _send_collision_err_and_stop(self, protocol):
+ code = exceptions.CollisionResolution.CODE
+ subcode = exceptions.CollisionResolution.SUB_CODE
+ self._signal_bus.bgp_error(self, code, subcode, None)
+ protocol.send_notification(code, subcode)
+ protocol.stop()
+
+ def bind_protocol(self, proto):
+ """Tries to bind given protocol to this peer.
+
+ Should only be called by `proto` trying to bind.
+ Once bound this protocol instance will be used to communicate with
+ peer. If another protocol is already bound, connection collision
+ resolution takes place.
+ """
+ LOG.debug('Trying to bind protocol %s to peer %s' % (proto, self))
+ # Validate input.
+ if not isinstance(proto, BgpProtocol):
+ raise ValueError('Currently only supports valid instances of'
+ ' `BgpProtocol`')
+
+ if proto.state != const.BGP_FSM_OPEN_CONFIRM:
+ raise ValueError('Only protocols in OpenConfirm state can be'
+ ' bound')
+
+ # If we are not bound to any protocol
+ is_bound = False
+ if not self._protocol:
+ self._set_protocol(proto)
+ is_bound = True
+ else:
+            # If the bound session has already progressed beyond Idle, we
+            # close the new protocol with a collision Notification.
+ if self.state.bgp_state != const.BGP_FSM_IDLE:
+                LOG.debug('Currently in %s state, hence will send collision'
+                          ' Notification to close this protocol.' %
+                          self.state.bgp_state)
+ self._send_collision_err_and_stop(proto)
+ return
+
+        # If we have a collision that needs to be resolved
+ assert proto.is_colliding(self._protocol), \
+ ('Tried to bind second protocol that is not colliding with '
+ 'first/bound protocol')
+ LOG.debug('Currently have one protocol in %s state and '
+ 'another protocol in %s state' %
+ (self._protocol.state, proto.state))
+ # Protocol that is already bound
+ first_protocol = self._protocol
+ assert ((first_protocol.is_reactive and not proto.is_reactive) or
+ (proto.is_reactive and not first_protocol.is_reactive))
+ # Connection initiated by peer.
+ reactive_proto = None
+ # Connection initiated locally.
+ proactive_proto = None
+ # Identify which protocol was initiated by which peer.
+ if proto.is_reactive:
+ reactive_proto = proto
+ proactive_proto = self._protocol
+ else:
+ reactive_proto = self._protocol
+ proactive_proto = proto
+
+ LOG.debug('Pro-active/Active protocol %s' % proactive_proto)
+        # We compare the local and remote bgp router ids and keep the
+        # connection that was initiated by the speaker with the higher id.
+ if proto.is_local_router_id_greater():
+ self._set_protocol(proactive_proto)
+ else:
+ self._set_protocol(reactive_proto)
+
+ if self._protocol is not proto:
+ # If new proto did not win collision we return False to
+ # indicate this.
+ is_bound = False
+ else:
+            # If the first protocol did not win collision resolution,
+            # we send a notification to the peer and stop it.
+ self._send_collision_err_and_stop(first_protocol)
+ is_bound = True
+
+ return is_bound
+
+ def create_open_msg(self):
+ """Create `Open` message using current settings.
+
+        Current settings include capabilities, timers and ids.
+ """
+ asnum = self._common_conf.local_as
+ bgpid = self._common_conf.router_id
+ holdtime = self._neigh_conf.hold_time
+ open_msg = messages.Open(
+ const.BGP_VERSION_NUM,
+ asnum,
+ holdtime,
+ bgpid,
+ self._neigh_conf.get_configured_capabilites()
+ )
+ return open_msg
+
+ def _validate_update_msg(self, update_msg):
+ """Validate update message as per RFC.
+
+ Here we validate the message after it has been parsed. Message
+ has already been validated against some errors inside parsing
+ library.
+ """
+ # TODO(PH): finish providing implementation, currently low priority
+ assert update_msg.TYPE_CODE == messages.Update.TYPE_CODE
+ # An UPDATE message may be received only in the Established state.
+ # Receiving an UPDATE message in any other state is an error.
+ if self.state.bgp_state != const.BGP_FSM_ESTABLISHED:
+ LOG.error('Received UPDATE message when not in ESTABLISHED'
+ ' state.')
+ raise exceptions.FiniteStateMachineError()
+
+ mp_reach_attr = update_msg.get_path_attr(
+ pathattr.MpReachNlri.ATTR_NAME
+ )
+ mp_unreach_attr = update_msg.get_path_attr(
+ pathattr.MpUnreachNlri.ATTR_NAME
+ )
+ if not (mp_reach_attr or mp_unreach_attr):
+ LOG.error('Received UPDATE msg. with no MpReachNlri or '
+ 'MpUnReachNlri attribute.')
+ raise exceptions.MalformedAttrList()
+
+ # Check if received MP_UNREACH path attribute is of available afi/safi
+ if mp_unreach_attr:
+ if not self.is_mpbgp_cap_valid(mp_unreach_attr.route_family):
+ LOG.error('Got UPDATE message with un-available afi/safi for'
+ ' MP_UNREACH path attribute (non-negotiated'
+ ' afi/safi) %s' % mp_unreach_attr.route_family)
+ # raise exceptions.OptAttrError()
+
+ if mp_reach_attr:
+ # Check if received MP_REACH path attribute is of available
+ # afi/safi
+ if not self.is_mpbgp_cap_valid(mp_reach_attr.route_family):
+ LOG.error('Got UPDATE message with un-available afi/safi for'
+                          ' MP_REACH path attribute (non-negotiated'
+ ' afi/safi) %s' % mp_reach_attr.route_family)
+ # raise exceptions.OptAttrError()
+
+ # Check for missing well-known mandatory attributes.
+ aspath = update_msg.get_path_attr(pathattr.AsPath.ATTR_NAME)
+ if not aspath:
+ raise exceptions.MissingWellKnown(pathattr.AsPath.TYPE_CODE)
+
+ # We do not have a setting to enable/disable first-as check.
+ # We by default do first-as check below.
+ if (self.is_ebgp_peer() and
+ not aspath.has_matching_leftmost(self.remote_as)):
+ LOG.error('First AS check fails. Raise appropriate exception.')
+ raise exceptions.MalformedAsPath()
+
+ origin = update_msg.get_path_attr(pathattr.Origin.ATTR_NAME)
+ if not origin:
+ raise exceptions.MissingWellKnown(pathattr.Origin.TYPE_CODE)
+
+ # Validate Next hop.
+ # TODO(PH): Currently ignore other cases.
+        if mp_reach_attr and (
+                not mp_reach_attr.next_hop or
+                mp_reach_attr.next_hop == self._neigh_conf.host_bind_ip):
+ LOG.error('Nexthop of received UPDATE msg. (%s) same as local'
+ ' interface address %s.' %
+ (mp_reach_attr.next_hop,
+ self._neigh_conf.host_bind_ip))
+ return False
+
+ return True
+
+ def _handle_update_msg(self, update_msg):
+ """Extracts and processes new paths or withdrawals in given
+ `update_msg`.
+
+        Parameter:
+            - `update_msg`: update message to process.
+ Assumes Multiprotocol Extensions capability is supported and enabled.
+ """
+ assert self.state.bgp_state == const.BGP_FSM_ESTABLISHED
+ self.state.incr(PeerCounterNames.RECV_UPDATES)
+ if not self._validate_update_msg(update_msg):
+ # If update message was not valid for some reason, we ignore its
+ # routes.
+ LOG.error('UPDATE message was invalid, hence ignoring its routes.')
+ return
+
+ # Increment count of update received.
+ mp_reach_attr = update_msg.get_path_attr(
+ pathattr.MpReachNlri.ATTR_NAME
+ )
+ mp_unreach_attr = update_msg.get_path_attr(
+ pathattr.MpUnreachNlri.ATTR_NAME
+ )
+ if mp_reach_attr:
+ # Extract advertised paths from given message.
+ self._extract_and_handle_new_paths(update_msg)
+
+ if mp_unreach_attr:
+ # Extract withdraws from given message.
+ self._extract_and_handle_withdraws(mp_unreach_attr)
+
+ def _extract_and_handle_new_paths(self, update_msg):
+ """Extracts new paths advertised in the given update message's
+ *MpReachNlri* attribute.
+
+ Assumes MPBGP capability is enabled and message was validated.
+        Parameters:
+            - update_msg: (Update) is assumed to be checked for all bgp
+            message errors.
+
+ Extracted paths are added to appropriate *Destination* for further
+ processing.
+ """
+ umsg_pattrs = update_msg.pathattr_map
+ mpreach_nlri_attr = umsg_pattrs.pop(pathattr.MpReachNlri.ATTR_NAME)
+ assert mpreach_nlri_attr
+
+ msg_rf = mpreach_nlri_attr.route_family
+ # Check if this route family is among supported route families.
+ if msg_rf not in SUPPORTED_GLOBAL_RF:
+ LOG.info(('Received route for route family %s which is'
+ ' not supported. Ignoring paths from this UPDATE: %s') %
+ (msg_rf, update_msg))
+ return
+
+ aspath = umsg_pattrs.get(pathattr.AsPath.ATTR_NAME)
+ # Check if AS_PATH has loops.
+ if aspath.has_local_as(self._common_conf.local_as):
+ LOG.error('Update message AS_PATH has loops. Ignoring this'
+ ' UPDATE. %s' % update_msg)
+ return
+
+        if msg_rf in (nlri.RF_IPv4_VPN, nlri.RF_IPv6_VPN):
+ # Check if we have Extended Communities Attribute.
+ # TODO(PH): Check if RT_NLRI afi/safi will ever have this attribute
+ ext_comm_attr = umsg_pattrs.get(pathattr.ExtCommunity.ATTR_NAME)
+            # Check if at least one RT is of interest to us.
+ if not ext_comm_attr:
+ LOG.info('Missing Extended Communities Attribute. '
+ 'Ignoring paths from this UPDATE: %s' % update_msg)
+ return
+
+ msg_rts = ext_comm_attr.rt_list
+ # If we do not have any RTs associated with this msg., we do not
+ # extract any paths.
+ if not msg_rts:
+ LOG.info('Received route with no RTs. Ignoring paths in this'
+ ' UPDATE: %s' % update_msg)
+ return
+
+ # If none of the RTs in the message are of interest, we do not
+ # extract any paths.
+ interested_rts = self._core_service.global_interested_rts
+ if not interested_rts.intersection(msg_rts):
+ LOG.info('Received route with RT %s that is of no interest to'
+ ' any VRFs or Peers %s.'
+ ' Ignoring paths from this UPDATE: %s' %
+ (msg_rts, interested_rts, update_msg))
+ return
+
+ next_hop = mpreach_nlri_attr.next_hop
+ # Nothing to do if we do not have any new NLRIs in this message.
+ msg_nlri_list = mpreach_nlri_attr.nlri_list
+ if not msg_nlri_list:
+ LOG.debug('Update message did not have any new MP_REACH_NLRIs.')
+ return
+
+ # Create path instances for each NLRI from the update message.
+ for msg_nlri in msg_nlri_list:
+ new_path = bgp_utils.create_path(
+ self,
+ msg_nlri,
+ pattrs=umsg_pattrs,
+ nexthop=next_hop
+ )
+ LOG.debug('Extracted paths from Update msg.: %s' % new_path)
+ if msg_rf == nlri.RF_RTC_UC \
+ and self._init_rtc_nlri_path is not None:
+ self._init_rtc_nlri_path.append(new_path)
+ else:
+ # Update appropriate table with new paths.
+ tm = self._core_service.table_manager
+ tm.learn_path(new_path)
+
+ # If update message had any qualifying new paths, do some book-keeping.
+ if msg_nlri_list:
+ # Update prefix statistics.
+ self.state.incr(PeerCounterNames.RECV_PREFIXES,
+ incr_by=len(msg_nlri_list))
+ # Check if we exceed max. prefixes allowed for this neighbor.
+ if self._neigh_conf.exceeds_max_prefix_allowed(
+ self.state.get_count(PeerCounterNames.RECV_PREFIXES)):
+ LOG.error('Max. prefix allowed for this neighbor '
+ 'exceeded.')
+
+ def _extract_and_handle_withdraws(self, mp_unreach_attr):
+ """Extracts withdraws advertised in the given update message's
+ *MpUnReachNlri* attribute.
+
+ Assumes MPBGP capability is enabled.
+        Parameters:
+            - mp_unreach_attr: (MpUnreachNlri) is assumed to be checked for
+            all bgp message errors.
+
+ Extracted withdraws are added to appropriate *Destination* for further
+ processing.
+ """
+ msg_rf = mp_unreach_attr.route_family
+ # Check if this route family is among supported route families.
+ if msg_rf not in SUPPORTED_GLOBAL_RF:
+ LOG.info(
+ (
+ 'Received route for route family %s which is'
+                    ' not supported. Ignoring withdraws from this UPDATE.'
+ ) % msg_rf
+ )
+ return
+
+ w_nlris = mp_unreach_attr.nlri_list
+ if not w_nlris:
+ # If this is EOR of some kind, handle it
+ self._handle_eor(msg_rf)
+
+ for w_nlri in w_nlris:
+ w_path = bgp_utils.create_path(
+ self,
+ w_nlri,
+ is_withdraw=True
+ )
+ # Update appropriate table with withdraws.
+ tm = self._core_service.table_manager
+ tm.learn_path(w_path)
+
+ def _handle_eor(self, route_family):
+ """Currently we only handle EOR for RTC address-family.
+
+ We send non-rtc initial updates if not already sent.
+ """
+ LOG.debug('Handling EOR for %s' % route_family)
+# assert (route_family in SUPPORTED_GLOBAL_RF)
+# assert self.is_mbgp_cap_valid(route_family)
+
+ if route_family == nlri.RF_RTC_UC:
+ self._unschedule_sending_init_updates()
+
+            # Learn all rt_nlri at the same time. As RTs are learned and the
+            # RT filter gets updated, qualifying NLRIs are automatically sent
+            # to the peer, including the initial update.
+ tm = self._core_service.table_manager
+ for rt_nlri in self._init_rtc_nlri_path:
+ tm.learn_path(rt_nlri)
+            # Give a chance to process the new RT_NLRIs so that we have an
+            # updated RT filter for all peers, including this peer, before we
+            # communicate NLRIs for other address-families.
+ self.pause(0)
+ # Clear collection of initial RTs as we no longer need to wait for
+ # EOR for RT NLRIs and to indicate that new RT NLRIs should be
+ # handled in a regular fashion
+ self._init_rtc_nlri_path = None
+
+ def handle_msg(self, msg):
+ """BGP message handler.
+
+ BGP message handling is shared between protocol instance and peer. Peer
+ only handles limited messages under suitable state. Here we handle
+ KEEPALIVE, UPDATE and ROUTE_REFRESH messages. UPDATE and ROUTE_REFRESH
+ messages are handled only after session is established.
+ """
+ if msg.MSG_NAME == messages.Keepalive.MSG_NAME:
+ # If we receive a Keep Alive message in open_confirm state, we
+ # transition to established state.
+ if self.state.bgp_state == const.BGP_FSM_OPEN_CONFIRM:
+ self.state.bgp_state = const.BGP_FSM_ESTABLISHED
+ self._enqueue_init_updates()
+
+ elif msg.MSG_NAME == messages.Update.MSG_NAME:
+ assert self.state.bgp_state == const.BGP_FSM_ESTABLISHED
+            # Will try to process this UPDATE message further
+ self._handle_update_msg(msg)
+
+ elif msg.MSG_NAME == messages.RouteRefresh.MSG_NAME:
+ # If its route-refresh message
+ assert self.state.bgp_state == const.BGP_FSM_ESTABLISHED
+ self._handle_route_refresh_msg(msg)
+
+ else:
+ # Open/Notification messages are currently handled by protocol and
+ # nothing is done inside peer, so should not see them here.
+ raise ValueError('Peer does not support handling of %s'
+                             ' message during %s state' %
+                             (msg.MSG_NAME, self.state.bgp_state))
+
+ def _handle_err_sor_msg(self, afi, safi):
+ # Check if ERR capability is enabled for this peer.
+ if not self._protocol.is_enhanced_rr_cap_valid():
+ LOG.error('Received Start-of-RIB (SOR) even though ERR is not'
+ ' enabled')
+ return
+
+ # Increment the version number of this peer so that we can identify
+ # inconsistencies/stale routes.
+ self.version_num += 1
+
+ # Check if refresh_stalepath_time is enabled.
+ rst = self._common_conf.refresh_stalepath_time
+ if rst != 0:
+ # Set a timer to clean the stale paths at configured time.
+ # Clean/track inconsistent/stale routes.
+ route_family = nlri.get_rf(afi, safi)
+ if route_family in SUPPORTED_GLOBAL_RF:
+ self._refresh_stalepath_timer = self._spawn_after(
+ 'err-refresh-stale-path-timer', rst,
+ self._core_service.table_manager.clean_stale_routes, self,
+ route_family)
+ LOG.debug('Refresh Stale Path timer set (%s sec).' % rst)
+
+ def _handle_route_refresh_msg(self, msg):
+ afi = msg.route_family.afi
+ safi = msg.route_family.safi
+ demarcation = msg.demarcation
+
+ # If this normal route-refresh request.
+ if demarcation == 0:
+ self._handle_route_refresh_req(afi, safi)
+
+ # If this is start of RIB (SOR) message.
+ elif demarcation == 1:
+ self._handle_err_sor_msg(afi, safi)
+
+ # If this is end of RIB (EOR) message.
+ elif demarcation == 2:
+ # Clean/track inconsistent/stale routes.
+ route_family = nlri.get_rf(afi, safi)
+ if route_family in SUPPORTED_GLOBAL_RF:
+ tm = self._core_service.table_manager
+ tm.clean_stale_routes(self, route_family)
+
+ else:
+ LOG.error('Route refresh message has invalid demarcation %s' %
+ demarcation)
+
+ def _handle_route_refresh_req(self, afi, safi):
+ rr_af = nlri.get_rf(afi, safi)
+ self.state.incr(PeerCounterNames.RECV_REFRESH)
+
+ # Check if peer has asked for route-refresh for af that was advertised
+ if not self._protocol.is_route_family_adv(rr_af):
+            LOG.info('Peer asked for route-refresh for un-advertised '
+                     'address-family %s' % str(rr_af))
+ return
+
+ self._fire_route_refresh(rr_af)
+
+ def _fire_route_refresh(self, af):
+ # Check if enhanced route refresh is enabled/valid.
+ sor = None
+ if self._protocol.is_enhanced_rr_cap_valid():
+ # If enhanced route-refresh is valid/enabled, enqueue SOR.
+ sor = messages.RouteRefresh(af, demarcation=1)
+ self.enque_first_outgoing_msg(sor)
+
+ # Ask core to re-send sent routes
+ self._peer_manager.resend_sent(af, self)
+
+ # If enhanced route-refresh is valid/enabled, then we enqueue EOR.
+ if sor is not None:
+ self._enqueue_eor_msg(sor)
+
+    def _enqueue_eor_msg(self, sor):
+        """Enqueues an Enhanced RR EOR if an EOR for the given SOR has not
+        already been sent.
+ """
+ if self._protocol.is_enhanced_rr_cap_valid() and not sor.eor_sent:
+ eor = messages.RouteRefresh(sor.route_family, demarcation=2)
+ self.enque_outgoing_msg(eor)
+ sor.eor_sent = True
+
+ def _schedule_sending_init_updates(self):
+ """Setup timer for sending best-paths for all other address-families
+ that qualify.
+
+ Setup timer for sending initial updates to peer.
+ """
+
+ def _enqueue_non_rtc_init_updates():
+ LOG.debug('Scheduled queuing of initial Non-RTC UPDATEs')
+ tm = self._core_service.table_manager
+ self.comm_all_best_paths(tm.global_tables)
+ self._sent_init_non_rtc_update = True
+ # Stop the timer as we have handled RTC EOR
+ self._rtc_eor_timer.stop()
+ self._rtc_eor_timer = None
+
+ self._sent_init_non_rtc_update = False
+ self._rtc_eor_timer = self._create_timer(
+ Peer.RTC_EOR_TIMER_NAME,
+ _enqueue_non_rtc_init_updates
+ )
+ # Start timer for sending initial updates
+ self._rtc_eor_timer.start(const.RTC_EOR_DEFAULT_TIME, now=False)
+ LOG.debug('Scheduled sending of initial Non-RTC UPDATEs after:'
+ ' %s sec' % const.RTC_EOR_DEFAULT_TIME)
+
+ def _unschedule_sending_init_updates(self):
+ """Un-schedules sending of initial updates
+
+ Stops the timer if set for sending initial updates.
+ Returns:
+ - True if timer was stopped
+ - False if timer was already stopped and nothing was done
+ """
+ LOG.debug('Un-scheduling sending of initial Non-RTC UPDATEs'
+ ' (init. UPDATEs already sent: %s)' %
+ self._sent_init_non_rtc_update)
+ if self._rtc_eor_timer:
+ self._rtc_eor_timer.stop()
+ self._rtc_eor_timer = None
+ return True
+ return False
+
+ def _enqueue_init_updates(self):
+ """Enqueues current routes to be shared with this peer."""
+ assert self.state.bgp_state == const.BGP_FSM_ESTABLISHED
+ if self.is_mbgp_cap_valid(nlri.RF_RTC_UC):
+ # Enqueues all best-RTC_NLRIs to be sent as initial update to this
+ # peer.
+ self._peer_manager.comm_all_rt_nlris(self)
+ self._schedule_sending_init_updates()
+ else:
+ # Enqueue all best paths to be sent as initial update to this peer,
+ # except for the RTC route family.
+ tm = self._core_service.table_manager
+ self.comm_all_best_paths(tm.global_tables)
+
+ def comm_all_best_paths(self, global_tables):
+ """Shares/communicates current best paths with this peers.
+
+ Can be used to send initial updates after we have established session
+ with `peer`.
+ """
+ LOG.debug('Communicating current best path for all afi/safi except'
+ ' 1/132')
+ # We will enqueue best path from all global destination.
+ for route_family, table in global_tables.iteritems():
+ if route_family == nlri.RF_RTC_UC:
+ continue
+ if self.is_mbgp_cap_valid(route_family):
+ for dest in table.itervalues():
+ if dest.best_path:
+ self.communicate_path(dest.best_path)
+
+ def communicate_path(self, path):
+ """Communicates `path` to this peer if it qualifies.
+
+ Checks if `path` should be shared/communicated with this peer
+ according to various conditions, such as BGP state, transmit-side
+ loop, local and remote AS path, community attributes, etc.
+ """
+ LOG.debug('Peer %s asked to communicate path' % self)
+ if not path:
+ raise ValueError('Invalid path %s given.' % path)
+
+ # We do not send anything to peer who is not in established state.
+ if not self.in_established():
+ LOG.debug('Skipping sending path as peer is not in '
+ 'ESTABLISHED state %s' % path)
+ return
+
+ # Check if this session is available for given paths afi/safi
+ path_rf = path.route_family
+ if not self.is_mpbgp_cap_valid(path_rf):
+ LOG.debug('Skipping sending path as %s route family is not'
+ ' available for this session' % path_rf)
+ return
+
+ # If RTC capability is available and path afi/safi is other than RT
+ # nlri
+ if path_rf != nlri.RF_RTC_UC and \
+ self.is_mpbgp_cap_valid(nlri.RF_RTC_UC):
+ rtfilter = self._peer_manager.curr_peer_rtfilter(self)
+ # If peer does not have any rtfilter or if rtfilter does not have
+ # any RTs common with path RTs we do not share this path with the
+ # peer
+ if rtfilter and not path.has_rts_in(rtfilter):
+ LOG.debug('Skipping sending path as rtfilter %s and path '
+ 'rts %s have no RT in common' %
+ (rtfilter, path.get_rts()))
+ return
+
+ # Transmit side loop detection: we check if the leftmost AS matches
+ # the peer's AS; if so, we do not send an UPDATE message to this peer.
+ as_path = path.get_pattr(pathattr.AsPath.ATTR_NAME)
+ if as_path and as_path.has_matching_leftmost(self.remote_as):
+ LOG.debug('Skipping sending path as AS_PATH has peer AS %s' %
+ self.remote_as)
+ return
+
+ if self._neigh_conf.multi_exit_disc:
+ med_attr = path.get_pattr(pathattr.Med.ATTR_NAME)
+ if not med_attr:
+ path = bgp_utils.clone_path_and_update_med_for_target_neighbor(
+ path,
+ self._neigh_conf.multi_exit_disc
+ )
+
+ # For connected/local-prefixes, we send update to all peers.
+ if path.source is None:
+ # Construct OutgoingRoute specific for this peer and put it in
+ # its sink.
+ outgoing_route = OutgoingRoute(path)
+ self.enque_outgoing_msg(outgoing_route)
+
+ # If path from a bgp-peer is new best path, we share it with
+ # all bgp-peers except the source peer and other peers in its AS.
+ # This is the default JNOS behavior, which can be disabled in JNOS
+ # with the 'advertise-peer-as' setting.
+ elif (self != path.source or
+ self.remote_as != path.source.remote_as):
+ # When BGP speaker receives an UPDATE message from an internal
+ # peer, the receiving BGP speaker SHALL NOT re-distribute the
+ # routing information contained in that UPDATE message to other
+ # internal peers (unless the speaker acts as a BGP Route
+ # Reflector) [RFC4271].
+ if (self.remote_as == self._core_service.asn and
+ self.remote_as == path.source.remote_as):
+ return
+
+ # If new best path has community attribute, it should be taken into
+ # account when sending UPDATE to peers.
+ comm_attr = path.get_pattr(pathattr.Community.ATTR_NAME)
+ if comm_attr:
+ comm_attr_na = comm_attr.has_comm_attr(
+ pathattr.Community.NO_ADVERTISE
+ )
+ # If we have NO_ADVERTISE attribute present, we do not send
+ # UPDATE to any peers
+ if comm_attr_na:
+ LOG.debug('Path has community attr. NO_ADVERTISE = %s'
+ '. Hence not advertising to peer' %
+ comm_attr_na)
+ return
+
+ comm_attr_ne = comm_attr.has_comm_attr(
+ pathattr.Community.NO_EXPORT
+ )
+ comm_attr_nes = comm_attr.has_comm_attr(
+ pathattr.Community.NO_EXPORT_SUBCONFED)
+ # If NO_EXPORT_SUBCONFED/NO_EXPORT is one of the attribute, we
+ # do not advertise to eBGP peers as we do not have any
+ # confederation feature at this time.
+ if ((comm_attr_nes or comm_attr_ne) and
+ (self.remote_as != self._core_service.asn)):
+ LOG.debug('Skipping sending UPDATE to peer: %s as per '
+ 'community attribute configuration' % self)
+ return
+
+ # Construct OutgoingRoute specific for this peer and put it in
+ # its sink.
+ outgoing_route = OutgoingRoute(path)
+ self.enque_outgoing_msg(outgoing_route)
+ LOG.debug('Enqueued outgoing route %s for peer %s' %
+ (outgoing_route.path.nlri, self))
+
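+ # Illustrative note on communicate_path() above: the well-known community
+ # values it checks are the standard RFC 1997 communities, i.e.
+ # NO_EXPORT = 0xFFFFFF01, NO_ADVERTISE = 0xFFFFFF02 and
+ # NO_EXPORT_SUBCONFED = 0xFFFFFF03. A path carrying NO_ADVERTISE is never
+ # advertised to any peer, while NO_EXPORT and NO_EXPORT_SUBCONFED only
+ # suppress advertisement to eBGP peers
+ # (self.remote_as != self._core_service.asn).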
+ def connection_made(self):
+ """Protocols connection established handler
+ """
+ LOG.critical(
+ 'Connection to peer: %s established',
+ self._neigh_conf.ip_address,
+ extra={
+ 'resource_name': self._neigh_conf.name,
+ 'resource_id': self._neigh_conf.id
+ }
+ )
+
+ def connection_lost(self, reason):
+ """Protocols connection lost handler.
+ """
+ LOG.critical(
+ 'Connection to peer %s lost, reason: %s. Resetting '
+ 'retry connect loop: %s' %
+ (self._neigh_conf.ip_address, reason,
+ self._connect_retry_event.is_set()),
+ extra={
+ 'resource_name': self._neigh_conf.name,
+ 'resource_id': self._neigh_conf.id
+ }
+ )
+ self.state.bgp_state = const.BGP_FSM_IDLE
+ if self._protocol:
+ self._protocol = None
+ # Create new collection for initial RT NLRIs
+ self._init_rtc_nlri_path = []
+ self._sent_init_non_rtc_update = False
+ # Clear sink.
+ self.clear_outgoing_msg_list()
+ # Un-schedule timers
+ self._unschedule_sending_init_updates()
+
+ # Increment the version number of this source.
+ self.version_num += 1
+ self._peer_manager.on_peer_down(self)
+
+ # Check configuration if neighbor is still enabled, we try
+ # reconnecting.
+ if self._neigh_conf.enabled:
+ if not self._connect_retry_event.is_set():
+ self._connect_retry_event.set()
diff --git a/ryu/services/protocols/bgp/processor.py b/ryu/services/protocols/bgp/processor.py
new file mode 100644
index 00000000..db5ae9bf
--- /dev/null
+++ b/ryu/services/protocols/bgp/processor.py
@@ -0,0 +1,512 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ Module related to processing bgp paths.
+"""
+
+import logging
+
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_RTC_UC
+from ryu.services.protocols.bgp.protocols.bgp.pathattr import AsPath
+from ryu.services.protocols.bgp.protocols.bgp.pathattr import LocalPref
+from ryu.services.protocols.bgp.protocols.bgp.pathattr import Med
+from ryu.services.protocols.bgp.protocols.bgp.pathattr import Origin
+
+from ryu.services.protocols.bgp.base import Activity
+from ryu.services.protocols.bgp.base import add_bgp_error_metadata
+from ryu.services.protocols.bgp.base import BGP_PROCESSOR_ERROR_CODE
+from ryu.services.protocols.bgp.base import BGPSException
+from ryu.services.protocols.bgp.utils import circlist
+from ryu.services.protocols.bgp.utils.evtlet import EventletIOFactory
+
+
+LOG = logging.getLogger('bgpspeaker.processor')
+
+
+@add_bgp_error_metadata(code=BGP_PROCESSOR_ERROR_CODE, sub_code=1,
+ def_desc='Error occurred when processing bgp '
+ 'destination.')
+class BgpProcessorError(BGPSException):
+ """Base exception related to all destination path processing errors.
+ """
+ pass
+
+
+# Disabling known bug in pylint.
+# pylint: disable=R0921
+class BgpProcessor(Activity):
+ """Worker that processes queued `Destination'.
+
+ `Destination` that have updates related to its paths need to be
+ (re)processed. Only one instance of this processor is enough for normal
+ cases. If you want more control on which destinations get processed faster
+ compared to other destinations, you can create several instance of this
+ works to achieve the desired work flow.
+ """
+
+ # Max. number of destinations processed per cycle.
+ MAX_DEST_PROCESSED_PER_CYCLE = 100
+
+ #
+ # DestQueue
+ #
+ # A circular list type in which objects are linked to each
+ # other using the 'next_dest_to_process' and 'prev_dest_to_process'
+ # attributes.
+ #
+ _DestQueue = circlist.CircularListType(
+ next_attr_name='next_dest_to_process',
+ prev_attr_name='prev_dest_to_process')
+
+ def __init__(self, core_service, work_units_per_cycle=None):
+ Activity.__init__(self)
+ # Back pointer to core service instance that created this processor.
+ self._core_service = core_service
+ self._dest_queue = BgpProcessor._DestQueue()
+ self._rtdest_queue = BgpProcessor._DestQueue()
+ self.dest_que_evt = EventletIOFactory.create_custom_event()
+ self.work_units_per_cycle =\
+ work_units_per_cycle or BgpProcessor.MAX_DEST_PROCESSED_PER_CYCLE
+
+ def _run(self, *args, **kwargs):
+ # Sit in a tight loop, getting destinations from the queue and
+ # processing them one at a time.
+ while True:
+ LOG.debug('Starting new processing run...')
+ # We process all RT destinations first so that we get the new RT
+ # filters that apply for each peer
+ self._process_rtdest()
+
+ # We then process a batch of other destinations (we do not process
+ # all destinations here as we want to give other greenthreads a
+ # chance to run)
+ self._process_dest()
+
+ if self._dest_queue.is_empty():
+ # If we have no destinations queued for processing, we wait.
+ self.dest_que_evt.clear()
+ self.dest_que_evt.wait()
+ else:
+ self.pause(0)
+
+ def _process_dest(self):
+ dest_processed = 0
+ LOG.debug('Processing destination...')
+ while (dest_processed < self.work_units_per_cycle and
+ not self._dest_queue.is_empty()):
+ # We process the first destination in the queue.
+ next_dest = self._dest_queue.pop_first()
+ if next_dest:
+ next_dest.process()
+ dest_processed += 1
+
+ def _process_rtdest(self):
+ LOG.debug('Processing RT NLRI destination...')
+ if self._rtdest_queue.is_empty():
+ return
+ else:
+ processed_any = False
+ while not self._rtdest_queue.is_empty():
+ # We process the first destination in the queue.
+ next_dest = self._rtdest_queue.pop_first()
+ if next_dest:
+ next_dest.process()
+ processed_any = True
+
+ if processed_any:
+ # Since RT destinations were updated, we update the RT filters
+ self._core_service.update_rtfilters()
+
+ def enqueue(self, destination):
+ """Enqueues given destination for processing.
+
+ Given instance should be a valid destination.
+ """
+ if not destination:
+ raise BgpProcessorError('Invalid destination %s.' % destination)
+
+ dest_queue = self._dest_queue
+ # RtDest are queued in a separate queue
+ if destination.route_family == RF_RTC_UC:
+ dest_queue = self._rtdest_queue
+
+ # We do not add given destination to the queue for processing if
+ # it is already on the queue.
+ if not dest_queue.is_on_list(destination):
+ dest_queue.append(destination)
+
+ # Wake-up processing thread if sleeping.
+ self.dest_que_evt.set()
+
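+ # Illustrative usage (hypothetical caller): a table manager that has updated
+ # a destination's paths would call processor.enqueue(dest); the call appends
+ # the destination to the appropriate queue (the RT NLRI queue for RF_RTC_UC,
+ # the general queue otherwise) and sets dest_que_evt, which wakes the _run()
+ # loop if it is waiting.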
+#==============================================================================
+# Best path computation related utilities.
+#==============================================================================
+
+# Various reasons a path is chosen as best path.
+BPR_UNKNOWN = 'Unknown'
+BPR_ONLY_PATH = 'Only Path'
+BPR_REACHABLE_NEXT_HOP = 'Reachable Next Hop'
+BPR_HIGHEST_WEIGHT = 'Highest Weight'
+BPR_LOCAL_PREF = 'Local Pref.'
+BPR_LOCAL_ORIGIN = 'Local Origin'
+BPR_ASPATH = 'AS Path'
+BPR_ORIGIN = 'Origin'
+BPR_MED = 'MED'
+BPR_ASN = 'ASN'
+BPR_IGP_COST = 'IGP Cost'
+BPR_ROUTER_ID = 'Router ID'
+
+
+def _compare_by_version(path1, path2):
+ """Returns the current/latest learned path.
+
+ Checks if given paths are from the same source/peer and then compares
+ their version numbers to determine which path was received later. If
+ the paths are from different sources/peers, returns None.
+ """
+ if path1.source == path2.source:
+ if path1.source_version_num > path2.source_version_num:
+ return path1
+ else:
+ return path2
+ return None
+
+
+def compute_best_path(local_asn, path1, path2):
+ """Compares given paths and returns best path.
+
+ Parameters:
+ -`local_asn`: asn of local bgpspeaker
+ -`path1`: first path to compare
+ -`path2`: second path to compare
+
+ Best path processing will involve following steps:
+ 1. Select a path with a reachable next hop.
+ 2. Select the path with the highest weight.
+ 3. If path weights are the same, select the path with the highest
+ local preference value.
+ 4. Prefer locally originated routes (network routes, redistributed
+ routes, or aggregated routes) over received routes.
+ 5. Select the route with the shortest AS-path length.
+ 6. If all paths have the same AS-path length, select the path based
+ on origin: IGP is preferred over EGP; EGP is preferred over
+ Incomplete.
+ 7. If the origins are the same, select the path with lowest MED
+ value.
+ 8. If the paths have the same MED values, select the path learned
+ via EBGP over one learned via IBGP.
+ 9. Select the route with the lowest IGP cost to the next hop.
+ 10. Select the route received from the peer with the lowest BGP
+ router ID.
+
+ Returns a tuple (best_path, reason); best_path is None if the best path
+ among the given paths cannot be computed.
+ Assumes paths from NC have source equal to None.
+ """
+ best_path = None
+ best_path_reason = BPR_UNKNOWN
+
+ # Follow best path calculation algorithm steps.
+ if best_path is None:
+ best_path = _cmp_by_reachable_nh(path1, path2)
+ best_path_reason = BPR_REACHABLE_NEXT_HOP
+ if best_path is None:
+ best_path = _cmp_by_higest_wg(path1, path2)
+ best_path_reason = BPR_HIGHEST_WEIGHT
+ if best_path is None:
+ best_path = _cmp_by_local_pref(path1, path2)
+ best_path_reason = BPR_LOCAL_PREF
+ if best_path is None:
+ best_path = _cmp_by_local_origin(path1, path2)
+ best_path_reason = BPR_LOCAL_ORIGIN
+ if best_path is None:
+ best_path = _cmp_by_aspath(path1, path2)
+ best_path_reason = BPR_ASPATH
+ if best_path is None:
+ best_path = _cmp_by_origin(path1, path2)
+ best_path_reason = BPR_ORIGIN
+ if best_path is None:
+ best_path = _cmp_by_med(path1, path2)
+ best_path_reason = BPR_MED
+ if best_path is None:
+ best_path = _cmp_by_asn(local_asn, path1, path2)
+ best_path_reason = BPR_ASN
+ if best_path is None:
+ best_path = _cmp_by_igp_cost(path1, path2)
+ best_path_reason = BPR_IGP_COST
+ if best_path is None:
+ best_path = _cmp_by_router_id(local_asn, path1, path2)
+ best_path_reason = BPR_ROUTER_ID
+ if best_path is None:
+ best_path_reason = BPR_UNKNOWN
+
+ return (best_path, best_path_reason)
+
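+ # Illustrative walk-through (assuming two paths learned from different eBGP
+ # peers, both without a LOCAL_PREF attribute, with the reachability and
+ # weight checks returning None as above): if path1 carries an AS_PATH of
+ # length 2 and path2 an AS_PATH of length 3, the comparison falls through to
+ # _cmp_by_aspath() and compute_best_path(local_asn, path1, path2) returns
+ # (path1, BPR_ASPATH).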
+
+def _cmp_by_reachable_nh(path1, path2):
+ """Compares given paths and selects best path based on reachable next-hop.
+
+ If no path matches this criteria, return None.
+ """
+ # TODO(PH): Currently we do not have a way to check if a IGP route to
+ # NEXT_HOP exists from BGPS.
+ return None
+
+
+def _cmp_by_higest_wg(path1, path2):
+ """Selects a path with highest weight.
+
+ Weight is a BGPS-specific parameter. It is local to the router on which
+ it is configured.
+ Return:
+ None if best path among given paths cannot be decided, else best path.
+ """
+ # TODO(PH): Revisit this when BGPS has concept of policy to be applied to
+ # in-bound NLRIs.
+ return None
+
+
+def _cmp_by_local_pref(path1, path2):
+ """Selects a path with highest local-preference.
+
+ Unlike the weight attribute, which is only relevant to the local
+ router, local preference is an attribute that routers exchange in the
+ same AS. Highest local-pref is preferred. If we cannot decide,
+ we return None.
+ """
+ # TODO(PH): Revisit this when BGPS has concept of policy to be applied to
+ # in-bound NLRIs.
+ # Default local-pref value is 100
+ lp1 = path1.get_pattr(LocalPref.ATTR_NAME)
+ lp2 = path2.get_pattr(LocalPref.ATTR_NAME)
+ if not (lp1 and lp2):
+ return None
+
+ # Highest local-preference value is preferred.
+ lp1 = lp1.value
+ lp2 = lp2.value
+ if lp1 > lp2:
+ return path1
+ elif lp2 > lp1:
+ return path2
+ else:
+ return None
+
+
+def _cmp_by_local_origin(path1, path2):
+ """Select locally originating path as best path.
+
+ Locally originating routes are network routes, redistributed routes,
+ or aggregated routes. For now we are going to prefer routes received
+ through a Flexinet-Peer as locally originating routes over routes
+ received from a BGP peer.
+ Returns None if given paths have same source.
+ """
+ # If both paths are from same sources we cannot compare them here.
+ if path1.source == path2.source:
+ return None
+
+ # Here we consider prefix from NC as locally originating static route.
+ # Hence it is preferred.
+ if path1.source is None:
+ return path1
+
+ if path2.source is None:
+ return path2
+
+ return None
+
+
+def _cmp_by_aspath(path1, path2):
+ """Calculated the best-paths by comparing as-path lengths.
+
+ Shortest as-path length is preferred. If both path have same lengths,
+ we return None.
+ """
+ as_path1 = path1.get_pattr(AsPath.ATTR_NAME)
+ as_path2 = path2.get_pattr(AsPath.ATTR_NAME)
+ assert as_path1 and as_path2
+ l1 = as_path1.get_as_path_len()
+ l2 = as_path2.get_as_path_len()
+ assert l1 is not None and l2 is not None
+ if l1 > l2:
+ return path2
+ elif l2 > l1:
+ return path1
+ else:
+ return None
+
+
+def _cmp_by_origin(path1, path2):
+ """Select the best path based on origin attribute.
+
+ IGP is preferred over EGP; EGP is preferred over Incomplete.
+ If both paths have same origin, we return None.
+ """
+ def get_origin_pref(origin):
+ if origin.value == Origin.IGP:
+ return 3
+ elif origin.value == Origin.EGP:
+ return 2
+ elif origin.value == Origin.INCOMPLETE:
+ return 1
+ else:
+ LOG.error('Invalid origin value encountered %s.' % origin)
+ return 0
+
+ origin1 = path1.get_pattr(Origin.ATTR_NAME)
+ origin2 = path2.get_pattr(Origin.ATTR_NAME)
+ assert origin1 is not None and origin2 is not None
+
+ # If both paths have same origins
+ if origin1.value == origin2.value:
+ return None
+
+ # Translate origin values to preference.
+ origin1 = get_origin_pref(origin1)
+ origin2 = get_origin_pref(origin2)
+ # Return preferred path.
+ if origin1 == origin2:
+ return None
+ elif origin1 > origin2:
+ return path1
+ return path2
+
+
+def _cmp_by_med(path1, path2):
+ """Select the path based with lowest MED value.
+
+ If both paths have same MED, return None.
+ By default, a route that arrives with no MED value is treated as if it
+ had a MED of 0, the most preferred value.
+ RFC says lower MED is preferred over higher MED value.
+ """
+ def get_path_med(path):
+ med = path.get_pattr(Med.ATTR_NAME)
+ if not med:
+ return 0
+ return med.value
+
+ med1 = get_path_med(path1)
+ med2 = get_path_med(path2)
+
+ if med1 == med2:
+ return None
+ elif med1 < med2:
+ return path1
+ return path2
+
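+ # Illustrative example: a path that carries no MED attribute is treated as
+ # MED 0 by get_path_med() above, so it is preferred over a path with, say,
+ # MED 10, i.e. _cmp_by_med(path_without_med, path_with_med_10) returns the
+ # path without MED.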
+
+def _cmp_by_asn(local_asn, path1, path2):
+ """Select the path based on source (iBGP/eBGP) peer.
+
+ eBGP path is preferred over iBGP. If both paths are from same kind of
+ peers, return None.
+ """
+ def get_path_source_asn(path):
+ asn = None
+ if path.source is None:
+ asn = local_asn
+ else:
+ asn = path.source.remote_as
+ return asn
+
+ p1_asn = get_path_source_asn(path1)
+ p2_asn = get_path_source_asn(path2)
+ # If path1 is from ibgp peer and path2 is from ebgp peer.
+ if (p1_asn == local_asn) and (p2_asn != local_asn):
+ return path2
+
+ # If path2 is from ibgp peer and path1 is from ebgp peer,
+ if (p2_asn == local_asn) and (p1_asn != local_asn):
+ return path1
+
+ # If both paths are from ebgp or ibgp peers, we cannot decide.
+ return None
+
+
+def _cmp_by_igp_cost(path1, path2):
+ """Select the route with the lowest IGP cost to the next hop.
+
+ Return None if igp cost is same.
+ """
+ # TODO(PH): Currently BGPS has no concept of IGP and IGP cost.
+ return None
+
+
+def _cmp_by_router_id(local_asn, path1, path2):
+ """Select the route received from the peer with the lowest BGP router ID.
+
+ If both paths are eBGP paths, we do not do any tie breaking, i.e. we do
+ not pick the best path based on this criterion
+ (RFC: http://tools.ietf.org/html/rfc5004).
+ We pick the best path between two iBGP paths as usual.
+ """
+ def get_asn(path_source):
+ if path_source is None:
+ return local_asn
+ else:
+ return path_source.remote_as
+
+ def get_router_id(path_source, local_bgp_id):
+ if path_source is None:
+ return local_bgp_id
+ else:
+ return path_source.protocol.recv_open.bgpid
+
+ path_source1 = path1.source
+ path_source2 = path2.source
+
+ # If both paths are from NC we have same router Id, hence cannot compare.
+ if path_source1 is None and path_source2 is None:
+ return None
+
+ asn1 = get_asn(path_source1)
+ asn2 = get_asn(path_source2)
+
+ is_ebgp1 = asn1 != local_asn
+ is_ebgp2 = asn2 != local_asn
+ # If both paths are from eBGP peers, then according to RFC we need
+ # not tie break using router id.
+ if (is_ebgp1 and is_ebgp2):
+ return None
+
+ if ((is_ebgp1 is True and is_ebgp2 is False) or
+ (is_ebgp1 is False and is_ebgp2 is True)):
+ raise ValueError('This method does not support comparing ebgp with'
+ ' ibgp path')
+
+ # At least one path is not coming from NC, so we get local bgp id.
+ if path_source1 is not None:
+ local_bgp_id = path_source1.protocol.sent_open.bgpid
+ else:
+ local_bgp_id = path_source2.protocol.sent_open.bgpid
+
+ # Get router ids.
+ router_id1 = get_router_id(path_source1, local_bgp_id)
+ router_id2 = get_router_id(path_source2, local_bgp_id)
+
+ # If both router ids are same/equal we cannot decide.
+ # This case is possible since router ids are arbitrary.
+ if router_id1 == router_id2:
+ return None
+
+ # Select the path with lowest router Id.
+ from ryu.services.protocols.bgp.utils.bgp import from_inet_ptoi
+ if (from_inet_ptoi(router_id1) <
+ from_inet_ptoi(router_id2)):
+ return path1
+ else:
+ return path2
diff --git a/ryu/services/protocols/bgp/protocol.py b/ryu/services/protocols/bgp/protocol.py
new file mode 100644
index 00000000..43f45347
--- /dev/null
+++ b/ryu/services/protocols/bgp/protocol.py
@@ -0,0 +1,87 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Module defines protocol based classes and utils.
+"""
+
+from abc import ABCMeta
+from abc import abstractmethod
+
+
+class Protocol(object):
+ """Interface for various protocols.
+
+ Protocol usually encloses a transport/connection/socket to
+ peer/client/server and encodes and decodes communication/messages. Protocol
+ can also maintain any related state machine, protocol message encoding or
+ decoding utilities. This interface identifies minimum methods to support to
+ facilitate or provide hooks to sub-classes to override behavior as
+ appropriate.
+ """
+ __metaclass__ = ABCMeta
+
+ @abstractmethod
+ def data_received(self, data):
+ """Handler for date received over connection/transport.
+
+ Here *data* is in raw bytes. This *data* should further be converted to
+ protocol specific messages and as appropriate transition to new state
+ machine state or send appropriate response.
+ """
+ pass
+
+ @abstractmethod
+ def connection_made(self):
+ """Called when connection has been established according to protocol.
+
+ This is the right place to do some initialization or sending initial
+ hello messages.
+ """
+ pass
+
+ @abstractmethod
+ def connection_lost(self, reason):
+ """Handler called when connection to peer/remote according to protocol
+ has been lost.
+
+ Here we can do any clean-up related to connection/transport/timers/etc.
+ """
+ pass
+
+
+class Factory(object):
+ """This is a factory which produces protocols.
+
+ Can also act as context for protocols.
+ """
+ __metaclass__ = ABCMeta
+
+ # Put a subclass of Protocol here:
+ protocol = None
+
+ @abstractmethod
+ def build_protocol(self, socket):
+ """Create an instance of a subclass of Protocol.
+
+ Override this method to alter how Protocol instances get created.
+ """
+ raise NotImplementedError()
+
+ @abstractmethod
+ def start_protocol(self, socket):
+ """Launch protocol instance to handle input on an incoming connection.
+ """
+ raise NotImplementedError()
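+ # Illustrative sketch (hypothetical names, not part of the interface): a
+ # concrete implementation would subclass both interfaces, e.g.
+ #
+ #   class MyProtocol(Protocol):
+ #       def data_received(self, data):
+ #           pass  # decode raw bytes into messages here
+ #
+ #       def connection_made(self):
+ #           pass  # e.g. send an initial OPEN/hello message
+ #
+ #       def connection_lost(self, reason):
+ #           pass  # clean up timers/state here
+ #
+ #   class MyFactory(Factory):
+ #       protocol = MyProtocol
+ #
+ #       def build_protocol(self, socket):
+ #           return self.protocol()
+ #
+ #       def start_protocol(self, socket):
+ #           self.build_protocol(socket).connection_made()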
diff --git a/ryu/services/protocols/bgp/protocols/bgp/__init__.py b/ryu/services/protocols/bgp/protocols/bgp/__init__.py
new file mode 100644
index 00000000..99c53380
--- /dev/null
+++ b/ryu/services/protocols/bgp/protocols/bgp/__init__.py
@@ -0,0 +1,7 @@
+try:
+ from collections import OrderedDict
+except ImportError:
+ from ordereddict import OrderedDict
+
+# Pointer to active/available OrderedDict.
+OrderedDict = OrderedDict
diff --git a/ryu/services/protocols/bgp/protocols/bgp/capabilities.py b/ryu/services/protocols/bgp/protocols/bgp/capabilities.py
new file mode 100644
index 00000000..ed2acaa9
--- /dev/null
+++ b/ryu/services/protocols/bgp/protocols/bgp/capabilities.py
@@ -0,0 +1,280 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This module provides BGP protocol capabilities classes and utility methods to
+encode and decode them.
+"""
+
+from abc import ABCMeta
+from abc import abstractmethod
+import logging
+import struct
+
+from ryu.services.protocols.bgp.protocols.bgp.exceptions import \
+ MalformedOptionalParam
+from ryu.services.protocols.bgp.protocols.bgp.nlri import get_rf
+from ryu.services.protocols.bgp.protocols.bgp.nlri import \
+ RouteFamily as route_fmly
+
+
+# Logger instance for this module
+LOG = logging.getLogger('bgpspeaker.bgp.proto.capabilities')
+
+# Registry for bgp capability class by their code.
+# <Key>: <Value> - <capability-code>: <capability-class>
+_BGP_CAPABILITY_REGISTRY = {}
+
+
+def _register_bgp_capabilities(cls):
+ """Utility decorator used to register bgp supported/recognized
+ capabilities.
+
+ Capabilities classes are registered by their capability-code.
+ """
+ assert issubclass(cls, Capability)
+ assert hasattr(cls, 'CODE')
+ assert _BGP_CAPABILITY_REGISTRY.get(cls.CODE) is None
+ _BGP_CAPABILITY_REGISTRY[cls.CODE] = cls
+ return cls
+
+
+def is_recognized_cap_codes(cap_code):
+ return cap_code in _BGP_CAPABILITY_REGISTRY
+
+
+def decode(byte_value):
+ """Decodes given `byte_value` into appropriate capabilities.
+
+ Parameter:
+ - `byte_value`: (str) byte representation of one capability
+ advertisement
+ Returns:
+ - list of capabilities decoded from given bytes
+ Note: Different routers pack capabilities into one capability
+ advertisement/optional parameter or split them across several capability
+ advertisements. Hence we return a list of one or more decoded
+ capabilities.
+ """
+ idx = 0
+ total_len = len(byte_value)
+ caps = []
+ # Parse one or more capabilities packed inside the given capability-
+ # advertisement payload
+ while idx < total_len:
+ cap_code, clen = struct.unpack_from('BB', byte_value, idx)
+ idx += 2
+ cap = byte_value[idx:idx + clen]
+ idx += clen
+
+ cap_cls = _BGP_CAPABILITY_REGISTRY.get(cap_code)
+ if cap_cls:
+ cap = cap_cls.from_bytes(cap)
+ caps.append(cap)
+ else:
+ # RFC 5492 says: If a BGP speaker receives from its peer a
+ # capability that it does not itself support or recognize, it MUST
+ # ignore that capability. In particular, the Unsupported
+ # Capability NOTIFICATION message MUST NOT be generated and the BGP
+ # session MUST NOT be terminated in response to reception of a
+ # capability that is not supported by the local speaker.
+ cap = UnSupportedCap(cap_code, cap)
+
+ return caps
+
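+ # Illustrative example for decode() above (wire bytes shown as Python string
+ # literals): a route-refresh capability is encoded as code 2 with zero
+ # length, so decode('\x02\x00') returns [<route-refresh>]; a multiprotocol
+ # extension capability for IPv4/unicast (AFI 1, SAFI 1) arrives as code 1,
+ # length 4, value AFI(2)/reserved(1)/SAFI(1), i.e.
+ # decode('\x01\x04\x00\x01\x00\x01') returns a list with one
+ # MultiprotocolExtentionCap for that route family.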
+
+class Capability(object):
+ """Super class of all bgp capability optional parameters.
+ """
+ __metaclass__ = ABCMeta
+ CODE = -1
+ NAME = 'abstract-cap'
+
+ @abstractmethod
+ def packvalue(self):
+ """Encode this bgp capability."""
+ raise NotImplementedError()
+
+ def encode(self):
+ """Encodes this bgp capability with header and body."""
+ body = self.packvalue()
+ return struct.pack('BB', self.__class__.CODE, len(body)) + body
+
+ def __repr__(self):
+ return '<%s>' % self.__class__.NAME
+
+
+class UnSupportedCap(Capability):
+ """Represents unknown capability.
+
+ According to RFC 5492 it is recommended that we do not send a
+ NOTIFICATION message for an "Unsupported Capability".
+ """
+ NAME = 'unsupported-cap'
+
+ def __init__(self, code, value):
+ self.CODE = code
+ self._value = value
+
+ def packvalue(self):
+ return self._value
+
+ def __repr__(self):
+ return '<UnSupportedCap(code=%s)>' % self.CODE
+
+
+@_register_bgp_capabilities
+class MultiprotocolExtentionCap(Capability):
+ """This class represents bgp multi-protocol extension capability.
+ """
+ CODE = 1
+ NAME = 'mbgp'
+
+ def __init__(self, route_family):
+ if not route_fmly.is_valid(route_family):
+ raise ValueError('Invalid argument %s' % route_family)
+
+ Capability.__init__(self)
+ self.route_family = route_family
+
+ def packvalue(self):
+ return struct.pack('!HH', self.route_family.afi,
+ self.route_family.safi)
+
+ @classmethod
+ def from_bytes(cls, value):
+ afi, _, safi = struct.unpack_from('!HBB', value)
+ return cls(get_rf(afi, safi))
+
+ def __repr__(self):
+ return ('<MultiprotocolExtentionCap(af=%s, saf=%s)>' %
+ (self.route_family.afi, self.route_family.safi))
+
+ def __eq__(self, other):
+ if (other.__class__.CODE == self.__class__.CODE and
+ other.route_family.afi == self.route_family.afi and
+ other.route_family.safi == self.route_family.safi):
+ return True
+ return False
+
+
+class ZeroLengthCap(Capability):
+ """This is a super class represent all bgp capability with zero length."""
+ CODE = -1
+ NAME = 'zero-length'
+
+ def packvalue(self):
+ return ''
+
+ @classmethod
+ def from_bytes(cls, value):
+ if len(value) > 0:
+ LOG.error('Zero length capability has non-zero length value!')
+ raise MalformedOptionalParam()
+ return cls.get_singleton()
+
+ @staticmethod
+ def get_singleton():
+ raise NotImplementedError()
+
+
+@_register_bgp_capabilities
+class RouteRefreshCap(ZeroLengthCap):
+ CODE = 2
+ NAME = 'route-refresh'
+
+ def __str__(self):
+ return RouteRefreshCap.NAME
+
+ @staticmethod
+ def get_singleton():
+ return _ROUTE_REFRESH_CAP
+
+
+@_register_bgp_capabilities
+class OldRouteRefreshCap(ZeroLengthCap):
+ CODE = 128
+ NAME = 'old-route-refresh'
+
+ def __str__(self):
+ return OldRouteRefreshCap.NAME
+
+ @staticmethod
+ def get_singleton():
+ return _OLD_ROUTE_REFRESH_CAP
+
+
+@_register_bgp_capabilities
+class GracefulRestartCap(Capability):
+ CODE = 64
+ NAME = 'graceful-restart'
+
+ def __init__(self, value):
+ # TODO(PH): Provide implementation
+ Capability.__init__(self)
+ self.value = value
+
+ def packvalue(self):
+ # TODO(PH): Provide implementation
+ return self.value
+
+ @classmethod
+ def from_bytes(cls, value):
+ return cls(value)
+
+
+# Since four byte as capability is not fully supported, we do not register it
+# as supported/recognized capability.
+@_register_bgp_capabilities
+class FourByteAsCap(Capability):
+ CODE = 65
+ NAME = '4byteas'
+
+ def __init__(self, four_byte_as):
+ Capability.__init__(self)
+ self.four_byte_as = four_byte_as
+
+ def packvalue(self):
+ return struct.pack('!I', self.four_byte_as)
+
+ @classmethod
+ def from_bytes(cls, value):
+ value, = struct.unpack('!I', value)
+ return cls(value)
+
+ def __repr__(self):
+ return '<FourByteAsCap(%s)>' % self.four_byte_as
+
+ def __eq__(self, other):
+ if (other and other.four_byte_as == self.four_byte_as):
+ return True
+ return False
+
+
+@_register_bgp_capabilities
+class EnhancedRouteRefreshCap(ZeroLengthCap):
+ CODE = 70
+ NAME = 'enhanced-refresh'
+
+ @staticmethod
+ def get_singleton():
+ return _ENHANCED_ROUTE_REFRESH_CAP
+
+# Zero length capability singletons
+_ROUTE_REFRESH_CAP = RouteRefreshCap()
+_ENHANCED_ROUTE_REFRESH_CAP = EnhancedRouteRefreshCap()
+_OLD_ROUTE_REFRESH_CAP = OldRouteRefreshCap()
diff --git a/ryu/services/protocols/bgp/protocols/bgp/exceptions.py b/ryu/services/protocols/bgp/protocols/bgp/exceptions.py
new file mode 100644
index 00000000..dba5ed6e
--- /dev/null
+++ b/ryu/services/protocols/bgp/protocols/bgp/exceptions.py
@@ -0,0 +1,349 @@
+import struct
+
+
+class BgpExc(Exception):
+ """Base bgp exception."""
+
+ CODE = 0
+ """BGP error code."""
+
+ SUB_CODE = 0
+ """BGP error sub-code."""
+
+ SEND_ERROR = True
+ """Flag if set indicates Notification message should be sent to peer."""
+
+ def __init__(self, data=''):
+ self.data = data
+
+ def __str__(self):
+ return '<%s %r>' % (self.__class__.__name__, self.data)
+
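+# Illustrative example: each subclass maps to a NOTIFICATION (code, sub-code,
+# data) triple; e.g. raising BadMsg(7) for an unknown message type carries
+# CODE=1, SUB_CODE=3 and data '\x07', which a peer would report as
+# 'Message Header Error: bad message type'.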
+
+class BadNotification(BgpExc):
+ SEND_ERROR = False
+
+#=============================================================================
+# Message Header Errors
+#=============================================================================
+
+
+class NotSync(BgpExc):
+ CODE = 1
+ SUB_CODE = 1
+
+
+class BadLen(BgpExc):
+ CODE = 1
+ SUB_CODE = 2
+
+ def __init__(self, msg_type_code, message_length):
+ self.msg_type_code = msg_type_code
+ self.length = message_length
+ self.data = struct.pack('!H', self.length)
+
+ def __str__(self):
+ return '<BadLen %d msgtype=%d>' % (self.length, self.msg_type_code)
+
+
+class BadMsg(BgpExc):
+ """Error to indicate un-recognized message type.
+
+ RFC says: If the Type field of the message header is not recognized, then
+ the Error Subcode MUST be set to Bad Message Type. The Data field MUST
+ contain the erroneous Type field.
+ """
+ CODE = 1
+ SUB_CODE = 3
+
+ def __init__(self, msg_type):
+ self.msg_type = msg_type
+ self.data = struct.pack('B', msg_type)
+
+ def __str__(self):
+ return '<BadMsg %d>' % (self.msg_type,)
+
+#=============================================================================
+# OPEN Message Errors
+#=============================================================================
+
+
+class MalformedOptionalParam(BgpExc):
+ """If recognized optional parameters are malformed.
+
+ RFC says: If one of the Optional Parameters in the OPEN message is
+ recognized, but is malformed, then the Error Subcode MUST be set to 0
+ (Unspecific).
+ """
+ CODE = 2
+ SUB_CODE = 0
+
+
+class UnsupportedVersion(BgpExc):
+ """Error to indicate unsupport bgp version number.
+
+ RFC says: If the version number in the Version field of the received OPEN
+ message is not supported, then the Error Subcode MUST be set to Unsupported
+ Version Number. The Data field is a 2-octet unsigned integer, which
+ indicates the largest, locally-supported version number less than the
+ version the remote BGP peer bid (as indicated in the received OPEN
+ message), or if the smallest, locally-supported version number is greater
+ than the version the remote BGP peer bid, then the smallest, locally-
+ supported version number.
+ """
+ CODE = 2
+ SUB_CODE = 1
+
+ def __init__(self, locally_support_version):
+ self.data = struct.pack('H', locally_support_version)
+
+
+class BadPeerAs(BgpExc):
+ """Error to indicate open message has incorrect AS number.
+
+ RFC says: If the Autonomous System field of the OPEN message is
+ unacceptable, then the Error Subcode MUST be set to Bad Peer AS. The
+ determination of acceptable Autonomous System numbers is based on the
+ configured peer AS.
+ """
+ CODE = 2
+ SUB_CODE = 2
+
+
+class BadBgpId(BgpExc):
+ """Error to indicate incorrect BGP Identifier.
+
+ RFC says: If the BGP Identifier field of the OPEN message is syntactically
+ incorrect, then the Error Subcode MUST be set to Bad BGP Identifier.
+ Syntactic correctness means that the BGP Identifier field represents a
+ valid unicast IP host address.
+ """
+ CODE = 2
+ SUB_CODE = 3
+
+
+class UnsupportedOptParam(BgpExc):
+ """Error to indicate unsupported optional parameters.
+
+ RFC says: If one of the Optional Parameters in the OPEN message is not
+ recognized, then the Error Subcode MUST be set to Unsupported Optional
+ Parameters.
+ """
+ CODE = 2
+ SUB_CODE = 4
+
+
+class AuthFailure(BgpExc):
+ CODE = 2
+ SUB_CODE = 5
+
+
+class UnacceptableHoldTime(BgpExc):
+ """Error to indicate Unacceptable Hold Time in open message.
+
+ RFC says: If the Hold Time field of the OPEN message is unacceptable, then
+ the Error Subcode MUST be set to Unacceptable Hold Time.
+ """
+ CODE = 2
+ SUB_CODE = 6
+
+#=============================================================================
+# UPDATE message related errors
+#=============================================================================
+
+
+class MalformedAttrList(BgpExc):
+ """Error to indicate UPDATE message is malformed.
+
+ RFC says: Error checking of an UPDATE message begins by examining the path
+ attributes. If the Withdrawn Routes Length or Total Attribute Length is
+ too large (i.e., if Withdrawn Routes Length + Total Attribute Length + 23
+ exceeds the message Length), then the Error Subcode MUST be set to
+ Malformed Attribute List.
+ """
+ CODE = 3
+ SUB_CODE = 1
+
+
+class UnRegWellKnowAttr(BgpExc):
+ CODE = 3
+ SUB_CODE = 2
+
+
+class MissingWellKnown(BgpExc):
+ """Error to indicate missing well-known attribute.
+
+ RFC says: If any of the well-known mandatory attributes are not present,
+ then the Error Subcode MUST be set to Missing Well-known Attribute. The
+ Data field MUST contain the Attribute Type Code of the missing, well-known
+ attribute.
+ """
+ CODE = 3
+ SUB_CODE = 3
+
+ def __init__(self, pattr_type_code):
+ self.pattr_type_code = pattr_type_code
+ self.data = struct.pack('B', pattr_type_code)
+
+
+class AttrFlagError(BgpExc):
+ """Error to indicate recognized path attributes have incorrect flags.
+
+ RFC says: If any recognized attribute has Attribute Flags that conflict
+ with the Attribute Type Code, then the Error Subcode MUST be set to
+ Attribute Flags Error. The Data field MUST contain the erroneous attribute
+ (type, length, and value).
+ """
+ CODE = 3
+ SUB_CODE = 4
+
+
+class AttrLenError(BgpExc):
+ CODE = 3
+ SUB_CODE = 5
+
+
+class InvalidOriginError(BgpExc):
+ """Error indicates undefined Origin attribute value.
+
+ RFC says: If the ORIGIN attribute has an undefined value, then the Error
+ Sub- code MUST be set to Invalid Origin Attribute. The Data field MUST
+ contain the unrecognized attribute (type, length, and value).
+ """
+ CODE = 3
+ SUB_CODE = 6
+
+
+class RoutingLoop(BgpExc):
+ CODE = 3
+ SUB_CODE = 7
+
+
+class InvalidNextHop(BgpExc):
+ CODE = 3
+ SUB_CODE = 8
+
+
+class OptAttrError(BgpExc):
+ """Error indicates Optional Attribute is malformed.
+
+ RFC says: If an optional attribute is recognized, then the value of this
+ attribute MUST be checked. If an error is detected, the attribute MUST be
+ discarded, and the Error Subcode MUST be set to Optional Attribute Error.
+ The Data field MUST contain the attribute (type, length, and value).
+ """
+ CODE = 3
+ SUB_CODE = 9
+
+
+class InvalidNetworkField(BgpExc):
+ CODE = 3
+ SUB_CODE = 10
+
+
+class MalformedAsPath(BgpExc):
+ """Error to indicate if AP_PATH attribute is syntactically incorrect.
+
+ RFC says: The AS_PATH attribute is checked for syntactic correctness. If
+ the path is syntactically incorrect, then the Error Subcode MUST be set to
+ Malformed AS_PATH.
+ """
+ CODE = 3
+ SUB_CODE = 11
+
+
+#=============================================================================
+# Hold Timer Expired
+#=============================================================================
+
+
+class HoldTimerExpired(BgpExc):
+ """Error to indicate Hold Timer expired.
+
+ RFC says: If a system does not receive successive KEEPALIVE, UPDATE, and/or
+ NOTIFICATION messages within the period specified in the Hold Time field of
+ the OPEN message, then the NOTIFICATION message with the Hold Timer Expired
+ Error Code is sent and the BGP connection is closed.
+ """
+ CODE = 4
+ SUB_CODE = 1
+
+#=============================================================================
+# Finite State Machine Error
+#=============================================================================
+
+
+class FiniteStateMachineError(BgpExc):
+ """Error to indicate any Finite State Machine Error.
+
+ RFC says: Any error detected by the BGP Finite State Machine (e.g., receipt
+ of an unexpected event) is indicated by sending the NOTIFICATION message
+ with the Error Code Finite State Machine Error.
+ """
+ CODE = 5
+ SUB_CODE = 1
+
+
+#=============================================================================
+# Cease Errors
+#=============================================================================
+
+class MaxPrefixReached(BgpExc):
+ CODE = 6
+ SUB_CODE = 1
+
+
+class AdminShutdown(BgpExc):
+ """Error to indicate Administrative shutdown.
+
+ RFC says: If a BGP speaker decides to administratively shut down its
+ peering with a neighbor, then the speaker SHOULD send a NOTIFICATION
+ message with the Error Code Cease and the Error Subcode 'Administrative
+ Shutdown'.
+ """
+ CODE = 6
+ SUB_CODE = 2
+
+
+class PeerDeConfig(BgpExc):
+ CODE = 6
+ SUB_CODE = 3
+
+
+class AdminReset(BgpExc):
+ CODE = 6
+ SUB_CODE = 4
+
+
+class ConnRejected(BgpExc):
+ """Error to indicate Connection Rejected.
+
+ RFC says: If a BGP speaker decides to disallow a BGP connection (e.g., the
+ peer is not configured locally) after the speaker accepts a transport
+ protocol connection, then the BGP speaker SHOULD send a NOTIFICATION
+ message with the Error Code Cease and the Error Subcode "Connection
+ Rejected".
+ """
+ CODE = 6
+ SUB_CODE = 5
+
+
+class OtherConfChange(BgpExc):
+ CODE = 6
+ SUB_CODE = 6
+
+
+class CollisionResolution(BgpExc):
+ """Error to indicate Connection Collision Resolution.
+
+ RFC says: If a BGP speaker decides to send a NOTIFICATION message with the
+ Error Code Cease as a result of the collision resolution procedure (as
+ described in [BGP-4]), then the subcode SHOULD be set to "Connection
+ Collision Resolution".
+ """
+ CODE = 6
+ SUB_CODE = 7
+
+
+class OutOfResource(BgpExc):
+ CODE = 6
+ SUB_CODE = 8
diff --git a/ryu/services/protocols/bgp/protocols/bgp/messages.py b/ryu/services/protocols/bgp/protocols/bgp/messages.py
new file mode 100644
index 00000000..db4f8403
--- /dev/null
+++ b/ryu/services/protocols/bgp/protocols/bgp/messages.py
@@ -0,0 +1,540 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This module provides BGP protocol message classes and utility methods to encode
+and decode them.
+
+This file is adapted from pybgp open source project.
+"""
+from abc import ABCMeta
+from abc import abstractmethod
+from copy import copy
+import cStringIO
+import logging
+import socket
+import struct
+
+from ryu.services.protocols.bgp.protocols.bgp import capabilities
+from ryu.services.protocols.bgp.protocols.bgp.exceptions import BadBgpId
+from ryu.services.protocols.bgp.protocols.bgp.exceptions import BadLen
+from ryu.services.protocols.bgp.protocols.bgp.exceptions import BadMsg
+from ryu.services.protocols.bgp.protocols.bgp.exceptions import BadNotification
+from ryu.services.protocols.bgp.protocols.bgp.exceptions import \
+ MalformedAttrList
+from ryu.services.protocols.bgp.protocols.bgp.exceptions import \
+ UnacceptableHoldTime
+from ryu.services.protocols.bgp.protocols.bgp import nlri
+from ryu.services.protocols.bgp.protocols.bgp.nlri import get_rf
+from ryu.services.protocols.bgp.protocols.bgp import OrderedDict
+from ryu.services.protocols.bgp.protocols.bgp import pathattr
+from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4
+from ryu.services.protocols.bgp.utils.validation import is_valid_old_asn
+
+
+LOG = logging.getLogger('bgpspeaker.bgp.proto.messages')
+
+# BGP capability optional parameter type
+CAP_OPT_PARA_TYPE = 2
+
+# Registry for bgp message class by their type code.
+# <key>: <value> - <type-code>: <msg class>
+_BGP_MESSAGE_REGISTRY = {}
+
+
+def _register_bgp_message(cls):
+ """Used as class decorator for registering bgp message class by their
+ type-code.
+ """
+ assert _BGP_MESSAGE_REGISTRY.get(cls.TYPE_CODE) is None
+ assert hasattr(cls, 'from_bytes')
+ _BGP_MESSAGE_REGISTRY[cls.TYPE_CODE] = cls
+ return cls
+
+
+class BgpMessage(object):
+ """Super class of all bgp messages.
+ """
+ __metaclass__ = ABCMeta
+ TYPE_CODE = 0
+ MSG_NAME = 'abstract-msg'
+ HEADER_SIZE = 19
+
+ @abstractmethod
+ def packvalue(self):
+ """Encodes the body of this bgp message."""
+ raise NotImplementedError()
+
+ def encode(self):
+ """Encodes this bgp message with header and body."""
+ body = self.packvalue()
+ return struct.pack('!16sHB', '\xff' * 16, 19 + len(body),
+ self.__class__.TYPE_CODE) + body
+
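+ # Illustrative example: encode() prepends the standard 19-byte BGP header,
+ # i.e. a 16-byte all-ones marker, a 2-byte total length and a 1-byte type
+ # code. A KEEPALIVE has an empty body, so Keepalive().encode() yields
+ # 19 bytes: '\xff' * 16 + '\x00\x13' + '\x04'.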
+
+class RecognizedBgpMessage(BgpMessage):
+ """Represents recognized/supported bgp message.
+
+ Declares a factory method to create an instance from bytes.
+ """
+ @classmethod
+ def from_bytes(cls, recv_bytes, total_msg_length):
+ raise NotImplementedError()
+
+
+@_register_bgp_message
+class Open(RecognizedBgpMessage):
+ """Represents bgp OPEN message.
+
+ This is the first message sent by each peer after TCP connection is
+ established.
+ """
+ MSG_NAME = 'open'
+ TYPE_CODE = 1
+ MIN_LENGTH = 29
+
+ def __init__(self, version, asnum, holdtime, bgpid, caps,
+ unrec_params=None):
+ # Validate arguments.
+ if version < 1:
+ raise ValueError('Invalid version number %s' % version)
+ if not is_valid_old_asn(asnum):
+ raise ValueError('Invalid AS number %s' % asnum)
+ if holdtime <= 2:
+ raise ValueError('Holdtime has to be greater than 2 sec.')
+ if not caps:
+ raise ValueError('Invalid capabilities.')
+ if not is_valid_ipv4(bgpid):
+ raise ValueError('Invalid bgp ID, should be valid IPv4, '
+ 'but given %s' % bgpid)
+
+ BgpMessage.__init__(self)
+ self._version = version
+ self._holdtime = holdtime
+ self._asnum = asnum
+ self._bgpid = bgpid
+ self._caps = caps
+ self._unrec_params = unrec_params
+ if not unrec_params:
+ self._unrec_params = OrderedDict()
+
+ @property
+ def version(self):
+ return self._version
+
+ @property
+ def holdtime(self):
+ return self._holdtime
+
+ @property
+ def asnum(self):
+ return self._asnum
+
+ @property
+ def bgpid(self):
+ return self._bgpid
+
+ @property
+ def caps(self):
+ return copy(self._caps)
+
+ @property
+ def unrec_params(self):
+ return copy(self._unrec_params)
+
+ @classmethod
+ def from_bytes(cls, recv_bytes, total_msg_len):
+ # Validate OPEN message length.
+ if len(recv_bytes) < 10:
+ raise BadLen(Open.TYPE_CODE, len(recv_bytes) + cls.HEADER_SIZE)
+
+ version, asnum, holdtime, bgpid, paramlen = \
+ struct.unpack_from('!BHH4sB', recv_bytes)
+
+ if len(recv_bytes) != (10 + paramlen):
+ # TODO(PH): Check what RFC says to do here.
+ LOG.debug('Open message: invalid length.')
+
+ offset = 10
+
+ # BGP implementation MUST reject Hold Time values of one or two
+ # seconds.
+ if holdtime <= 2:
+ raise UnacceptableHoldTime()
+
+ # BGP Identifier field MUST represent a valid unicast IP host address.
+ bgpid = socket.inet_ntoa(bgpid)
+ if not is_valid_ipv4(bgpid):
+ raise BadBgpId()
+
+ # Parse optional parameters.
+ caps = OrderedDict()
+ unrec_params = OrderedDict()
+ while offset < len(recv_bytes):
+ ptype, plen = struct.unpack_from('BB', recv_bytes, offset)
+ offset += 2
+ value = recv_bytes[offset:offset + plen]
+ offset += plen
+
+ # Parse capabilities optional parameter.
+ if ptype == CAP_OPT_PARA_TYPE:
+ bgp_caps = capabilities.decode(value)
+ # store decoded bgp capabilities by their capability-code
+ for cap in bgp_caps:
+ cap_code = cap.CODE
+ if cap_code in caps:
+ caps[cap_code].append(cap)
+ else:
+ caps[cap_code] = [cap]
+ else:
+ # Other unrecognized optional parameters.
+ unrec_params[ptype] = value
+
+ # Un-recognized capabilities are passed on; it is up to the application
+ # to check if unrecognized optional parameters are a problem and send a
+ # NOTIFICATION.
+ return cls(version, asnum, holdtime, bgpid, caps, unrec_params)
+
+ def packvalue(self):
+ params = cStringIO.StringIO()
+ # Capabilities optional parameters.
+ for capability in self.caps.itervalues():
+ for cap in capability:
+ encoded_cap = cStringIO.StringIO()
+ encoded_cap.write(cap.encode())
+ encoded_cap_value = encoded_cap.getvalue()
+ encoded_cap.close()
+ params.write(struct.pack('BB',
+ CAP_OPT_PARA_TYPE,
+ len(encoded_cap_value)))
+ params.write(encoded_cap_value)
+
+ # Other optional parameters.
+ for ptype, pvalue in self.unrec_params.items():
+ params.write(struct.pack('BB', ptype, len(pvalue)))
+ params.write(pvalue)
+
+ bgpid = socket.inet_aton(self.bgpid)
+ params_value = params.getvalue()
+ params.close()
+ return struct.pack('!BHH4sB', self.version, self.asnum, self.holdtime,
+ bgpid, len(params_value)) + params_value
+
+ def __str__(self):
+ str_rep = cStringIO.StringIO()
+ str_rep.write('Open message Ver=%s As#=%s Hold Time=%s Bgp Id=%s' %
+ (self.version, self.asnum, self.holdtime, self.bgpid))
+ for param, value in self.unrec_params.items():
+ str_rep.write(' unrec_param %s=%r' % (param, value))
+ for cap, value in self.caps.items():
+ str_rep.write(' cap %s=%r' % (cap, value))
+ return str_rep.getvalue()
+
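+ # Illustrative example: each capability advertised in the OPEN message is
+ # wrapped in an optional parameter of type CAP_OPT_PARA_TYPE (2). For
+ # instance, a route-refresh capability (code 2, zero-length value) is
+ # carried as the four bytes '\x02\x02\x02\x00': parameter type 2,
+ # parameter length 2, capability code 2, capability length 0.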
+
+@_register_bgp_message
+class Keepalive(BgpMessage):
+ MSG_NAME = 'keepalive'
+ TYPE_CODE = 4
+
+ @classmethod
+ def from_bytes(cls, recv_bytes, total_msg_len):
+ # Validate KeepAlive msg. length
+ if len(recv_bytes):
+ LOG.info("Received keepalive msg. with data! %r" % (recv_bytes,))
+ raise BadLen(
+ Keepalive.TYPE_CODE,
+ len(recv_bytes) + cls.HEADER_SIZE
+ )
+
+ self = cls()
+ return self
+
+ def packvalue(self):
+ return ''
+
+ def __str__(self):
+ return 'Keepalive message'
+
+
+@_register_bgp_message
+class RouteRefresh(BgpMessage):
+ MSG_NAME = 'route-refresh'
+ TYPE_CODE = 5
+
+ def __init__(self, route_family, demarcation=0):
+ BgpMessage.__init__(self)
+ self._route_family = route_family
+ self._demarcation = demarcation
+ self.eor_sent = False
+
+ @property
+ def route_family(self):
+ return self._route_family
+
+ @property
+ def demarcation(self):
+ return self._demarcation
+
+ @classmethod
+ def from_bytes(cls, recv_bytes, total_msg_len):
+ # Validate length of RouteRefresh message.
+ if len(recv_bytes) != 4:
+ raise BadLen(
+ RouteRefresh.TYPE_CODE,
+ len(recv_bytes) + cls.HEADER_SIZE
+ )
+
+ afi, reserved, safi = struct.unpack_from('!HBB', recv_bytes)
+ route_family = get_rf(afi, safi)
+ return cls(route_family, reserved)
+
+ def packvalue(self):
+ return struct.pack('!HBB', self.route_family.afi, self.demarcation,
+ self._route_family.safi)
+
+ def __str__(self):
+ return 'Route-refresh message (%s, %s)' % \
+ (self.route_family, self.demarcation)
+
+
+@_register_bgp_message
+class Notification(BgpMessage):
+ MSG_NAME = 'notification'
+ TYPE_CODE = 3
+ REASONS = {
+ (1, 1): 'Message Header Error: not synchronised',
+ (1, 2): 'Message Header Error: bad message len',
+ (1, 3): 'Message Header Error: bad message type',
+ (2, 1): 'Open Message Error: unsupported version',
+ (2, 2): 'Open Message Error: bad peer AS',
+ (2, 3): 'Open Message Error: bad BGP identifier',
+ (2, 4): 'Open Message Error: unsupported optional param',
+ (2, 5): 'Open Message Error: authentication failure',
+ (2, 6): 'Open Message Error: unacceptable hold time',
+ (2, 7): 'Open Message Error: Unsupported Capability',
+ (2, 8): 'Open Message Error: Unassigned',
+ (3, 1): 'Update Message Error: malformed attribute list',
+ (3, 2): 'Update Message Error: unrecognized well-known attr',
+ (3, 3): 'Update Message Error: missing well-known attr',
+ (3, 4): 'Update Message Error: attribute flags error',
+ (3, 5): 'Update Message Error: attribute length error',
+ (3, 6): 'Update Message Error: invalid origin attr',
+ (3, 7): 'Update Message Error: as routing loop',
+ (3, 8): 'Update Message Error: invalid next hop attr',
+ (3, 9): 'Update Message Error: optional attribute error',
+ (3, 10): 'Update Message Error: invalid network field',
+ (3, 11): 'Update Message Error: malformed AS_PATH',
+ (4, 1): 'Hold Timer Expired',
+ (5, 1): 'Finite State Machine Error',
+ (6, 1): 'Cease: Maximum Number of Prefixes Reached',
+ (6, 2): 'Cease: Administrative Shutdown',
+ (6, 3): 'Cease: Peer De-configured',
+ (6, 4): 'Cease: Administrative Reset',
+ (6, 5): 'Cease: Connection Rejected',
+ (6, 6): 'Cease: Other Configuration Change',
+ (6, 7): 'Cease: Connection Collision Resolution',
+ (6, 8): 'Cease: Out of Resources',
+ }
+
+ def __init__(self, code, subcode, data=''):
+ BgpMessage.__init__(self)
+ self._code = code
+ self._subcode = subcode
+ self._data = data
+
+ @property
+ def code(self):
+ return self._code
+
+ @property
+ def subcode(self):
+ return self._subcode
+
+ @property
+ def data(self):
+ return self._data
+
+ @classmethod
+ def from_bytes(cls, recv_bytes, total_msg_len):
+ # Validate NOTIFICATION msg. length.
+ if len(recv_bytes) < 2:
+ LOG.error('Received NOTIFICATION msg. with bad length %s' %
+ (len(recv_bytes) + cls.HEADER_SIZE))
+ raise BadNotification()
+
+ code, subcode = struct.unpack_from('BB', recv_bytes)
+ data = recv_bytes[2:]
+
+ # Check code or sub-code are recognized.
+ if not Notification.REASONS.get((code, subcode)):
+ LOG.error('Received notification msg. with unrecognized Error '
+ 'code or Sub-code (%s, %s)' % (code, subcode))
+ raise BadNotification()
+
+ return cls(code, subcode, data)
+
+ def __str__(self):
+ c, s = self.code, self.subcode
+ if (c, s) in self.REASONS:
+ return ('Notification "%s" params %r' %
+ (self.REASONS[c, s], self.data))
+ return ('Notification message code=%d subcode=%d params=%r' %
+ (self.code, self.subcode, self.data))
+
+ def packvalue(self):
+ v = struct.pack('BB', self.code, self.subcode)
+ if self.data:
+ v += self.data
+ return v
+
+
+@_register_bgp_message
+class Update(BgpMessage):
+ MSG_NAME = 'update'
+ TYPE_CODE = 2
+ WITHDRAW_NLRI = 'withdraw_nlri'
+ PATH_ATTR_AND_NLRI = 'path_attr_and_nlri'
+ MIN_LENGTH = 23
+
+ def __init__(self, pathattr_map=None, nlri_list=None, withdraw_list=None):
+ """Initailizes a new `Update` instance.
+
+ Parameter:
+ - `pathattr_map`: (OrderedDict) key -> attribute name,
+ value -> attribute.
+ - `nlri_list`: (list/iterable) NLRIs.
+ - `withdraw_list`: (list/iterable) Withdraw routes.
+ """
+ if nlri_list is None:
+ nlri_list = []
+ if withdraw_list is None:
+ withdraw_list = []
+ if not pathattr_map:
+ pathattr_map = OrderedDict()
+
+ self._nlri_list = list(nlri_list)
+ self._withdraw_list = list(withdraw_list)
+ self._pathattr_map = copy(pathattr_map)
+
+ @property
+ def nlri_list(self):
+ return self._nlri_list[:]
+
+ @property
+ def withdraw_list(self):
+ return self._withdraw_list[:]
+
+ @property
+ def pathattr_map(self):
+ return copy(self._pathattr_map)
+
+ def get_path_attr(self, attr_name):
+ return self._pathattr_map.get(attr_name)
+
+ @classmethod
+ def from_bytes(cls, recv_bytes, total_msg_len):
+ # Validate UPDATE message length
+ if len(recv_bytes) < 4:
+ raise BadLen(Update.TYPE_CODE, len(recv_bytes) + cls.HEADER_SIZE)
+ withdraw_list = None
+ nlri_list = None
+ pathattr_map = OrderedDict()
+
+ d = {}
+ idx = 0
+ # Compute withdraw route length + total attribute length.
+ recv_len = 0
+ for kind in (Update.WITHDRAW_NLRI, Update.PATH_ATTR_AND_NLRI):
+ plen, = struct.unpack_from('!H', recv_bytes, idx)
+ idx += 2
+ d[kind] = recv_bytes[idx: (idx + plen)]
+ idx += plen
+ recv_len += plen
+
+ # Check if length of received bytes is valid.
+ if (recv_len + Update.MIN_LENGTH) < total_msg_len:
+ raise MalformedAttrList()
+
+ if d[Update.WITHDRAW_NLRI]:
+ withdraw_list = nlri.parse(d[Update.WITHDRAW_NLRI])
+ # TODO(PH): We have to test how ipv4 nlri packed after path-attr are
+ # getting parsed.
+ nlri_list = nlri.parse(recv_bytes[idx:])
+
+ idx = 0
+ recv_bytes = d[Update.PATH_ATTR_AND_NLRI]
+ while idx < len(recv_bytes):
+ used, pattr = pathattr.decode(recv_bytes, idx)
+ # TODO(PH) Can optimize here by checking if path attribute is
+ # MpReachNlri and stop parsing if RT are not interesting.
+ idx += used
+ pathattr_map[pattr.ATTR_NAME] = pattr
+
+ return cls(pathattr_map=pathattr_map,
+ nlri_list=nlri_list, withdraw_list=withdraw_list)
+
+ def __repr__(self):
+ str_rep = cStringIO.StringIO()
+ str_rep.write('<Update message withdraw=%r' % (self._withdraw_list,))
+ for ptype, pattr in self._pathattr_map.items():
+ str_rep.write('\n path attr %s, %s' % (ptype, pattr,))
+# if ptype in (MpReachNlri.ATTR_NAME, MpUnreachNlri):
+# for nnlri in pattr.nlri_list:
+# str_rep.write('\n nlri=%s' % (nnlri,))
+ for nnlri in self._nlri_list:
+ str_rep.write('\nmp nlri %s' % (nnlri,))
+
+ str_rep.write('>')
+
+ return str_rep.getvalue()
+
+ def __cmp__(self, other):
+ if isinstance(other, Update):
+ return cmp(
+ (self._pathattr_map, self._withdraw_list, self._nlri_list),
+ (other.pathattr_map, other.withdraw_list, other.nlri_list),
+ )
+ return -1
+
+ def packvalue(self):
+ bvalue = ''
+
+ bwithdraw = ''
+ for awithdraw in self._withdraw_list:
+ bwithdraw += awithdraw.encode()
+
+ bvalue += struct.pack('!H', len(bwithdraw))
+ bvalue += bwithdraw
+
+ pattr = ''
+ for _, attr in self._pathattr_map.items():
+ if attr is not None:
+ pattr += attr.encode()
+ bvalue += struct.pack('!H', len(pattr))
+ bvalue += pattr
+
+ for anlri in self._nlri_list:
+ bvalue += anlri.encode()
+
+ return bvalue
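+
+# Illustrative sketch: an empty UPDATE packs to a zero withdrawn-routes length
+# followed by a zero total-path-attribute length; together with the 19-byte
+# common header this yields the 23-byte minimum (MIN_LENGTH).
+#
+#   >>> Update().packvalue() == '\x00' * 4
+#   True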
+
+
+def decode(ptype, payload, msg_len):
+ """Decodes given payload into bgp message instance of given type.
+ """
+ bgp_msg_class = _BGP_MESSAGE_REGISTRY.get(ptype)
+ if not bgp_msg_class:
+ raise BadMsg(ptype)
+
+ return bgp_msg_class.from_bytes(payload, msg_len)
diff --git a/ryu/services/protocols/bgp/protocols/bgp/nlri.py b/ryu/services/protocols/bgp/protocols/bgp/nlri.py
new file mode 100644
index 00000000..bed279d3
--- /dev/null
+++ b/ryu/services/protocols/bgp/protocols/bgp/nlri.py
@@ -0,0 +1,841 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ Module related to BGP Network layer reachability information (NLRI).
+"""
+
+from abc import ABCMeta
+import logging
+import socket
+import struct
+from types import IntType
+
+from ryu.services.protocols.bgp.protocols.bgp.exceptions import OptAttrError
+from ryu.services.protocols.bgp.utils.other import bytes2hex
+from ryu.services.protocols.bgp.utils.other import hex2byte
+from ryu.services.protocols.bgp.utils.validation import is_valid_ext_comm_attr
+from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4
+from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4_prefix
+from ryu.services.protocols.bgp.utils.validation import is_valid_ipv6_prefix
+from ryu.services.protocols.bgp.utils.validation import is_valid_mpls_label
+from ryu.services.protocols.bgp.utils.validation import is_valid_old_asn
+from ryu.services.protocols.bgp.utils.validation import is_valid_route_disc
+
+
+LOG = logging.getLogger('protocols.bgp.nlri')
+
+# Registry of NLRI classes keyed by their address family.
+# <key>: <value> - <afi, safi>: <nlri class>
+_NLRI_REGISTRY = {}
+
+
+def _register_nlri(cls):
+ """Used as class decorator for registering NLRI classes by their afi/safi.
+ """
+ assert _NLRI_REGISTRY.get((cls.AFI, cls.SAFI)) is None
+ _NLRI_REGISTRY[(cls.AFI, cls.SAFI)] = cls
+ return cls
+
+
+#
+# AddressFamily
+#
+class AddressFamily(object):
+ """Subclasses of this class hold methods for a specific AF and
+ help the calling code to stay AF-independent.
+
+    Each subclass needs only a single (singleton) instance (see below).
+ """
+
+ def __init__(self, afi):
+ self.afi = afi
+
+ def __hash__(self):
+ return hash(self.afi)
+
+ def __cmp__(self, other):
+ afi1 = None
+ afi2 = None
+ if isinstance(other, IntType):
+ afi2 = other
+ else:
+ afi2 = other.afi
+ if isinstance(self, IntType):
+ afi1 = self
+ else:
+ afi1 = self.afi
+ return cmp(afi1, afi2)
+
+
+class AfiIpv4(AddressFamily):
+ def __init__(self):
+ super(AfiIpv4, self).__init__(1)
+
+ def __repr__(self):
+ return "IPv4"
+
+
+class AfiIpv6(AddressFamily):
+ def __init__(self):
+ super(AfiIpv6, self).__init__(2)
+
+ def __repr__(self):
+ return "IPv6"
+
+
+#
+# SubAddressFamily
+#
+# A sub-address family as defined by BGP.
+#
+class SubAddressFamily(object):
+
+ def __init__(self, safi):
+ self.safi = safi
+
+ def __hash__(self):
+ return hash(self.safi)
+
+ def __cmp__(self, other):
+ safi1 = None
+ safi2 = None
+ if isinstance(self, IntType):
+ safi1 = self
+ else:
+ safi1 = self.safi
+ if isinstance(other, IntType):
+ safi2 = other
+ else:
+ safi2 = other.safi
+ return cmp(safi1, safi2)
+
+
+class SafiNlriUnicast(SubAddressFamily):
+ def __init__(self):
+ super(SafiNlriUnicast, self).__init__(1)
+
+ def __repr__(self):
+ return "SafiNlriUnicast"
+
+
+class SafiVpn(SubAddressFamily):
+ def __init__(self):
+ super(SafiVpn, self).__init__(128)
+
+ def __repr__(self):
+ return "SafiVpn"
+
+
+class SafiRtc(SubAddressFamily):
+ def __init__(self):
+ super(SafiRtc, self).__init__(132)
+
+ def __repr__(self):
+ return "SafiRtc"
+
+NLRI_UC = SafiNlriUnicast()
+SAF_VPN = SafiVpn()
+SAF_RTC = SafiRtc()
+
+# Singleton objects for each AF.
+AF_IPv4 = AfiIpv4()
+AF_IPv6 = AfiIpv6()
+
+# Constants to represent address family and sub-address family.
+ADD_FMLY = 'afi'
+SUB_ADD_FMLY = 'safi'
+
+
+#
+# RouteFamily
+#
+class RouteFamily(object):
+ """The family that a given route (or Network Layer Reachability
+ Information) belongs to.
+
+ Basically represents a combination of AFI/SAFI.
+ """
+ __slots__ = ('_add_fmly', '_sub_add_fmly')
+
+ def __init__(self, add_fmly, sub_add_fmly):
+        # Validate input.
+ if not add_fmly or not sub_add_fmly:
+ raise ValueError('Invalid arguments.')
+
+ self._add_fmly = add_fmly
+ self._sub_add_fmly = sub_add_fmly
+
+ @property
+ def afi(self):
+ return self._add_fmly.afi
+
+ @property
+ def safi(self):
+ return self._sub_add_fmly.safi
+
+ def __repr__(self):
+ return ('RouteFamily(afi=%s, safi=%s)' % (self.afi, self.safi))
+
+ def __cmp__(self, other):
+ other_rf = (other.afi, other.safi)
+ self_rf = (self.afi, self.safi)
+ return cmp(self_rf, other_rf)
+
+ @staticmethod
+ def is_valid(other):
+ if other and (hasattr(other, 'afi') and hasattr(other, 'safi')):
+ return True
+ return False
+
+# Various route family singletons.
+RF_IPv4_UC = RouteFamily(AF_IPv4, NLRI_UC)
+RF_IPv6_UC = RouteFamily(AF_IPv6, NLRI_UC)
+RF_IPv4_VPN = RouteFamily(AF_IPv4, SAF_VPN)
+RF_IPv6_VPN = RouteFamily(AF_IPv6, SAF_VPN)
+RF_RTC_UC = RouteFamily(AF_IPv4, SAF_RTC)
+
+_rf_by_afi_safi = {
+ (1, 1): RF_IPv4_UC,
+ (2, 1): RF_IPv6_UC,
+ (1, 128): RF_IPv4_VPN,
+ (2, 128): RF_IPv6_VPN,
+ (1, 132): RF_RTC_UC
+}
+
+
+def get_rf(afi, safi):
+ """Returns *RouteFamily* singleton instance for given *afi* and *safi*."""
+ if not isinstance(afi, IntType):
+ afi = int(afi)
+ if not isinstance(safi, IntType):
+ safi = int(safi)
+ return _rf_by_afi_safi.get((afi, safi))
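+
+# Illustrative sketch: lookups return the singletons defined above, or None
+# for an unsupported (afi, safi) combination.
+#
+#   >>> get_rf(1, 128) is RF_IPv4_VPN
+#   True
+#   >>> get_rf(2, 2) is None
+#   True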
+
+
+# TODO(PH): Consider trade-offs of making this extend Internable.
+class Nlri(object):
+ """Represents super class of all Network Layer Reachability Information.
+ """
+    __metaclass__ = ABCMeta
+ __slots__ = ()
+
+ # Sub-classes should set afi/safi constants appropriately.
+ AFI = 0
+ SAFI = 0
+
+    def encode(self):
+        raise NotImplementedError()
+
+ @property
+ def route_family(self):
+ return get_rf(self.__class__.AFI, self.__class__.SAFI)
+
+
+@_register_nlri
+class Vpnv4(Nlri):
+ """Vpnv4 NLRI.
+ """
+ __slots__ = ('_labels', '_route_disc', '_prefix')
+
+ AFI = 1
+ SAFI = 128
+
+ def __init__(self, labels, route_disc, prefix):
+ Nlri.__init__(self)
+ if not labels:
+ labels = []
+
+ # Validate given params
+ for label in labels:
+ if not is_valid_mpls_label(label):
+ raise ValueError('Invalid label %s' % label)
+ if (not is_valid_ipv4_prefix(prefix) or
+ not is_valid_route_disc(route_disc)):
+ raise ValueError('Invalid parameter value(s).')
+
+ self._labels = labels
+ self._route_disc = route_disc
+ self._prefix = prefix
+
+ @property
+ def label_list(self):
+ return self._labels[:]
+
+ @property
+ def route_disc(self):
+ return self._route_disc
+
+ @property
+ def prefix(self):
+ return self._prefix
+
+ @property
+ def formatted_nlri_str(self):
+ return "%s:%s" % (self._route_disc, self.prefix)
+
+ def __repr__(self):
+ if self._labels:
+ l = ','.join([str(l) for l in self._labels])
+ else:
+ l = 'none'
+
+ return ('Vpnv4(label=%s, route_disc=%s, prefix=%s)' %
+ (l, self.route_disc, self.prefix))
+
+ def __str__(self):
+ return 'Vpnv4 %s:%s, %s' % (self.route_disc, self.prefix, self._labels)
+
+ def __cmp__(self, other):
+ return cmp(
+ (self._labels, self.route_disc, self.prefix),
+ (other.label_list, other.route_disc, other.prefix),
+ )
+
+ def encode(self):
+ plen = 0
+ v = ''
+ labels = self._labels[:]
+
+ if not labels:
+ return '\0'
+
+ labels = [l << 4 for l in labels]
+ labels[-1] |= 1
+
+ for l in labels:
+ lo = l & 0xff
+ hi = (l & 0xffff00) >> 8
+ v += struct.pack('>HB', hi, lo)
+ plen += 24
+
+ l, r = self.route_disc.split(':')
+ if '.' in l:
+ ip = socket.inet_aton(l)
+ route_disc = struct.pack('!H4sH', 1, ip, int(r))
+ else:
+ route_disc = struct.pack('!HHI', 0, int(l), int(r))
+
+ v += route_disc
+ plen += 64
+
+ ip, masklen = self.prefix.split('/')
+ ip = socket.inet_aton(ip)
+ masklen = int(masklen)
+
+ plen += masklen
+ if masklen > 24:
+ v += ip
+ elif masklen > 16:
+ v += ip[:3]
+ elif masklen > 8:
+ v += ip[:2]
+ elif masklen > 0:
+ v += ip[:1]
+ else:
+ pass
+
+ return struct.pack('B', plen) + v
+
+ @classmethod
+ def from_bytes(cls, plen, val):
+
+ if plen == 0:
+ # TODO(PH): Confirm this is valid case and implementation.
+ return cls([], '0:0', '0.0.0.0/0')
+
+ idx = 0
+
+ # plen is the length, in bits, of all the MPLS labels,
+ # plus the 8-byte RD, plus the IP prefix
+ labels = []
+ while True:
+ ls, = struct.unpack_from('3s', val, idx)
+ idx += 3
+ plen -= 24
+
+ if ls == '\x80\x00\x00':
+ # special null label for vpnv4 withdraws
+ labels = None
+ break
+
+ label, = struct.unpack_from('!I', '\x00' + ls)
+ bottom = label & 1
+
+ labels.append(label >> 4)
+ if bottom:
+ break
+ # TODO(PH): We are breaking after first label as we support only
+ # one label for now. Revisit if we need to support stack of labels.
+ break
+
+ rdtype, route_disc = struct.unpack_from('!H6s', val, idx)
+ if rdtype == 1:
+ rdip, num = struct.unpack('!4sH', route_disc)
+ rdip = socket.inet_ntoa(rdip)
+ route_disc = '%s:%s' % (rdip, num)
+ else:
+ num1, num2 = struct.unpack('!HI', route_disc)
+ route_disc = '%s:%s' % (num1, num2)
+
+ idx += 8
+ plen -= 64
+
+ ipl = pb(plen)
+ ip = val[idx:idx + ipl]
+ idx += ipl
+
+ prefix = unpack_ipv4(ip, plen)
+
+ return cls(labels, route_disc, prefix)
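+
+# Illustrative sketch (assuming the validators accept these values): the
+# leading length octet of an encoded Vpnv4 NLRI counts bits, e.g. one label
+# (24) + RD (64) + a /24 prefix (24) = 112, followed by the 3-byte label with
+# the bottom-of-stack bit set, the 8-byte RD and the 3 significant prefix
+# bytes.
+#
+#   >>> v = Vpnv4([100], '65000:1', '10.1.1.0/24').encode()
+#   >>> ord(v[0]), len(v)
+#   (112, 15)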
+
+
+@_register_nlri
+class Vpnv6(Nlri):
+ """Vpnv4 NLRI.
+ """
+ __slots__ = ('_labels', '_route_disc', '_prefix')
+
+ AFI = 2
+ SAFI = 128
+
+ def __init__(self, labels, route_disc, prefix):
+ Nlri.__init__(self)
+ if not labels:
+ labels = []
+
+ # Validate given params
+ for label in labels:
+ if not is_valid_mpls_label(label):
+ raise ValueError('Invalid label %s' % label)
+ if (not is_valid_route_disc(route_disc) or
+ not is_valid_ipv6_prefix(prefix)):
+ raise ValueError('Invalid parameter value(s).')
+
+ self._labels = labels
+ self._route_disc = route_disc
+ self._prefix = prefix
+
+ @property
+ def label_list(self):
+ return self._labels[:]
+
+ @property
+ def route_disc(self):
+ return self._route_disc
+
+ @property
+ def prefix(self):
+ return self._prefix
+
+ @property
+ def formatted_nlri_str(self):
+ return "%s:%s" % (self._route_disc, self.prefix)
+
+ def __repr__(self):
+ if self._labels:
+ l = ','.join([str(l) for l in self._labels])
+ else:
+ l = 'none'
+
+ return ('Vpnv6(label=%s, route_disc=%s, prefix=%s)' %
+ (l, self.route_disc, self.prefix))
+
+ def __str__(self):
+ return 'Vpnv6 %s:%s, %s' % (self.route_disc, self.prefix, self._labels)
+
+ def __cmp__(self, other):
+ return cmp(
+ (self._labels, self.route_disc, Ipv6(self.prefix).encode()),
+ (other.label_list, other.route_disc, Ipv6(other.prefix).encode()),
+ )
+
+ def encode(self):
+ plen = 0
+ v = ''
+ labels = self._labels[:]
+
+ if not labels:
+ return '\0'
+
+ labels = [l << 4 for l in labels]
+ labels[-1] |= 1
+
+ for l in labels:
+ lo = l & 0xff
+ hi = (l & 0xffff00) >> 8
+ v += struct.pack('>HB', hi, lo)
+ plen += 24
+
+ l, r = self.route_disc.split(':')
+ if '.' in l:
+ ip = socket.inet_aton(l)
+ route_disc = struct.pack('!H4sH', 1, ip, int(r))
+ else:
+ route_disc = struct.pack('!HHI', 0, int(l), int(r))
+ v += route_disc
+ plen += 64
+
+ ip, masklen = self.prefix.split('/')
+ ip = socket.inet_pton(socket.AF_INET6, ip)
+ masklen = int(masklen)
+
+ plen += masklen
+ v += ip[:pb6(masklen)]
+
+ return struct.pack('B', plen) + v
+
+ @classmethod
+ def from_bytes(cls, plen, val):
+ if plen == 0:
+ # TODO(PH): Confirm this is valid case and implementation.
+ return cls([], '0:0', '::/0')
+
+ idx = 0
+
+ # plen is the length, in bits, of all the MPLS labels,
+ # plus the 8-byte RD, plus the IP prefix
+ labels = []
+ while True:
+ ls, = struct.unpack_from('3s', val, idx)
+ idx += 3
+ plen -= 24
+
+ if ls == '\x80\x00\x00':
+                # special null label for vpnv6 withdraws
+ labels = None
+ break
+
+ label, = struct.unpack_from('!I', '\x00' + ls)
+ bottom = label & 1
+
+ labels.append(label >> 4)
+ if bottom:
+ break
+ # TODO(PH): We are breaking after first label as we support only
+ # one label for now. Revisit if we need to support stack of labels.
+ break
+
+ rdtype, route_disc = struct.unpack_from('!H6s', val, idx)
+ if rdtype == 1:
+ rdip, num = struct.unpack('!4sH', route_disc)
+ rdip = socket.inet_ntoa(rdip)
+ route_disc = '%s:%s' % (rdip, num)
+ else:
+ num1, num2 = struct.unpack('!HI', route_disc)
+ route_disc = '%s:%s' % (num1, num2)
+
+ idx += 8
+ plen -= 64
+
+ ipl = pb6(plen)
+ ip = val[idx:idx + ipl]
+ idx += ipl
+
+ prefix = unpack_ipv6(ip, plen)
+
+ return cls(labels, route_disc, prefix)
+
+
+@_register_nlri
+class Ipv4(Nlri):
+ __slots__ = ('_prefix')
+
+ AFI = 1
+ SAFI = 1
+
+ def __init__(self, prefix):
+ if not is_valid_ipv4_prefix(prefix):
+ raise ValueError('Invalid prefix %s.' % prefix)
+ Nlri.__init__(self)
+ self._prefix = prefix
+
+ @property
+ def prefix(self):
+ return self._prefix
+
+ @property
+ def formatted_nlri_str(self):
+ return self._prefix
+
+ def __cmp__(self, other):
+ aip, alen = self.prefix.split('/')
+ alen = int(alen)
+ aip = socket.inet_aton(aip)
+
+ bip, blen = other.prefix.split('/')
+ blen = int(blen)
+ bip = socket.inet_aton(bip)
+
+ return cmp((aip, alen), (bip, blen))
+
+ def encode(self):
+ plen = 0
+ v = ''
+
+ ip, masklen = self.prefix.split('/')
+ ip = socket.inet_aton(ip)
+ masklen = int(masklen)
+
+ plen += masklen
+ if masklen > 24:
+ v += ip
+ elif masklen > 16:
+ v += ip[:3]
+ elif masklen > 8:
+ v += ip[:2]
+ elif masklen > 0:
+ v += ip[:1]
+ else:
+ pass
+
+ return struct.pack('B', plen) + v
+
+ def __repr__(self):
+ return 'Ipv4(%s)' % (self.prefix)
+
+ def __str__(self):
+ return 'Ipv4 ' + self.prefix
+
+ @classmethod
+ def from_bytes(cls, plen, val):
+ return cls(unpack_ipv4(val, plen))
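+
+# Illustrative sketch: IPv4 NLRI round trip. encode() packs the mask length
+# (in bits) followed by only the significant prefix octets; from_bytes()
+# reverses it.
+#
+#   >>> Ipv4('10.0.0.0/8').encode() == '\x08\x0a'
+#   True
+#   >>> Ipv4.from_bytes(8, '\x0a').prefix
+#   '10.0.0.0/8'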
+
+
+@_register_nlri
+class Ipv6(Nlri):
+ __slots__ = ('_prefix')
+
+ AFI = 2
+ SAFI = 1
+
+ def __init__(self, prefix):
+ if not is_valid_ipv6_prefix(prefix):
+ raise ValueError('Invalid prefix %s.' % prefix)
+ Nlri.__init__(self)
+ self._prefix = prefix
+
+ @property
+ def prefix(self):
+ return self._prefix
+
+ @property
+ def formatted_nlri_str(self):
+ return self._prefix
+
+ def __cmp__(self, other):
+ abin = self.encode()
+ bbin = other.encode()
+ return cmp(abin, bbin)
+
+ def encode(self):
+ plen = 0
+ v = ''
+
+ ip, masklen = self.prefix.split('/')
+ ip = socket.inet_pton(socket.AF_INET6, ip)
+ masklen = int(masklen)
+
+ plen += masklen
+ ip_slice = pb6(masklen)
+ v += ip[:ip_slice]
+
+ return struct.pack('B', plen) + v
+
+ def __repr__(self):
+ return 'Ipv6(%s)' % (self.prefix)
+
+ def __str__(self):
+ return 'Ipv6 ' + self.prefix
+
+ @classmethod
+ def from_bytes(cls, plen, val):
+ return cls(unpack_ipv6(val, plen))
+
+
+@_register_nlri
+class RtNlri(Nlri):
+ """Route Target Membership NLRI.
+
+ Route Target membership NLRI is advertised in BGP UPDATE messages using
+ the MP_REACH_NLRI and MP_UNREACH_NLRI attributes.
+ """
+ __slots__ = ('_origin_as', '_route_target')
+
+ AFI = 1
+ SAFI = 132
+ DEFAULT_AS = '0:0'
+ DEFAULT_RT = '0:0'
+
+ def __init__(self, origin_as, route_target):
+ Nlri.__init__(self)
+        # Unless both values are the defaults,
+        if not (origin_as is RtNlri.DEFAULT_AS and
+                route_target is RtNlri.DEFAULT_RT):
+            # validate them.
+ if (not is_valid_old_asn(origin_as) or
+ not is_valid_ext_comm_attr(route_target)):
+ raise ValueError('Invalid params.')
+ self._origin_as = origin_as
+ self._route_target = route_target
+
+ @property
+ def origin_as(self):
+ return self._origin_as
+
+ @property
+ def route_target(self):
+ return self._route_target
+
+ @property
+ def formatted_nlri_str(self):
+ return "%s:%s" % (self.origin_as, self.route_target)
+
+ def is_default_rtnlri(self):
+ if (self._origin_as is RtNlri.DEFAULT_AS and
+ self._route_target is RtNlri.DEFAULT_RT):
+ return True
+ return False
+
+ def __str__(self):
+ return 'RtNlri ' + str(self._origin_as) + ':' + self._route_target
+
+ def __repr__(self):
+ return 'RtNlri(%s, %s)' % (self._origin_as, self._route_target)
+
+ def __cmp__(self, other):
+ return cmp(
+ (self._origin_as, self._route_target),
+ (other.origin_as, other.route_target),
+ )
+
+ @classmethod
+ def from_bytes(cls, plen, val):
+ idx = 0
+ if plen == 0 and not val:
+ return cls(RtNlri.DEFAULT_AS, RtNlri.DEFAULT_RT)
+
+ # Extract origin AS.
+ origin_as, = struct.unpack_from('!I', val, idx)
+ idx += 4
+
+ # Extract route target.
+ route_target = ''
+ etype, esubtype, payload = struct.unpack_from('BB6s', val, idx)
+        # RFC says: the high-order octet of the Type field for a Route Target
+        # community can be 0x00, 0x01, or 0x02, and the low-order octet is
+        # 0x02. We also accept a low-order octet of 0x00 because Routem packs
+        # it that way.
+        # TODO(PH): Remove this exception when it breaks something.
+ if etype in (0, 2) and esubtype in (0, 2):
+ # If we have route target community in AS number format.
+ asnum, i = struct.unpack('!HI', payload)
+ route_target = ('%s:%s' % (asnum, i))
+ elif etype == 1 and esubtype == 2:
+ # If we have route target community in IP address format.
+ ip_addr, i = struct.unpack('!4sH', payload)
+ ip_addr = socket.inet_ntoa(ip_addr)
+ route_target = ('%s:%s' % (ip_addr, i))
+ elif etype == 0 and esubtype == 1:
+ # TODO(PH): Parsing of RtNlri 1:1:100:1
+ asnum, i = struct.unpack('!HI', payload)
+ route_target = ('%s:%s' % (asnum, i))
+
+ return cls(origin_as, route_target)
+
+ def encode(self):
+ rt_nlri = ''
+ if not self.is_default_rtnlri():
+ rt_nlri += struct.pack('!I', self.origin_as)
+ # Encode route target
+ first, second = self.route_target.split(':')
+ if '.' in first:
+ ip_addr = socket.inet_aton(first)
+ rt_nlri += struct.pack('!BB4sH', 1, 2, ip_addr, int(second))
+ else:
+ rt_nlri += struct.pack('!BBHI', 0, 2, int(first), int(second))
+
+ # RT Nlri is 12 octets
+ return struct.pack('B', (8 * 12)) + rt_nlri
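+
+# Illustrative sketch (assuming the validators accept a 2-byte AS number and
+# an AS-number-format route target): a non-default RT NLRI is always 12
+# octets, so the length prefix is 96 bits.
+#
+#   >>> v = RtNlri(65000, '65000:100').encode()
+#   >>> ord(v[0]), len(v)
+#   (96, 13)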
+
+
+def pb(masklen):
+ if masklen > 24:
+ return 4
+ elif masklen > 16:
+ return 3
+ elif masklen > 8:
+ return 2
+ elif masklen > 0:
+ return 1
+ return 0
+
+_v6_bits = range(120, -8, -8)
+_v6_bytes = [i / 8 for i in range(128, 0, -8)]
+
+
+def pb6(masklen):
+ for idx, bits in enumerate(_v6_bits):
+ if masklen > bits:
+ return _v6_bytes[idx]
+ return 0
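+
+# Illustrative sketch: pb()/pb6() map a prefix length in bits to the number of
+# octets actually carried on the wire.
+#
+#   >>> pb(8), pb(9), pb(24), pb(32)
+#   (1, 2, 3, 4)
+#   >>> pb6(0), pb6(64), pb6(128)
+#   (0, 8, 16)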
+
+
+def unpack_ipv4(pi, masklen):
+ pi += '\x00' * 4
+ return '%s/%s' % (socket.inet_ntoa(pi[:4]), masklen)
+
+
+def unpack_ipv6(pi, masklen):
+ pi += '\x00' * 16
+ ip = socket.inet_ntop(socket.AF_INET6, pi[:16])
+ return '%s/%s' % (ip, masklen)
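+
+# Illustrative sketch: the unpack helpers zero-pad the truncated prefix bytes
+# back to a full address before formatting.
+#
+#   >>> unpack_ipv4('\x0a\x01', 16)
+#   '10.1.0.0/16'
+#   >>> unpack_ipv6('\x20\x01\x0d\xb8', 32)
+#   '2001:db8::/32'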
+
+
+def ipv4_mapped_ipv6(ipv4):
+ if not is_valid_ipv4(ipv4):
+ raise ValueError('Invalid ipv4 address given %s.' % ipv4)
+ ipv4n = socket.inet_pton(socket.AF_INET, ipv4)
+ ipv6_hex = '00' * 10 + 'ff' * 2 + bytes2hex(ipv4n)
+ ipv6n = hex2byte(ipv6_hex)
+ ipv6 = socket.inet_ntop(socket.AF_INET6, ipv6n)
+ return ipv6
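+
+# Illustrative sketch: builds the standard IPv4-mapped IPv6 form
+# (::ffff:a.b.c.d); the exact textual rendering comes from inet_ntop.
+#
+#   >>> ipv4_mapped_ipv6('10.0.0.1')
+#   '::ffff:10.0.0.1'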
+
+
+# TODO(PH): Consider refactoring common functionality new methods
+# Look at previous commit
+def parse(received, afi=1, safi=1):
+ recv_nlri_list = []
+
+ klass = _NLRI_REGISTRY.get((afi, safi))
+ if not klass:
+ raise ValueError('Asked to parse unsupported NLRI afi/safi: '
+ '(%s, %s)' % (afi, safi))
+
+ try:
+ idx = 0
+ while idx < len(received):
+ plen, = struct.unpack_from('B', received, idx)
+ idx += 1
+ nbytes, rest = divmod(plen, 8)
+ if rest:
+ nbytes += 1
+ val = received[idx:idx + nbytes]
+ idx += nbytes
+ recv_nlri_list.append(klass.from_bytes(plen, val))
+ except Exception:
+ raise OptAttrError()
+
+ return recv_nlri_list
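+
+# Illustrative sketch: parse() walks <length-in-bits, prefix-bytes> records
+# and dispatches to the NLRI class registered for the given afi/safi
+# (IPv4 unicast by default).
+#
+#   >>> [str(n) for n in parse('\x08\x0a\x18\xc0\xa8\x01')]
+#   ['Ipv4 10.0.0.0/8', 'Ipv4 192.168.1.0/24']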
diff --git a/ryu/services/protocols/bgp/protocols/bgp/pathattr.py b/ryu/services/protocols/bgp/protocols/bgp/pathattr.py
new file mode 100644
index 00000000..04499d4b
--- /dev/null
+++ b/ryu/services/protocols/bgp/protocols/bgp/pathattr.py
@@ -0,0 +1,1076 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This module provides BGP Path Attributes classes and utility methods to
+encode and decode them.
+
+This file was adapted from pybgp open source project.
+"""
+from abc import ABCMeta
+from abc import abstractmethod
+import copy
+import logging
+import socket
+import StringIO
+import struct
+
+from ryu.services.protocols.bgp.protocols.bgp.exceptions import AttrFlagError
+from ryu.services.protocols.bgp.protocols.bgp.exceptions import AttrLenError
+from ryu.services.protocols.bgp.protocols.bgp.exceptions import InvalidNextHop
+from ryu.services.protocols.bgp.protocols.bgp.exceptions import \
+ InvalidOriginError
+from ryu.services.protocols.bgp.protocols.bgp.exceptions import MalformedAsPath
+from ryu.services.protocols.bgp.protocols.bgp.exceptions import OptAttrError
+from ryu.services.protocols.bgp.protocols.bgp import nlri
+from ryu.services.protocols.bgp.protocols.bgp.nlri import get_rf
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv4_VPN
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv6_VPN
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_RTC_UC
+from ryu.services.protocols.bgp.utils.internable import Internable
+from ryu.services.protocols.bgp.utils import validation
+from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4
+from ryu.services.protocols.bgp.utils.validation import is_valid_ipv6
+
+
+LOG = logging.getLogger('protocols.bgp.pathattr')
+
+# BGP Attribute flags
+EXTENDED_LEN_BGP_ATTR = 0x10
+PARTIAL_BGP_ATTR = 0x20
+TRANSITIVE_BGP_ATTR = 0x40
+OPTIONAL_BGP_ATTR = 0x80
+
+# BGP flag mask
+DEFAULT_FLAGS_MASK = 0x3f
+
+# BGP recognized path attribute class registry by type-code.
+# i.e. <key, value>: <type-code, path-attr-class>
+_PATH_ATTR_REGISTRY = {}
+
+
+def _register_path_attr(cls):
+ """Used as decorator for registering recognized bgp path attribute class
+ by their type-code.
+ """
+ assert issubclass(cls, RcgPathAttr)
+ assert hasattr(cls, 'TYPE_CODE') and hasattr(cls, 'FLAGS')
+ assert _PATH_ATTR_REGISTRY.get(cls.TYPE_CODE) is None
+ _PATH_ATTR_REGISTRY[cls.TYPE_CODE] = cls
+ return cls
+
+
+def decode(received, idx=0):
+ """Decodes given bytes into corresponding BGP path attribute.
+ """
+ iidx = idx
+ flagb, path_attr_type = struct.unpack_from('BB', received, idx)
+ idx += 2
+ used = 2
+
+ if flagb & 16:
+ length, = struct.unpack_from('>H', received, idx)
+ idx += 2
+ used += 2
+ else:
+ length, = struct.unpack_from('!B', received, idx)
+ idx += 1
+ used += 1
+
+ recv_data = received[idx:(idx + length)]
+ used += length
+
+ # Check if this attribute type is recognized.
+ path_attr_class = _PATH_ATTR_REGISTRY.get(path_attr_type)
+ path_attr = None
+ if path_attr_class:
+ # Check if flags match expected from known/recognized attribute type.
+ if not path_attr_class.check_flags(flagb):
+ LOG.error(
+ "Flags(%s) of pathattr %s received in update don't "
+ "match expected flags(%s)"
+ % (
+ flagb,
+ str(path_attr_class),
+ path_attr_class.FLAGS
+ )
+ )
+ raise AttrFlagError(data=received[iidx:used])
+
+ try:
+ path_attr = path_attr_class.from_bytes(recv_data)
+ except (AttrLenError, InvalidOriginError, InvalidNextHop,
+ OptAttrError) as e:
+ # Set attribute type, length and value as data/payload.
+ e.data = received[iidx:used]
+ raise e
+ else:
+ path_attr = UnRcgPathAttr(recv_data, flagb, path_attr_type)
+
+ return used, path_attr
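+
+# Illustrative sketch: decode() returns the number of octets consumed plus a
+# path-attribute instance (an UnRcgPathAttr for unrecognized type codes).
+#
+#   >>> used, attr = decode(Origin(Origin.IGP).encode())
+#   >>> used, attr.value
+#   (4, 'igp')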
+
+
+class PathAttr(Internable):
+ """Abstract base class for bgp path attributes.
+
+ Defines interface for all path attributes and provides some default util.
+ methods.
+ """
+ __metaclass__ = ABCMeta
+ __slots__ = ('_flags')
+ TYPE_CODE = 0
+ ATTR_NAME = 'default'
+
+ def __init__(self, flags):
+ super(PathAttr, self).__init__()
+ self._flags = flags
+
+ @property
+ def flags(self):
+ return self._flags
+
+ @abstractmethod
+ def packvalue(self):
+ """Encodes path-attribute value/pay-load into binary format."""
+ raise NotImplementedError()
+
+ def encode(self):
+ """Wraps packed path-attribute value with headers, such as,
+ flags, type-code and length.
+ """
+ valueb = self.packvalue()
+ flags = self._flags
+ type_code = self.__class__.TYPE_CODE
+
+ if len(valueb) > 255:
+ flags = flags | 16
+ return struct.pack('!BBH', flags, type_code, len(valueb)) + valueb
+
+ flags = flags & (0xff ^ 16)
+ return struct.pack('BBB', flags, type_code, len(valueb)) + valueb
+
+ def str_flags(self):
+ """Returns a list of attribute category for this bgp attribute."""
+
+ if self._flags:
+ flag_cat = []
+ if self._flags & OPTIONAL_BGP_ATTR:
+ flag_cat.append('optional')
+ else:
+ flag_cat.append('well-known')
+
+ if self._flags & TRANSITIVE_BGP_ATTR:
+ flag_cat.append('transitive')
+ else:
+ flag_cat.append('non-transitive')
+
+ if self._flags & PARTIAL_BGP_ATTR:
+ flag_cat.append('partial')
+ else:
+ flag_cat.append('complete')
+
+ if self._flags & EXTENDED_LEN_BGP_ATTR:
+ flag_cat.append('ext-length')
+ else:
+ flag_cat.append('regular-length')
+
+ return ','.join(flag_cat)
+
+ return 'None'
+
+ def __repr__(self):
+ return '<%s type/num=%s/%s flags %s>' % (
+ self.__class__.__name__, 'unknown', self.__class__.TYPE_CODE,
+ self.str_flags())
+
+ def __cmp__(self, other):
+ if isinstance(other, PathAttr):
+ if other.__class__ == self.__class__:
+ return cmp(self._flags, other._flags)
+
+ return -1
+
+ def __hash__(self):
+        return hash((self._flags, self.__class__.TYPE_CODE))
+
+
+class RcgPathAttr(PathAttr):
+ """Base class for all recognized path attributes."""
+ # Flags for this type of known path attribute.
+ # Sub-classes should provide value as per RFC.
+ FLAGS = None
+
+ # There are some flags we don't care about. By default we don't care about
+ # extended-length bit and partial bit, so mask is 0x3f (0011 1111)
+ FLAGS_MASK = DEFAULT_FLAGS_MASK
+
+ def __init__(self):
+ PathAttr.__init__(self, self.__class__.FLAGS)
+
+ @classmethod
+ def from_bytes(cls, val):
+ raise NotImplementedError()
+
+ @classmethod
+ def check_flags(cls, flags):
+ """Check if provided flags match flags required by RFC (cls.FLAGS).
+
+        RFC 4271 defines the high-order flag bits as follows (bit 0 first):
+
+        - bit 0: Optional (1) / well-known (0)
+        - bit 1: Transitive (1) / non-transitive (0); always 1 for
+          well-known attributes
+        - bit 2: Partial (1) / complete (0); always 0 for well-known and
+          for optional non-transitive attributes
+        - bit 3: Extended Length; the attribute length field is 2 octets (1)
+          or 1 octet (0)
+
+        The lower-order bits are unused and must be zero.
+        """
+ return cls.FLAGS | cls.FLAGS_MASK == flags | cls.FLAGS_MASK
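+
+# Illustrative sketch: the default mask treats the extended-length and partial
+# bits as "don't care". For a well-known transitive attribute such as Origin
+# (defined below, FLAGS == 0x40) both 0x40 and 0x50 pass, while flags with the
+# optional bit set do not.
+#
+#   >>> Origin.check_flags(0x40), Origin.check_flags(0x50)
+#   (True, True)
+#   >>> Origin.check_flags(0xc0)
+#   False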
+
+
+class UnRcgPathAttr(PathAttr):
+ """Class for all un-supported/un-recognized bgp path-attributes.
+ """
+ __slots__ = ('_type_code', '_value')
+ ATTR_NAME = 'unknown'
+
+ def __init__(self, value, flags, type_code):
+ PathAttr.__init__(self, flags)
+ self._type_code = type_code
+ self._value = value
+
+ @property
+ def value(self):
+ return self._value
+
+ @property
+ def type_code(self):
+ return self._type_code
+
+ def packvalue(self):
+ return self._value
+
+ def encode(self):
+ all_flags = self._flags
+ valueb = self.packvalue()
+
+ if len(valueb) > 255:
+ all_flags = all_flags | 16
+ return struct.pack('!BBH', all_flags, self.type_code,
+ len(valueb)) + valueb
+
+ all_flags = all_flags & (0xff ^ 16)
+ return struct.pack('BBB', all_flags, self.type_code,
+ len(valueb)) + valueb
+
+ def is_optional_transitive(self):
+ """Returns true if this is an optional path attribute.
+ """
+ return self._flags & OPTIONAL_BGP_ATTR
+
+ def is_transitive(self):
+ """Returns true if this is an transitive path attribute.
+ """
+ return self._flags & TRANSITIVE_BGP_ATTR
+
+ def __repr__(self):
+ return '<%s type/num=%s/%s flags %s value %r>' % (
+ self.__class__.__name__, 'unknown', self.type_code, self.flags,
+ self.value)
+
+
+@_register_path_attr
+class Origin(RcgPathAttr):
+ """ORIGIN is a well-known mandatory bgp path-attribute."""
+ __slots__ = ('_value')
+ TYPE_CODE = 1
+ ATTR_NAME = 'origin'
+ # 010 - well known, transitive, complete
+ FLAGS = TRANSITIVE_BGP_ATTR
+
+ # Various Origin values.
+ IGP = 'igp'
+ EGP = 'egp'
+ INCOMPLETE = 'incomplete'
+
+ def __init__(self, value='incomplete'):
+ RcgPathAttr.__init__(self)
+ if value not in (Origin.IGP, Origin.EGP, Origin.INCOMPLETE):
+ raise ValueError('Invalid Origin attribute value.')
+ self._value = value
+
+ @property
+ def value(self):
+ return self._value
+
+ @classmethod
+ def from_bytes(cls, value):
+ """Decodes bgp path-attribute with type-code 1, i.e. ORIGIN.
+ """
+ if value == '\x00':
+ value = Origin.IGP
+ elif value == '\x01':
+ value = Origin.EGP
+ elif value == '\x02':
+ value = Origin.INCOMPLETE
+ else:
+ raise InvalidOriginError()
+
+ return cls(value)
+
+ def packvalue(self):
+ if self.value == Origin.IGP:
+ return '\x00'
+ elif self.value == Origin.EGP:
+ return '\x01'
+ elif self.value == Origin.INCOMPLETE:
+ return '\x02'
+ return self.value
+
+ def __repr__(self):
+ return '<Origin ' + self.value + '>'
+
+ def __str__(self):
+ return str(self.value)
+
+
+@_register_path_attr
+class AsPath(RcgPathAttr):
+ """AS_PATH is a well-known mandatory bgp path attribute.
+ """
+ __slots__ = ('_path_seg_list')
+ TYPE_CODE = 2
+ ATTR_NAME = 'aspath'
+ # Higher order bits: 010 - well known, transitive, complete
+ FLAGS = TRANSITIVE_BGP_ATTR
+ SEG_TYP_AS_SET = 1
+ SEG_TYP_AS_SEQ = 2
+
+ def __init__(self, path_seg_list):
+ RcgPathAttr.__init__(self)
+ self._path_seg_list = None
+ if isinstance(path_seg_list, str):
+ self._path_seg_list = []
+ for seg in path_seg_list.split():
+ if seg.startswith('set(') and seg.endswith(')'):
+ seg = set([int(s) for s in seg[4:-1].split(',')])
+ else:
+ seg = [int(s) for s in seg.split(',')]
+ self._path_seg_list.append(seg)
+ else:
+ self._path_seg_list = path_seg_list[:]
+
+ @property
+ def path_seg_list(self):
+ return copy.deepcopy(self._path_seg_list)
+
+ def get_as_path_len(self):
+ count = 0
+ for seg in self._path_seg_list:
+ if isinstance(seg, list):
+ # Segment type 2 stored in list and all AS counted.
+ count += len(seg)
+ else:
+ # Segment type 1 stored in set and count as one.
+ count += 1
+
+ return count
+
+ def has_local_as(self, local_as):
+ """Check if *local_as* is already present on path list."""
+ for as_path_seg in self._path_seg_list:
+ for as_num in as_path_seg:
+ if as_num == local_as:
+ return True
+ return False
+
+ def has_matching_leftmost(self, remote_as):
+ """Check if leftmost AS matches *remote_as*."""
+ if not self._path_seg_list or not remote_as:
+ return False
+
+ leftmost_seg = self.path_seg_list[0]
+ if leftmost_seg and leftmost_seg[0] == remote_as:
+ return True
+
+ return False
+
+ @property
+ def value(self):
+ ret = []
+ for as_path_seg in self._path_seg_list:
+ for as_num in as_path_seg:
+ ret.append(as_num)
+ return ret
+
+ def __repr__(self):
+ rstring = StringIO.StringIO()
+ rstring.write('<AsPath')
+ for as_path_seg in self._path_seg_list:
+ if isinstance(as_path_seg, set):
+ rstring.write(' set(')
+ rstring.write(','.join([str(asnum) for asnum in as_path_seg]))
+ rstring.write(')')
+ else:
+ rstring.write(' ')
+ rstring.write(','.join([str(asnum) for asnum in as_path_seg]))
+ rstring.write('>')
+ return rstring.getvalue()
+
+ def __str__(self):
+ ret = '['
+ for as_path_seg in self._path_seg_list:
+ ret += ', '.join([str(asnum) for asnum in as_path_seg])
+ return ret + ']'
+
+ @classmethod
+ def from_bytes(cls, val):
+ """Decodes bgp path-attribute with type-code 2, i.e. AS_PATH.
+ """
+ path_seg_list = []
+ iidx = 0
+
+ while iidx < len(val):
+ segtype, numas = struct.unpack_from('BB', val, iidx)
+ iidx += 2
+
+ if segtype == AsPath.SEG_TYP_AS_SET:
+ container = set()
+ add = container.add
+ elif segtype == AsPath.SEG_TYP_AS_SEQ:
+ container = []
+ add = container.append
+ else:
+ raise MalformedAsPath()
+
+ for _ in range(numas):
+ asnum, = struct.unpack_from('!H', val, iidx)
+ iidx += 2
+ add(asnum)
+ path_seg_list.append(container)
+
+ return cls(path_seg_list)
+
+ def packvalue(self):
+ valueb = ''
+ for seg in self._path_seg_list:
+ if isinstance(seg, set):
+ segtype = 1
+ elif isinstance(seg, (tuple, list)):
+ segtype = 2
+ else:
+ raise Exception('unknown segment type %r' % (seg,))
+
+ valueb += struct.pack('BB', segtype, len(seg))
+ try:
+ iter(seg)
+ except TypeError:
+ valueb += struct.pack('!H', int(seg))
+ else:
+ for asnum in seg:
+ if not isinstance(asnum, int):
+ asnum = int(asnum)
+ valueb += struct.pack('!H', asnum)
+
+ return valueb
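+
+# Illustrative sketch: the constructor also accepts a textual form in which
+# AS_SEQUENCE segments are comma-separated and AS_SET segments are written as
+# set(...).
+#
+#   >>> p = AsPath('65001,65002 set(65100,65101)')
+#   >>> p.get_as_path_len()           # the AS_SET counts as one
+#   3
+#   >>> p.has_matching_leftmost(65001)
+#   True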
+
+
+@_register_path_attr
+class NextHop(RcgPathAttr):
+ """NEXT_HOP is well-known mandatory bgp path-attribute.
+ """
+ __slots__ = ()
+ TYPE_CODE = 3
+ ATTR_NAME = 'nexthop'
+ # Higher order bits: 010 - well known, transitive, complete
+ FLAGS = TRANSITIVE_BGP_ATTR
+
+ def __init__(self, ip_address):
+ if not is_valid_ipv4(ip_address):
+ raise ValueError('Invalid ipv4 address %s.' % ip_address)
+ RcgPathAttr.__init__(self)
+ self._ip_address = ip_address
+
+ @property
+ def ip_address(self):
+ return self._ip_address
+
+ def __repr__(self):
+ return '<nexthop %s>' % (self.ip_address)
+
+ def __str__(self):
+ return str(self.ip_address)
+
+ @classmethod
+ def from_bytes(cls, value):
+ """Decodes bgp path-attribute with type-code 3, i.e. NEXT_HOP.
+ """
+ value = socket.inet_ntoa(value)
+ return cls(value)
+
+ def packvalue(self):
+ return socket.inet_aton(self._ip_address)
+
+
+@_register_path_attr
+class IntAttr(RcgPathAttr):
+ """Super class of all bgp path-attribute whose value is an unsigned
+ integer.
+ """
+ __slots__ = ('_value')
+
+ def __init__(self, value):
+ if not value:
+ value = 0
+ self._value = value
+ RcgPathAttr.__init__(self)
+
+ @property
+ def value(self):
+ return self._value
+
+ def __repr__(self):
+ return '<%s(%d)>' % (self.__class__.__name__, self.value)
+
+ def __str__(self):
+ return str(self.value)
+
+ @classmethod
+ def from_bytes(cls, val):
+ """Decode bgp path-attributes whose value is an unsigned integer.
+ """
+ value, = struct.unpack_from('!I', val)
+ return cls(value)
+
+ def packvalue(self):
+ return struct.pack('!I', self.value)
+
+
+@_register_path_attr
+class Med(IntAttr):
+ """MED is optional non-transitive bgp path-attribute."""
+ __slots__ = ()
+ TYPE_CODE = 4
+ ATTR_NAME = 'med'
+ # Higher order bits: 100 - optional, non-transitive, complete
+ FLAGS = OPTIONAL_BGP_ATTR
+
+ def __init__(self, value):
+ IntAttr.__init__(self, value)
+
+
+@_register_path_attr
+class LocalPref(IntAttr):
+ """LOCAL_PREF is a well-known discretionary attribute."""
+ __slots__ = ()
+ TYPE_CODE = 5
+ ATTR_NAME = 'localpref'
+ # Higher order bits: 010 - well-known, transitive, complete
+ FLAGS = TRANSITIVE_BGP_ATTR
+
+ def __init__(self, value):
+ IntAttr.__init__(self, value)
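+
+# Illustrative sketch: both MED and LOCAL_PREF simply carry the 4-byte
+# unsigned integer payload inherited from IntAttr.
+#
+#   >>> Med(100).packvalue() == struct.pack('!I', 100)
+#   True
+#   >>> LocalPref(200).value
+#   200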
+
+
+@_register_path_attr
+class Originator(RcgPathAttr):
+ """ORIGINATOR_ID is a optional non-transitive attribute."""
+ __slots__ = ('_value')
+ TYPE_CODE = 9
+ ATTR_NAME = 'originator'
+ FLAGS = OPTIONAL_BGP_ATTR
+
+ def __init__(self, value):
+ RcgPathAttr.__init__(self)
+ self._value = value
+
+ @property
+ def value(self):
+ return self._value
+
+ @classmethod
+ def from_bytes(cls, val):
+ """Decodes bgp path-attribute with type code 9, i.e. ORIGINATOR_ID.
+ """
+ if len(val) == 4:
+ value = socket.inet_ntoa(val)
+ else:
+ raise Exception('Invalid originator')
+
+ return cls(value)
+
+ def packvalue(self):
+ return socket.inet_aton(self.value)
+
+
+@_register_path_attr
+class ClusterList(RcgPathAttr):
+ """CLUSTER_LIST is a optional non-transitive bgp path-attribute.
+ """
+ __slots__ = ('_cluster_list')
+ TYPE_CODE = 10
+ ATTR_NAME = 'cluster-list'
+ FLAGS = OPTIONAL_BGP_ATTR
+
+ def __init__(self, cluster_list):
+ if not cluster_list:
+ raise ValueError('Invalid cluster list.')
+ # TODO(PH): add more validation of input here.
+ RcgPathAttr.__init__(self)
+ self._cluster_list = cluster_list
+
+ @property
+ def cluster_list(self):
+ return self._cluster_list
+
+ @classmethod
+ def from_bytes(cls, val):
+ """Decodes bgp path-attribute with type-code 10, i.e. CLUSTER_LIST.
+ """
+ cluster_list = []
+ iidx = 0
+ while iidx < len(val):
+ cluster_list.append(
+ socket.inet_ntoa(struct.unpack_from('4s', val, iidx)[0])
+ )
+ iidx += 4
+ return cls(cluster_list)
+
+ def packvalue(self):
+ valueb = ''
+ for c in self.cluster_list:
+ valueb += socket.inet_aton(c)
+ return valueb
+
+
+@_register_path_attr
+class MpReachNlri(RcgPathAttr):
+ """MP_REACH_NLRI is a optional non-transitive bgp path-attribute.
+ """
+ __slots__ = ('_route_family', '_next_hop', '_nlri_list', '_reserved')
+ TYPE_CODE = 14
+ ATTR_NAME = 'mp-reach-nlri'
+ NEXT_HOP = 'nh'
+ NLRI = 'nlri'
+ RESERVED = 'reserved'
+ # Higher order bits: 100 - optional, non-transitive, complete
+ FLAGS = OPTIONAL_BGP_ATTR
+
+ def __init__(self, route_family, next_hop, nlri_list, reserved=None):
+ if not (hasattr(route_family, 'afi') and
+ hasattr(route_family, 'safi')):
+ raise ValueError('Invalid parameter value for route_family %s.' %
+ route_family)
+
+ if not next_hop:
+ raise ValueError('Invalid next_hop %s' % next_hop)
+
+ # MpReachNlri attribute should have next-hop belonging to same
+ # route-family
+ if ((route_family == RF_IPv4_VPN and not is_valid_ipv4(next_hop)) or
+ (route_family == RF_IPv6_VPN and not is_valid_ipv6(next_hop))):
+ raise ValueError('Next hop should belong to %s route family' %
+ route_family)
+
+ if not nlri_list:
+ nlri_list = []
+
+ RcgPathAttr.__init__(self)
+ self._route_family = route_family
+ self._next_hop = next_hop
+ self._nlri_list = nlri_list
+ self._reserved = reserved
+
+ @property
+ def route_family(self):
+ return self._route_family
+
+ @property
+ def next_hop(self):
+ return self._next_hop
+
+ @property
+ def nlri_list(self):
+ return self._nlri_list[:]
+
+ @property
+ def reserved(self):
+ return self._reserved
+
+ def __repr__(self):
+ return '<MpReachNlri route_family=%r next_hop=%r nlri_list=%r>' % (
+ self.route_family, self.next_hop, self._nlri_list)
+
+ @classmethod
+ def from_bytes(cls, val):
+ """Decodes bgp path-attribute with type code 14, i.e MP_REACH_NLRI.
+ """
+ afi, safi, nhlen = struct.unpack_from('!HBB', val)
+ fmt = '%dsB' % (nhlen,)
+ next_hop, reserved = struct.unpack_from(fmt, val, 4)
+
+        if afi == 1 and safi == 128:
+ # Vpnv4
+ _, _, nhip = struct.unpack('!II4s', next_hop)
+ next_hop = socket.inet_ntop(socket.AF_INET, nhip)
+ elif afi == 1 and safi == 132:
+ # RtNlri
+ nhip, = struct.unpack('!4s', next_hop)
+ next_hop = socket.inet_ntop(socket.AF_INET, nhip)
+ elif afi == 2 and safi == 128:
+ # Vpnv6
+ _, _, nhip = struct.unpack('!II16s', next_hop)
+ next_hop = socket.inet_ntop(socket.AF_INET6, nhip)
+ else:
+ LOG.error('Received NLRI for afi/safi (%s/%s), which is not'
+ ' supported yet!' % (afi, safi))
+ raise OptAttrError()
+
+ n_nlri = nlri.parse(val[5 + nhlen:], afi, safi)
+ route_family = get_rf(afi, safi)
+ return cls(route_family, next_hop, n_nlri, reserved)
+
+ def packvalue(self):
+ afi = self._route_family.afi
+ safi = self._route_family.safi
+ if self._route_family == RF_IPv4_VPN:
+ next_hop = '\0' * 8
+ next_hop += socket.inet_aton(self.next_hop)
+ elif self._route_family == RF_RTC_UC:
+ next_hop = socket.inet_aton(self.next_hop)
+ elif self._route_family == RF_IPv6_VPN:
+ next_hop = '\0' * 8
+ next_hop += socket.inet_pton(socket.AF_INET6, self.next_hop)
+ else:
+ next_hop = self.next_hop
+
+ valueb = struct.pack('!HBB', afi, safi, len(next_hop))
+ valueb += next_hop
+ valueb += chr(self.reserved or 0)
+
+ for n_nlri in self._nlri_list:
+ valueb += n_nlri.encode()
+ return valueb
+
+
+@_register_path_attr
+class MpUnreachNlri(RcgPathAttr):
+ """MP_UNREACH_NLRI is a optional non-transitive bgp path-attribute.
+ """
+ __slots__ = ('_route_family', '_nlri_list')
+ TYPE_CODE = 15
+ ATTR_NAME = 'mp-unreach-nlri'
+ NLRI = 'withdraw_nlri'
+ # Higher order bits: 100 - optional, non-transitive, complete
+ FLAGS = OPTIONAL_BGP_ATTR
+
+ def __init__(self, route_family, nlri_list):
+ if not (hasattr(route_family, 'afi') and
+ hasattr(route_family, 'safi')):
+ raise ValueError('Invalid parameter value for route_family %s' %
+ route_family)
+ if not nlri_list:
+ nlri_list = []
+
+ RcgPathAttr.__init__(self)
+ self._route_family = route_family
+ self._nlri_list = nlri_list
+
+ @property
+ def nlri_list(self):
+ return self._nlri_list[:]
+
+ @property
+ def route_family(self):
+ return self._route_family
+
+ def __repr__(self):
+        return '<MpUnreachNlri route_family=%r nlri_list=%r>' % (
+ self._route_family, self._nlri_list)
+
+ @classmethod
+ def from_bytes(cls, val):
+ """Decodes bgp path-attribute of type-code 15, i.e. MP_UNREACH_NLRI.
+ """
+ afi, safi = struct.unpack_from('!HB', val)
+ route_family = get_rf(afi, safi)
+ w_nlri = nlri.parse(val[3:], afi, safi)
+ return cls(route_family, w_nlri)
+
+ def packvalue(self):
+ afi = self._route_family.afi
+ safi = self._route_family.safi
+
+ valueb = struct.pack('!HB', afi, safi)
+
+ for w_nlri in self._nlri_list:
+ valueb += w_nlri.encode()
+ return valueb
+
+
+@_register_path_attr
+class Community(RcgPathAttr):
+ """COMMUNITY is a optional transitive bgp path-attribute.
+ """
+ __slots__ = ('_attr_list')
+ TYPE_CODE = 8
+ ATTR_NAME = 'community'
+ FLAGS = TRANSITIVE_BGP_ATTR | OPTIONAL_BGP_ATTR
+
+    # Values of well-known communities.
+ NO_EXPORT = int('0xFFFFFF01', 16)
+ NO_ADVERTISE = int('0xFFFFFF02', 16)
+ NO_EXPORT_SUBCONFED = int('0xFFFFFF03', 16)
+ WELL_KNOW_COMMUNITIES = (NO_EXPORT, NO_ADVERTISE, NO_EXPORT_SUBCONFED)
+
+ def __init__(self, *attrs):
+ if not attrs:
+ raise ValueError('Invalid parameter for community attribute '
+ 'list %r.' % attrs)
+ self._attr_list = []
+ for attr in attrs:
+ if not isinstance(attr, int):
+ raise ValueError('Invalid community attribute value %s.' %
+ attr)
+ self._attr_list.append(attr)
+
+ RcgPathAttr.__init__(self)
+
+ @property
+ def attr_list(self):
+ return self._attr_list[:]
+
+ @classmethod
+ def from_bytes(cls, val):
+ """Decodes path attribute of type code 8, i.e. Community attribute.
+ """
+ att_list = []
+ iidx = 0
+ while iidx < len(val):
+ comm_attr, = struct.unpack_from('!I', val, iidx)
+ att_list.append(comm_attr)
+ iidx += 4
+ return cls(*att_list)
+
+ def packvalue(self):
+ commu_attr = ''
+ for attr in self._attr_list:
+ commu_attr += struct.pack('!I', int(attr))
+ return commu_attr
+
+ @staticmethod
+ def is_no_export(comm_attr):
+ """Returns True if given value matches well-known community NO_EXPORT
+ attribute value.
+ """
+ return comm_attr == Community.NO_EXPORT
+
+ @staticmethod
+ def is_no_advertise(comm_attr):
+ """Returns True if given value matches well-known community
+ NO_ADVERTISE attribute value.
+ """
+ return comm_attr == Community.NO_ADVERTISE
+
+ @staticmethod
+ def is_no_export_subconfed(comm_attr):
+ """Returns True if given value matches well-known community
+ NO_EXPORT_SUBCONFED attribute value.
+ """
+ return comm_attr == Community.NO_EXPORT_SUBCONFED
+
+ def has_comm_attr(self, attr):
+ """Returns True if given community attribute is present."""
+
+ for comm_attr in self._attr_list:
+ if comm_attr == attr:
+ return True
+
+ return False
+
+ def _community_repr(self, comm_attr):
+ """Matches given value with all well-known community attribute values.
+
+ Returns string representation of the well-known attribute if we
+ have a match else returns given value.
+ """
+
+ if self.is_no_export(comm_attr):
+ return 'NO_EXPORT'
+ elif self.is_no_advertise(comm_attr):
+ return 'NO_ADVERTISE'
+ elif self.is_no_export_subconfed(comm_attr):
+ return 'NO_EXPORT_SUBCONFED'
+ return (str(comm_attr >> 16) + ':' +
+ str(comm_attr & int('0x0000ffff', 16)))
+
+ def __repr__(self):
+ attr_list_repr = (','.join([self._community_repr(ca)
+ for ca in self._attr_list]))
+ return ('<Community([%s])>' % attr_list_repr)
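+
+# Illustrative sketch: _community_repr() renders well-known values by name and
+# everything else as asn:value.
+#
+#   >>> repr(Community((100 << 16) | 200))
+#   '<Community([100:200])>'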
+
+
+@_register_path_attr
+class ExtCommunity(RcgPathAttr):
+ """EXTENDED COMMUNITIES is a optional and transitive bgp path-attribute.
+ """
+ __slots__ = ('_rt_list', '_soo_list', '_unknowns')
+ TYPE_CODE = 16
+ ATTR_NAME = 'extcommunity'
+ RT = 'route_target'
+ SOO = 'site_of_origin'
+ UNKNOWN = 'unknown_community'
+ # Higher order bits: 110 - optional, transitive, complete
+ FLAGS = TRANSITIVE_BGP_ATTR | OPTIONAL_BGP_ATTR
+
+ def __str__(self):
+ return 'rt_list: {0}, soo_list: {1}'.format(
+ self.rt_list,
+ self.soo_list
+ )
+
+ def __init__(self, rt_list, soo_list, unknowns=None):
+ if not rt_list and not soo_list:
+            raise ValueError('Have to provide at least one RT/SOO attribute.')
+ if not rt_list:
+ rt_list = []
+ if not soo_list:
+ soo_list = []
+ if not unknowns:
+ unknowns = {}
+
+ ExtCommunity.validate_supported_attributes(rt_list)
+ ExtCommunity.validate_supported_attributes(soo_list)
+
+ RcgPathAttr.__init__(self)
+ self._rt_list = list(rt_list)
+ self._soo_list = list(soo_list)
+ self._unknowns = unknowns
+
+ @property
+ def rt_list(self):
+ """Returns a list of extracted/configured route target community."""
+ # TODO(PH): Make sure we do not raise Exception here but return empty
+ # list instead.
+ return self._rt_list[:]
+
+ @property
+ def soo_list(self):
+ """Returns route origin community."""
+ return self._soo_list[:]
+
+ def __repr__(self):
+ return '<%s type/num=%s/%s flags %s, rts: %s, soo: %s>' % (
+ self.__class__.__name__, self.__class__.ATTR_NAME,
+ self.__class__.TYPE_CODE,
+ self.str_flags(), self.rt_list, self.soo_list)
+
+ def has_unknown_communities(self):
+ """Returns True if we have extracted/configured community other than
+ route target or route origin community.
+ """
+ return True if self._unknowns else False
+
+ @classmethod
+ def validate_supported_attributes(cls, attr_list):
+ """Validate *attr_list* has all valid RTs or SOO attribute
+ representations.
+
+ RTs and SOO are represented as string in following format:
+ *global_admin_part:local_admin_part*
+ """
+ for attr in attr_list:
+ if not validation.is_valid_ext_comm_attr(attr):
+ raise ValueError('Attribute %s is not a valid RT/SOO' % attr)
+
+ @classmethod
+ def from_bytes(cls, val):
+ """Decodes ext-community path-attribute.
+ """
+ rt_list = []
+ soo_list = []
+ unknowns = {}
+ iidx = 0
+ while iidx < len(val):
+ etype, esubtype, payload = struct.unpack_from('BB6s', val, iidx)
+            # RFC says: the high-order octet of the Type field for a Route
+            # Target community can be 0x00, 0x01, or 0x02, and the low-order
+            # octet is 0x02. We also accept a low-order octet of 0x00 because
+            # Routem packs it that way.
+            # TODO(PH): Remove this exception when it breaks something.
+ if etype in (0, 2) and esubtype in (0, 2):
+ # If we have route target community in AS number format.
+ asnum, i = struct.unpack('!HI', payload)
+ rt_list.append('%s:%s' % (asnum, i))
+ elif etype == 1 and esubtype == 2:
+ # If we have route target community in IP address format.
+ ip_addr, i = struct.unpack('!4sH', payload)
+ ip_addr = socket.inet_ntoa(ip_addr)
+ rt_list.append('%s:%s' % (ip_addr, i))
+ elif etype in (0, 2) and esubtype == 3:
+ # If we have route origin community in AS number format.
+ asnum, nnum = struct.unpack('!HI', payload)
+ soo_list.append('%s:%s' % (asnum, nnum))
+ elif etype == 1 and esubtype == 3:
+ # If we have route origin community in IP address format.
+ ip_addr, nnum = struct.unpack('!4sH', payload)
+ ip_addr = socket.inet_ntoa(ip_addr)
+ soo_list.append('%s:%s' % (ip_addr, nnum))
+ else:
+ # All other communities, other than RT and SOO are unknown.
+ unknown_list = unknowns.get(etype)
+ if unknown_list is None:
+ unknown_list = []
+ unknowns[etype] = unknown_list
+ unknown_list.append(
+ '%s:%s' % (etype, val[iidx + 1:iidx + 8].encode('hex'))
+ )
+ iidx += 8
+
+ return cls(rt_list, soo_list, unknowns)
+
+ def packvalue(self):
+ excomb = ''
+ # Pack route target community attrs.
+ for route_target in self._rt_list:
+ first, second = route_target.split(':')
+ if '.' in first:
+ ip_addr = socket.inet_aton(first)
+ excomb += struct.pack('!BB4sH', 1, 2, ip_addr,
+ int(second))
+ else:
+ excomb += struct.pack('!BBHI', 0, 2, int(first),
+ int(second))
+ # Pack route origin community attrs.
+ for route_origin in self._soo_list:
+ first, second = route_origin.split(':')
+ if '.' in first:
+ ip_addr = socket.inet_aton(first)
+ excomb += struct.pack('!BB4sH', 1, 3, ip_addr,
+ int(second))
+ else:
+ excomb += struct.pack('!BBHI', 0, 3, int(first),
+ int(second))
+        for type, attr_list in self._unknowns.items():
+            # Pack all unknown ext. attrs.
+            for attr in attr_list:
+                excomb += struct.pack('B', int(type))
+                excomb += attr.split(':', 1)[1].decode('hex')
+        return excomb
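+
+# Illustrative sketch (assuming the validator accepts the AS-number form): a
+# route target is packed as type 0x00, sub-type 0x02, followed by the 2-byte
+# AS number and the 4-byte local value.
+#
+#   >>> ec = ExtCommunity(['65000:100'], [])
+#   >>> ec.packvalue() == struct.pack('!BBHI', 0, 2, 65000, 100)
+#   True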
diff --git a/ryu/services/protocols/bgp/rtconf/base.py b/ryu/services/protocols/bgp/rtconf/base.py
new file mode 100644
index 00000000..95717f8c
--- /dev/null
+++ b/ryu/services/protocols/bgp/rtconf/base.py
@@ -0,0 +1,700 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Running or runtime configuration base classes.
+"""
+from abc import ABCMeta
+from abc import abstractmethod
+import logging
+from types import BooleanType
+from types import IntType
+from types import LongType
+
+from ryu.services.protocols.bgp.base import add_bgp_error_metadata
+from ryu.services.protocols.bgp.base import BGPSException
+from ryu.services.protocols.bgp.base import get_validator
+from ryu.services.protocols.bgp.base import RUNTIME_CONF_ERROR_CODE
+from ryu.services.protocols.bgp.base import validate
+
+from ryu.services.protocols.bgp.protocols.bgp.pathattr import ExtCommunity
+from ryu.services.protocols.bgp.utils import validation
+from ryu.services.protocols.bgp.utils.validation import is_valid_old_asn
+
+LOG = logging.getLogger('bgpspeaker.rtconf.base')
+
+#
+# Nested settings.
+#
+CAP_REFRESH = 'cap_refresh'
+CAP_ENHANCED_REFRESH = 'cap_enhanced_refresh'
+CAP_MBGP_VPNV4 = 'cap_mbgp_vpnv4'
+CAP_MBGP_VPNV6 = 'cap_mbgp_vpnv6'
+CAP_RTC = 'cap_rtc'
+RTC_AS = 'rtc_as'
+HOLD_TIME = 'hold_time'
+
+# Controls how many prefixes can be received from a neighbor.
+# A value of 0 indicates no limit, and other related options will be ignored.
+# Current behavior is to log when the limit has been reached.
+MAX_PREFIXES = 'max_prefixes'
+
+# Has same meaning as: http://www.juniper.net/techpubs/software/junos/junos94
+# /swconfig-routing/disabling-suppression-of-route-
+# advertisements.html#id-13255463
+ADVERTISE_PEER_AS = 'advertise_peer_as'
+
+# MED - MULTI_EXIT_DISC
+MULTI_EXIT_DISC = 'multi_exit_disc'
+
+# Extended community attribute route origin.
+SITE_OF_ORIGINS = 'site_of_origins'
+
+# Constants related to errors.
+CONF_NAME = 'conf_name'
+CONF_VALUE = 'conf_value'
+
+# Max. value limits
+MAX_NUM_IMPORT_RT = 1000
+MAX_NUM_EXPORT_RT = 250
+MAX_NUM_SOO = 10
+
+
+#==============================================================================
+# Runtime configuration errors or exceptions.
+#==============================================================================
+
+@add_bgp_error_metadata(code=RUNTIME_CONF_ERROR_CODE, sub_code=1,
+ def_desc='Error with runtime-configuration.')
+class RuntimeConfigError(BGPSException):
+ """Base class for all runtime configuration errors.
+ """
+ pass
+
+
+@add_bgp_error_metadata(code=RUNTIME_CONF_ERROR_CODE, sub_code=2,
+ def_desc='Missing required configuration.')
+class MissingRequiredConf(RuntimeConfigError):
+ """Exception raised when trying to configure with missing required
+ settings.
+ """
+ def __init__(self, **kwargs):
+ conf_name = kwargs.get('conf_name')
+ if conf_name:
+ super(MissingRequiredConf, self).__init__(
+ desc='Missing required configuration: %s' % conf_name)
+ else:
+ super(MissingRequiredConf, self).__init__(desc=kwargs.get('desc'))
+
+
+@add_bgp_error_metadata(code=RUNTIME_CONF_ERROR_CODE, sub_code=3,
+ def_desc='Incorrect Type for configuration.')
+class ConfigTypeError(RuntimeConfigError):
+ """Exception raised when configuration value type miss-match happens.
+ """
+ def __init__(self, **kwargs):
+ conf_name = kwargs.get(CONF_NAME)
+ conf_value = kwargs.get(CONF_VALUE)
+ if conf_name and conf_value:
+ super(ConfigTypeError, self).__init__(
+ desc='Incorrect Type %s for configuration: %s' %
+ (conf_value, conf_name))
+ elif conf_name:
+ super(ConfigTypeError, self).__init__(
+ desc='Incorrect Type for configuration: %s' % conf_name)
+ else:
+ super(ConfigTypeError, self).__init__(desc=kwargs.get('desc'))
+
+
+@add_bgp_error_metadata(code=RUNTIME_CONF_ERROR_CODE, sub_code=4,
+ def_desc='Incorrect Value for configuration.')
+class ConfigValueError(RuntimeConfigError):
+ """Exception raised when configuration value is of correct type but
+ incorrect value.
+ """
+ def __init__(self, **kwargs):
+ conf_name = kwargs.get(CONF_NAME)
+ conf_value = kwargs.get(CONF_VALUE)
+ if conf_name and conf_value:
+ super(ConfigValueError, self).__init__(
+ desc='Incorrect Value %s for configuration: %s' %
+ (conf_value, conf_name))
+ elif conf_name:
+ super(ConfigValueError, self).__init__(
+ desc='Incorrect Value for configuration: %s' % conf_name)
+ else:
+ super(ConfigValueError, self).__init__(desc=kwargs.get('desc'))
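+
+# Illustrative sketch: the exceptions above compose their description from the
+# CONF_NAME/CONF_VALUE keyword arguments, e.g.
+#
+#   >>> err = ConfigValueError(conf_name=HOLD_TIME, conf_value=5)
+#   # -> described as 'Incorrect Value 5 for configuration: hold_time'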
+
+
+#==============================================================================
+# Configuration base classes.
+#==============================================================================
+
+class BaseConf(object):
+ """Base class for a set of configuration values.
+
+ Configurations can be required or optional. Also acts as a container of
+ configuration change listeners.
+ """
+ __metaclass__ = ABCMeta
+
+ def __init__(self, **kwargs):
+ self._req_settings = self.get_req_settings()
+ self._opt_settings = self.get_opt_settings()
+ self._valid_evts = self.get_valid_evts()
+ self._listeners = {}
+ self._settings = {}
+
+ # validate required and unknown settings
+ self._validate_req_unknown_settings(**kwargs)
+
+ # Initialize configuration settings.
+ self._init_req_settings(**kwargs)
+ self._init_opt_settings(**kwargs)
+
+ @property
+ def settings(self):
+ """Returns a copy of current settings."""
+ return self._settings.copy()
+
+ @classmethod
+ def get_valid_evts(self):
+ return set()
+
+ @classmethod
+ def get_req_settings(self):
+ return set()
+
+ @classmethod
+ def get_opt_settings(self):
+ return set()
+
+ @abstractmethod
+ def _init_opt_settings(self, **kwargs):
+ """Sub-classes should override this method to initialize optional
+ settings.
+ """
+ pass
+
+ @abstractmethod
+ def update(self, **kwargs):
+ # Validate given values
+ self._validate_req_unknown_settings(**kwargs)
+
+ def _validate_req_unknown_settings(self, **kwargs):
+ """Checks if required settings are present.
+
+        Also checks if unknown settings are given.
+ """
+ # Validate given configuration.
+ self._all_attrs = (self._req_settings | self._opt_settings)
+ if not kwargs and len(self._req_settings) > 0:
+ raise MissingRequiredConf(desc='Missing all required attributes.')
+
+ given_attrs = frozenset(kwargs.keys())
+ unknown_attrs = given_attrs - self._all_attrs
+ if unknown_attrs:
+ raise RuntimeConfigError(desc=(
+ 'Unknown attributes: %s' %
+ ', '.join([str(i) for i in unknown_attrs]))
+ )
+ missing_req_settings = self._req_settings - given_attrs
+ if missing_req_settings:
+ raise MissingRequiredConf(conf_name=list(missing_req_settings))
+
+ def _init_req_settings(self, **kwargs):
+ for req_attr in self._req_settings:
+ req_attr_value = kwargs.get(req_attr)
+ if req_attr_value is None:
+                raise MissingRequiredConf(conf_name=req_attr)
+ # Validate attribute value
+ req_attr_value = get_validator(req_attr)(req_attr_value)
+ self._settings[req_attr] = req_attr_value
+
+ def add_listener(self, evt, callback):
+# if (evt not in self.get_valid_evts()):
+# raise RuntimeConfigError(desc=('Unknown event %s' % evt))
+
+ listeners = self._listeners.get(evt, None)
+ if not listeners:
+ listeners = set()
+ self._listeners[evt] = listeners
+ listeners.update([callback])
+
+ def remove_listener(self, evt, callback):
+ if evt in self.get_valid_evts():
+ listeners = self._listeners.get(evt, None)
+ if listeners and (callback in listeners):
+ listeners.remove(callback)
+ return True
+
+ return False
+
+ def _notify_listeners(self, evt, value):
+ listeners = self._listeners.get(evt, [])
+ for callback in listeners:
+ callback(ConfEvent(self, evt, value))
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__, self._settings)
+
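As an editorial sketch (not part of this patch), a minimal BaseConf subclass is
expected to declare its settings, register a validator for each one, and fill
in optional values through compute_optional_conf(); TimerConf and 'interval'
below are made-up names, shown as if they lived alongside the classes in this
module:

    @validate(name='interval')
    def validate_interval(interval):
        if not isinstance(interval, (int, long)):
            raise ConfigTypeError(conf_name='interval', conf_value=interval)
        return interval

    class TimerConf(BaseConf):
        OPTIONAL_SETTINGS = frozenset(['interval'])

        @classmethod
        def get_opt_settings(cls):
            confs = super(TimerConf, cls).get_opt_settings()
            confs.update(TimerConf.OPTIONAL_SETTINGS)
            return confs

        def _init_opt_settings(self, **kwargs):
            super(TimerConf, self)._init_opt_settings(**kwargs)
            self._settings['interval'] = compute_optional_conf(
                'interval', 30, **kwargs)

        def update(self, **kwargs):
            super(TimerConf, self).update(**kwargs)

    TimerConf(interval=60).settings   # -> {'interval': 60}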
+
+class ConfWithId(BaseConf):
+ """Configuration settings related to identity."""
+ # Config./resource identifier.
+ ID = 'id'
+ # Config./resource name.
+ NAME = 'name'
+ # Config./resource description.
+ DESCRIPTION = 'description'
+
+ UPDATE_NAME_EVT = 'update_name_evt'
+ UPDATE_DESCRIPTION_EVT = 'update_description_evt'
+
+ VALID_EVT = frozenset([UPDATE_NAME_EVT, UPDATE_DESCRIPTION_EVT])
+ REQUIRED_SETTINGS = frozenset([ID])
+ OPTIONAL_SETTINGS = frozenset([NAME, DESCRIPTION])
+
+ def __init__(self, **kwargs):
+ super(ConfWithId, self).__init__(**kwargs)
+
+ @classmethod
+ def get_opt_settings(cls):
+ self_confs = super(ConfWithId, cls).get_opt_settings()
+ self_confs.update(ConfWithId.OPTIONAL_SETTINGS)
+ return self_confs
+
+ @classmethod
+ def get_req_settings(cls):
+ self_confs = super(ConfWithId, cls).get_req_settings()
+ self_confs.update(ConfWithId.REQUIRED_SETTINGS)
+ return self_confs
+
+ @classmethod
+ def get_valid_evts(cls):
+ self_valid_evts = super(ConfWithId, cls).get_valid_evts()
+ self_valid_evts.update(ConfWithId.VALID_EVT)
+ return self_valid_evts
+
+ def _init_opt_settings(self, **kwargs):
+ super(ConfWithId, self)._init_opt_settings(**kwargs)
+ self._settings[ConfWithId.NAME] = \
+ compute_optional_conf(ConfWithId.NAME, str(self), **kwargs)
+ self._settings[ConfWithId.DESCRIPTION] = \
+ compute_optional_conf(ConfWithId.DESCRIPTION, str(self), **kwargs)
+
+ @property
+ def id(self):
+ return self._settings[ConfWithId.ID]
+
+ @property
+ def name(self):
+ return self._settings[ConfWithId.NAME]
+
+ @name.setter
+ def name(self, new_name):
+ old_name = self.name
+ if not new_name:
+ new_name = repr(self)
+ else:
+ get_validator(ConfWithId.NAME)(new_name)
+
+ if old_name != new_name:
+ self._settings[ConfWithId.NAME] = new_name
+ self._notify_listeners(ConfWithId.UPDATE_NAME_EVT,
+ (old_name, self.name))
+
+ @property
+ def description(self):
+ return self._settings[ConfWithId.DESCRIPTION]
+
+ @description.setter
+ def description(self, new_description):
+ old_desc = self.description
+ if not new_description:
+ new_description = str(self)
+ else:
+ get_validator(ConfWithId.DESCRIPTION)(new_description)
+
+ if old_desc != new_description:
+ self._settings[ConfWithId.DESCRIPTION] = new_description
+ self._notify_listeners(ConfWithId.UPDATE_DESCRIPTION_EVT,
+ (old_desc, self.description))
+
+ def update(self, **kwargs):
+ # Update inherited configurations
+ super(ConfWithId, self).update(**kwargs)
+ self.name = compute_optional_conf(ConfWithId.NAME,
+ str(self),
+ **kwargs)
+ self.description = compute_optional_conf(ConfWithId.DESCRIPTION,
+ str(self),
+ **kwargs)
+
+
+class ConfWithStats(BaseConf):
+ """Configuration settings related to statistics collection."""
+
+ # Enable or disable statistics logging.
+ STATS_LOG_ENABLED = 'statistics_log_enabled'
+ DEFAULT_STATS_LOG_ENABLED = False
+
+ # Statistics logging time.
+ STATS_TIME = 'statistics_interval'
+ DEFAULT_STATS_TIME = 60
+
+ UPDATE_STATS_LOG_ENABLED_EVT = 'update_stats_log_enabled_evt'
+ UPDATE_STATS_TIME_EVT = 'update_stats_time_evt'
+
+ VALID_EVT = frozenset([UPDATE_STATS_LOG_ENABLED_EVT,
+ UPDATE_STATS_TIME_EVT])
+ OPTIONAL_SETTINGS = frozenset([STATS_LOG_ENABLED, STATS_TIME])
+
+ def __init__(self, **kwargs):
+ super(ConfWithStats, self).__init__(**kwargs)
+
+ def _init_opt_settings(self, **kwargs):
+ super(ConfWithStats, self)._init_opt_settings(**kwargs)
+ self._settings[ConfWithStats.STATS_LOG_ENABLED] = \
+ compute_optional_conf(ConfWithStats.STATS_LOG_ENABLED,
+ ConfWithStats.DEFAULT_STATS_LOG_ENABLED,
+ **kwargs)
+ self._settings[ConfWithStats.STATS_TIME] = \
+ compute_optional_conf(ConfWithStats.STATS_TIME,
+ ConfWithStats.DEFAULT_STATS_TIME,
+ **kwargs)
+
+ @property
+ def stats_log_enabled(self):
+ return self._settings[ConfWithStats.STATS_LOG_ENABLED]
+
+ @stats_log_enabled.setter
+ def stats_log_enabled(self, enabled):
+ get_validator(ConfWithStats.STATS_LOG_ENABLED)(enabled)
+ if enabled != self.stats_log_enabled:
+ self._settings[ConfWithStats.STATS_LOG_ENABLED] = enabled
+ self._notify_listeners(ConfWithStats.UPDATE_STATS_LOG_ENABLED_EVT,
+ enabled)
+
+ @property
+ def stats_time(self):
+ return self._settings[ConfWithStats.STATS_TIME]
+
+ @stats_time.setter
+ def stats_time(self, stats_time):
+ get_validator(ConfWithStats.STATS_TIME)(stats_time)
+ if stats_time != self.stats_time:
+ self._settings[ConfWithStats.STATS_TIME] = stats_time
+ self._notify_listeners(ConfWithStats.UPDATE_STATS_TIME_EVT,
+ stats_time)
+
+ @classmethod
+ def get_opt_settings(cls):
+ confs = super(ConfWithStats, cls).get_opt_settings()
+ confs.update(ConfWithStats.OPTIONAL_SETTINGS)
+ return confs
+
+ @classmethod
+ def get_valid_evts(cls):
+ valid_evts = super(ConfWithStats, cls).get_valid_evts()
+ valid_evts.update(ConfWithStats.VALID_EVT)
+ return valid_evts
+
+ def update(self, **kwargs):
+ # Update inherited configurations
+ super(ConfWithStats, self).update(**kwargs)
+ self.stats_log_enabled = \
+ compute_optional_conf(ConfWithStats.STATS_LOG_ENABLED,
+ ConfWithStats.DEFAULT_STATS_LOG_ENABLED,
+ **kwargs)
+ self.stats_time = \
+ compute_optional_conf(ConfWithStats.STATS_TIME,
+ ConfWithStats.DEFAULT_STATS_TIME,
+ **kwargs)
+
+
+class BaseConfListener(object):
+ """Base class of all configuration listeners."""
+ __metaclass__ = ABCMeta
+
+ def __init__(self, base_conf):
+ pass
+        # TODO(PH): re-visit later and check if we need this check
+# if not isinstance(base_conf, BaseConf):
+# raise TypeError('Currently we only support listening to '
+# 'instances of BaseConf')
+
+
+class ConfWithIdListener(BaseConfListener):
+
+ def __init__(self, conf_with_id):
+ assert conf_with_id
+ super(ConfWithIdListener, self).__init__(conf_with_id)
+ conf_with_id.add_listener(ConfWithId.UPDATE_NAME_EVT,
+ self.on_chg_name_conf_with_id)
+ conf_with_id.add_listener(ConfWithId.UPDATE_DESCRIPTION_EVT,
+ self.on_chg_desc_conf_with_id)
+
+ def on_chg_name_conf_with_id(self, conf_evt):
+        # Note: we did not make this method abstract as this is not an
+        # important event.
+ raise NotImplementedError()
+
+ def on_chg_desc_conf_with_id(self, conf_evt):
+        # Note: we did not make this method abstract as this is not an
+        # important event.
+ raise NotImplementedError()
+
+
+class ConfWithStatsListener(BaseConfListener):
+
+ def __init__(self, conf_with_stats):
+ assert conf_with_stats
+ super(ConfWithStatsListener, self).__init__(conf_with_stats)
+ conf_with_stats.add_listener(
+ ConfWithStats.UPDATE_STATS_LOG_ENABLED_EVT,
+ self.on_chg_stats_enabled_conf_with_stats)
+
+ conf_with_stats.add_listener(ConfWithStats.UPDATE_STATS_TIME_EVT,
+ self.on_chg_stats_time_conf_with_stats)
+
+ @abstractmethod
+ def on_chg_stats_time_conf_with_stats(self, conf_evt):
+ raise NotImplementedError()
+
+ @abstractmethod
+ def on_chg_stats_enabled_conf_with_stats(self, conf_evt):
+ raise NotImplementedError()
+
+
+class ConfEvent(object):
+ """Encapsulates configuration settings change/update event."""
+
+ def __init__(self, evt_src, evt_name, evt_value):
+ """Creates an instance using given parameters.
+
+ Parameters:
+            - `evt_src`: (BaseConf) source of the event
+            - `evt_name`: (str) name of the event; has to be one of the
+              valid events of `evt_src`
+            - `evt_value`: (tuple) event context that helps the event handler
+ """
+ if evt_name not in evt_src.get_valid_evts():
+ raise ValueError('Event %s is not a valid event for type %s.' %
+ (evt_name, type(evt_src)))
+ self._src = evt_src
+ self._name = evt_name
+ self._value = evt_value
+
+ @property
+ def src(self):
+ return self._src
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def value(self):
+ return self._value
+
+ def __repr__(self):
+ return '<ConfEvent(%s, %s, %s)>' % (self.src, self.name, self.value)
+
+ def __str__(self):
+ return ('ConfEvent(src=%s, name=%s, value=%s)' %
+ (self.src, self.name, self.value))
+
+ def __cmp__(self, other):
+ return cmp((other.src, other.name, other.value),
+ (self.src, self.name, self.value))
+
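A short illustrative sketch (not part of the patch) of how the listener
plumbing above is meant to be used; ConfWithId is instantiated directly here
purely for illustration:

    def on_name_change(conf_evt):
        old_name, new_name = conf_evt.value
        print('%s renamed from %s to %s' % (conf_evt.src, old_name, new_name))

    conf = ConfWithId(id='conf-1', name='first-name')
    conf.add_listener(ConfWithId.UPDATE_NAME_EVT, on_name_change)
    # Setting a new name fires UPDATE_NAME_EVT; the listener receives a
    # ConfEvent whose value is the (old_name, new_name) tuple.
    conf.name = 'second-name'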
+
+#==============================================================================
+# Runtime configuration setting validators and their registry.
+#==============================================================================
+
+@validate(name=ConfWithId.ID)
+def validate_conf_id(identifier):
+ if not isinstance(identifier, str):
+ raise ConfigTypeError(conf_name=ConfWithId.ID, conf_value=identifier)
+ if len(identifier) > 128:
+ raise ConfigValueError(conf_name=ConfWithId.ID, conf_value=identifier)
+ return identifier
+
+
+@validate(name=ConfWithId.NAME)
+def validate_conf_name(name):
+ if not isinstance(name, str):
+ raise ConfigTypeError(conf_name=ConfWithId.NAME, conf_value=name)
+ if len(name) > 128:
+ raise ConfigValueError(conf_name=ConfWithId.NAME, conf_value=name)
+ return name
+
+
+@validate(name=ConfWithId.DESCRIPTION)
+def validate_conf_desc(description):
+ if not isinstance(description, str):
+ raise ConfigTypeError(conf_name=ConfWithId.DESCRIPTION,
+ conf_value=description)
+ return description
+
+
+@validate(name=ConfWithStats.STATS_LOG_ENABLED)
+def validate_stats_log_enabled(stats_log_enabled):
+ if stats_log_enabled not in (True, False):
+ raise ConfigTypeError(desc='Statistics log enabled settings can only'
+ ' be boolean type.')
+ return stats_log_enabled
+
+
+@validate(name=ConfWithStats.STATS_TIME)
+def validate_stats_time(stats_time):
+ if not isinstance(stats_time, (int, long)):
+ raise ConfigTypeError(desc='Statistics log timer value has to be of '
+ 'type int/long but got: %r' % stats_time)
+ if stats_time < 10:
+        raise ConfigValueError(desc='Statistics log timer cannot be set to '
+                               'less than 10 sec, given timer value %s.' %
+                               stats_time)
+ return stats_time
+
+
+@validate(name=CAP_REFRESH)
+def validate_cap_refresh(crefresh):
+ if crefresh not in (True, False):
+        raise ConfigTypeError(desc='Invalid Refresh capability settings: %s, '
+                              'boolean value expected' % crefresh)
+ return crefresh
+
+
+@validate(name=CAP_ENHANCED_REFRESH)
+def validate_cap_enhanced_refresh(cer):
+ if cer not in (True, False):
+ raise ConfigTypeError(desc='Invalid Enhanced Refresh capability '
+ 'settings: %s boolean value expected' % cer)
+ return cer
+
+
+@validate(name=CAP_MBGP_VPNV4)
+def validate_cap_mbgp_vpnv4(cmv4):
+ if cmv4 not in (True, False):
+        raise ConfigTypeError(desc='Invalid MP-BGP VPNv4 capability '
+ 'settings: %s boolean value expected' % cmv4)
+
+ return cmv4
+
+
+@validate(name=CAP_MBGP_VPNV6)
+def validate_cap_mbgp_vpnv6(cmv6):
+ if cmv6 not in (True, False):
+        raise ConfigTypeError(desc='Invalid MP-BGP VPNv6 capability '
+ 'settings: %s boolean value expected' % cmv6)
+
+ return cmv6
+
+
+@validate(name=CAP_RTC)
+def validate_cap_rtc(cap_rtc):
+ if cap_rtc not in (True, False):
+ raise ConfigTypeError(desc='Invalid type for specifying RTC '
+ 'capability. Expected boolean got: %s' %
+ type(cap_rtc))
+ return cap_rtc
+
+
+@validate(name=RTC_AS)
+def validate_cap_rtc_as(rtc_as):
+ if not is_valid_old_asn(rtc_as):
+ raise ConfigValueError(desc='Invalid RTC AS configuration value: %s'
+ % rtc_as)
+ return rtc_as
+
+
+@validate(name=HOLD_TIME)
+def validate_hold_time(hold_time):
+ if ((hold_time is None) or (not isinstance(hold_time, IntType)) or
+ hold_time < 10):
+ raise ConfigValueError(desc='Invalid hold_time configuration value %s'
+ % hold_time)
+
+ return hold_time
+
+
+@validate(name=MULTI_EXIT_DISC)
+def validate_med(med):
+ if med is not None and not validation.is_valid_med(med):
+        raise ConfigValueError(desc='Invalid multi-exit-discriminator (MED)'
+ ' value: %s.' % med)
+ return med
+
+
+@validate(name=SITE_OF_ORIGINS)
+def validate_soo_list(soo_list):
+ if not isinstance(soo_list, list):
+ raise ConfigTypeError(conf_name=SITE_OF_ORIGINS, conf_value=soo_list)
+ if not (len(soo_list) <= MAX_NUM_SOO):
+ raise ConfigValueError(desc='Max. SOO is limited to %s' %
+ MAX_NUM_SOO)
+ try:
+ ExtCommunity.validate_supported_attributes(soo_list)
+ except ValueError:
+ raise ConfigValueError(conf_name=SITE_OF_ORIGINS,
+ conf_value=soo_list)
+ # Check if we have duplicates
+ unique_rts = set(soo_list)
+ if len(unique_rts) != len(soo_list):
+ raise ConfigValueError(desc='Duplicate value provided in %s' %
+ (soo_list))
+ return soo_list
+
+
+@validate(name=MAX_PREFIXES)
+def validate_max_prefixes(max_prefixes):
+ if not isinstance(max_prefixes, (IntType, LongType)):
+ raise ConfigTypeError(desc='Max. prefixes value should be of type '
+ 'int or long but found %s' % type(max_prefixes))
+ if max_prefixes < 0:
+ raise ConfigValueError(desc='Invalid max. prefixes value: %s' %
+ max_prefixes)
+ return max_prefixes
+
+
+@validate(name=ADVERTISE_PEER_AS)
+def validate_advertise_peer_as(advertise_peer_as):
+ if not isinstance(advertise_peer_as, BooleanType):
+ raise ConfigTypeError(desc='Invalid type for advertise-peer-as, '
+ 'expected bool got %s' %
+ type(advertise_peer_as))
+ return advertise_peer_as
+
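For reference, a hedged sketch of how the registry above is expected to be
consumed: get_validator() (defined earlier in this module and used by
_init_req_settings and compute_optional_conf) returns the function registered
under the given setting name.

    validator = get_validator(HOLD_TIME)
    validator(40)    # returns 40
    validator(5)     # raises ConfigValueError (hold time below 10 sec)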
+
+#==============================================================================
+# Other utils.
+#==============================================================================
+
+def compute_optional_conf(conf_name, default_value, **all_config):
+ """Returns *conf_name* settings if provided in *all_config*, else returns
+ *default_value*.
+
+ Validates *conf_name* value if provided.
+ """
+ conf_value = all_config.get(conf_name)
+ if conf_value is not None:
+ # Validate configuration value.
+ get_validator(conf_name)(conf_value)
+ else:
+ conf_value = default_value
+ return conf_value
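A brief sketch of the fall-back behaviour (illustrative only): the default is
returned untouched, while an explicitly supplied value is first run through
the registered validator.

    compute_optional_conf(HOLD_TIME, 40)                  # -> 40 (default)
    compute_optional_conf(HOLD_TIME, 40, hold_time=90)    # -> 90 (validated)
    compute_optional_conf(HOLD_TIME, 40, hold_time=5)     # ConfigValueError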
diff --git a/ryu/services/protocols/bgp/rtconf/common.py b/ryu/services/protocols/bgp/rtconf/common.py
new file mode 100644
index 00000000..b4fc5ca9
--- /dev/null
+++ b/ryu/services/protocols/bgp/rtconf/common.py
@@ -0,0 +1,334 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Runtime configuration that applies to all bgp sessions, i.e. global settings.
+"""
+import logging
+
+from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4
+from ryu.services.protocols.bgp.utils.validation import is_valid_old_asn
+
+from ryu.services.protocols.bgp import rtconf
+from ryu.services.protocols.bgp.rtconf.base import BaseConf
+from ryu.services.protocols.bgp.rtconf.base import BaseConfListener
+from ryu.services.protocols.bgp.rtconf.base import compute_optional_conf
+from ryu.services.protocols.bgp.rtconf.base import ConfigTypeError
+from ryu.services.protocols.bgp.rtconf.base import ConfigValueError
+from ryu.services.protocols.bgp.rtconf.base import MissingRequiredConf
+from ryu.services.protocols.bgp.rtconf.base import validate
+
+LOG = logging.getLogger('bgpspeaker.rtconf.common')
+
+
+# Global configuration settings.
+LOCAL_AS = 'local_as'
+ROUTER_ID = 'router_id'
+LABEL_RANGE = 'label_range'
+LABEL_RANGE_MAX = 'max'
+LABEL_RANGE_MIN = 'min'
+
+# Configuration that can be set at global level as well as per context
+# (session/vrf) level
+# Nested configuration override global or higher level configuration as they
+# are more granular.
+# TODO(apgw-dev) Nested configuration overriding higher level configuration is
+# currently low priority
+
+# Similar to the Cisco command 'bgp refresh stalepath-time'. Causes the router
+# to remove stale routes from the BGP table even if the router does not
+# receive a Route-Refresh EOR message. The bgp refresh stalepath-time command
+# is not needed under normal circumstances.
+# TODO(PH): Support this feature (currently low priority)
+REFRESH_STALEPATH_TIME = 'refresh_stalepath_time'
+
+# Similar to the Cisco command 'bgp refresh max-eor-time'. The bgp refresh
+# max-eor-time command is not needed under normal circumstances. You might
+# configure it in the event of continuous route flapping, when the router is
+# unable to generate a Route-Refresh EOR message, in which case a
+# Route-Refresh EOR is generated after the timer expires.
+# TODO(PH): Support this feature (currently low priority)
+REFRESH_MAX_EOR_TIME = 'refresh_max_eor_time'
+
+BGP_CONN_RETRY_TIME = 'bgp_conn_retry_time'
+BGP_SERVER_PORT = 'bgp_server_port'
+TCP_CONN_TIMEOUT = 'tcp_conn_timeout'
+MAX_PATH_EXT_RTFILTER_ALL = 'maximum_paths_external_rtfilter_all'
+
+
+# Valid default values of some settings.
+DEFAULT_LABEL_RANGE = (100, 100000)
+DEFAULT_REFRESH_STALEPATH_TIME = 0
+DEFAULT_REFRESH_MAX_EOR_TIME = 0
+DEFAULT_BGP_SERVER_PORT = 179
+DEFAULT_TCP_CONN_TIMEOUT = 30
+DEFAULT_BGP_CONN_RETRY_TIME = 30
+DEFAULT_MED = 0
+DEFAULT_MAX_PATH_EXT_RTFILTER_ALL = True
+
+
+@validate(name=LOCAL_AS)
+def validate_local_as(asn):
+ if asn is None:
+ raise MissingRequiredConf(conf_name=LOCAL_AS)
+
+ if not is_valid_old_asn(asn):
+ raise ConfigValueError(desc='Invalid local_as configuration value: %s'
+ % asn)
+ return asn
+
+
+@validate(name=ROUTER_ID)
+def validate_router_id(router_id):
+ if not router_id:
+ raise MissingRequiredConf(conf_name=ROUTER_ID)
+
+ if not isinstance(router_id, str):
+ raise ConfigTypeError(conf_name=ROUTER_ID)
+ if not is_valid_ipv4(router_id):
+ raise ConfigValueError(desc='Invalid router id %s' % router_id)
+
+ return router_id
+
+
+@validate(name=REFRESH_STALEPATH_TIME)
+def validate_refresh_stalepath_time(rst):
+ if not isinstance(rst, (int, long)):
+ raise ConfigTypeError(desc=('Configuration value for %s has to be '
+ 'int/long' % REFRESH_STALEPATH_TIME))
+ if rst < 0:
+ raise ConfigValueError(desc='Invalid refresh stalepath time %s' % rst)
+
+ return rst
+
+
+@validate(name=REFRESH_MAX_EOR_TIME)
+def validate_refresh_max_eor_time(rmet):
+ if not isinstance(rmet, (int, long)):
+ raise ConfigTypeError(desc=('Configuration value for %s has to be of '
+ 'type int/long ' % REFRESH_MAX_EOR_TIME))
+ if rmet < 0:
+        raise ConfigValueError(desc='Invalid refresh max EOR time %s' % rmet)
+
+ return rmet
+
+
+@validate(name=LABEL_RANGE)
+def validate_label_range(label_range):
+ min_label, max_label = label_range
+ if (not min_label or not max_label
+ or not isinstance(min_label, (int, long))
+ or not isinstance(max_label, (int, long)) or min_label < 17
+ or min_label >= max_label):
+ raise ConfigValueError(desc=('Invalid label_range configuration value:'
+ ' (%s).' % label_range))
+
+ return label_range
+
+
+@validate(name=BGP_SERVER_PORT)
+def validate_bgp_server_port(server_port):
+ if not isinstance(server_port, (int, long)):
+        raise ConfigTypeError(desc=('Invalid bgp server port configuration '
+ 'value %s' % server_port))
+ if server_port <= 0 or server_port > 65535:
+ raise ConfigValueError(desc='Invalid server port %s' % server_port)
+
+ return server_port
+
+
+@validate(name=TCP_CONN_TIMEOUT)
+def validate_tcp_conn_timeout(tcp_conn_timeout):
+ # TODO(apgw-dev) made-up some valid values for this settings, check if we
+ # have a standard value in any routers
+ if not isinstance(tcp_conn_timeout, (int, long)):
+ raise ConfigTypeError(desc=('Invalid tcp connection timeout '
+ 'configuration value %s' %
+ tcp_conn_timeout))
+
+ if tcp_conn_timeout < 10:
+ raise ConfigValueError(desc=('Invalid tcp connection timeout'
+ ' configuration value %s' %
+ tcp_conn_timeout))
+
+ return tcp_conn_timeout
+
+
+@validate(name=BGP_CONN_RETRY_TIME)
+def validate_bgp_conn_retry_time(bgp_conn_retry_time):
+ if not isinstance(bgp_conn_retry_time, (int, long)):
+ raise ConfigTypeError(desc=('Invalid bgp conn. retry time '
+ 'configuration value %s' %
+ bgp_conn_retry_time))
+
+ if bgp_conn_retry_time < 10:
+ raise ConfigValueError(desc=('Invalid bgp connection retry time'
+ ' configuration value %s' %
+ bgp_conn_retry_time))
+ return bgp_conn_retry_time
+
+
+@validate(name=MAX_PATH_EXT_RTFILTER_ALL)
+def validate_max_path_ext_rtfilter_all(max_path_ext_rtfilter_all):
+ if max_path_ext_rtfilter_all not in (True, False):
+ raise ConfigTypeError(desc=('Invalid max_path_ext_rtfilter_all'
+ ' configuration value %s' %
+ max_path_ext_rtfilter_all))
+ return max_path_ext_rtfilter_all
+
+
+class CommonConf(BaseConf):
+ """Encapsulates configurations applicable to all peer sessions.
+
+    Currently, if any of these configurations change, it is assumed that the
+    currently active peer sessions will be brought down and restarted.
+ """
+ CONF_CHANGED_EVT = 1
+
+ VALID_EVT = frozenset([CONF_CHANGED_EVT])
+
+ REQUIRED_SETTINGS = frozenset([ROUTER_ID, LOCAL_AS])
+
+ OPTIONAL_SETTINGS = frozenset([REFRESH_STALEPATH_TIME,
+ REFRESH_MAX_EOR_TIME,
+ LABEL_RANGE, BGP_SERVER_PORT,
+ TCP_CONN_TIMEOUT,
+ BGP_CONN_RETRY_TIME,
+ MAX_PATH_EXT_RTFILTER_ALL])
+
+ def __init__(self, **kwargs):
+ super(CommonConf, self).__init__(**kwargs)
+
+ def _init_opt_settings(self, **kwargs):
+ super(CommonConf, self)._init_opt_settings(**kwargs)
+ self._settings[LABEL_RANGE] = compute_optional_conf(
+ LABEL_RANGE, DEFAULT_LABEL_RANGE, **kwargs)
+ self._settings[REFRESH_STALEPATH_TIME] = compute_optional_conf(
+ REFRESH_STALEPATH_TIME, DEFAULT_REFRESH_STALEPATH_TIME, **kwargs)
+ self._settings[REFRESH_MAX_EOR_TIME] = compute_optional_conf(
+ REFRESH_MAX_EOR_TIME, DEFAULT_REFRESH_MAX_EOR_TIME, **kwargs)
+ self._settings[BGP_SERVER_PORT] = compute_optional_conf(
+ BGP_SERVER_PORT, DEFAULT_BGP_SERVER_PORT, **kwargs)
+ self._settings[TCP_CONN_TIMEOUT] = compute_optional_conf(
+ TCP_CONN_TIMEOUT, DEFAULT_TCP_CONN_TIMEOUT, **kwargs)
+ self._settings[BGP_CONN_RETRY_TIME] = compute_optional_conf(
+ BGP_CONN_RETRY_TIME, DEFAULT_BGP_CONN_RETRY_TIME, **kwargs)
+ self._settings[MAX_PATH_EXT_RTFILTER_ALL] = compute_optional_conf(
+ MAX_PATH_EXT_RTFILTER_ALL, DEFAULT_MAX_PATH_EXT_RTFILTER_ALL,
+ **kwargs)
+
+ #==========================================================================
+ # Required attributes
+ #==========================================================================
+
+ @property
+ def local_as(self):
+ return self._settings[LOCAL_AS]
+
+ @property
+ def router_id(self):
+ return self._settings[ROUTER_ID]
+
+ #==========================================================================
+ # Optional attributes with valid defaults.
+ #==========================================================================
+
+ @property
+ def bgp_conn_retry_time(self):
+ return self._settings[BGP_CONN_RETRY_TIME]
+
+ @property
+ def tcp_conn_timeout(self):
+ return self._settings[TCP_CONN_TIMEOUT]
+
+ @property
+ def refresh_stalepath_time(self):
+ return self._settings[REFRESH_STALEPATH_TIME]
+
+ @property
+ def refresh_max_eor_time(self):
+ return self._settings[REFRESH_MAX_EOR_TIME]
+
+ @property
+ def label_range(self):
+ return self._settings[LABEL_RANGE]
+
+ @property
+ def bgp_server_port(self):
+ return self._settings[BGP_SERVER_PORT]
+
+ @property
+ def max_path_ext_rtfilter_all(self):
+ return self._settings[MAX_PATH_EXT_RTFILTER_ALL]
+
+ @classmethod
+ def get_opt_settings(self):
+ self_confs = super(CommonConf, self).get_opt_settings()
+ self_confs.update(CommonConf.OPTIONAL_SETTINGS)
+ return self_confs
+
+ @classmethod
+ def get_req_settings(self):
+ self_confs = super(CommonConf, self).get_req_settings()
+ self_confs.update(CommonConf.REQUIRED_SETTINGS)
+ return self_confs
+
+ @classmethod
+ def get_valid_evts(self):
+ self_valid_evts = super(CommonConf, self).get_valid_evts()
+ self_valid_evts.update(CommonConf.VALID_EVT)
+ return self_valid_evts
+
+ def update(self, **kwargs):
+ """Updates global configuration settings with given values.
+
+ First checks if given configuration values differ from current values.
+ If any of the configuration values changed, generates a change event.
+        Currently we generate a change event for any configuration change.
+ Note: This method is idempotent.
+ """
+ # Update inherited configurations
+ super(CommonConf, self).update(**kwargs)
+ conf_changed = False
+
+ # Validate given configurations and check if value changed
+ for conf_name, conf_value in kwargs.items():
+ rtconf.base.get_validator(conf_name)(conf_value)
+ item1 = self._settings.get(conf_name, None)
+ item2 = kwargs.get(conf_name, None)
+
+ if item1 != item2:
+ conf_changed = True
+
+ # If any configuration changed, we update configuration value and
+ # notify listeners
+ if conf_changed:
+ for conf_name, conf_value in kwargs.items():
+ # Since all new values are already validated, we can use them
+ self._settings[conf_name] = conf_value
+
+ self._notify_listeners(CommonConf.CONF_CHANGED_EVT, self)
+
+
+class CommonConfListener(BaseConfListener):
+ """Base listener for various changes to common configurations."""
+
+ def __init__(self, global_conf):
+ super(CommonConfListener, self).__init__(global_conf)
+ global_conf.add_listener(CommonConf.CONF_CHANGED_EVT,
+ self.on_update_common_conf)
+
+ def on_update_common_conf(self, evt):
+ raise NotImplementedError('This method should be overridden.')
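A minimal usage sketch of the class above (not part of the patch); the router
ID and AS number are illustrative values:

    common = CommonConf(router_id='10.0.0.1', local_as=65000)
    common.label_range        # -> (100, 100000), the built-in default
    common.bgp_server_port    # -> 179

    # update() re-validates every given setting and notifies CONF_CHANGED_EVT
    # listeners once if any value actually changed.
    common.update(router_id='10.0.0.1', local_as=65000, bgp_server_port=1790)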
diff --git a/ryu/services/protocols/bgp/rtconf/neighbors.py b/ryu/services/protocols/bgp/rtconf/neighbors.py
new file mode 100644
index 00000000..16ad8846
--- /dev/null
+++ b/ryu/services/protocols/bgp/rtconf/neighbors.py
@@ -0,0 +1,469 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Running or runtime configuration related to bgp peers/neighbors.
+"""
+from abc import abstractmethod
+import logging
+
+from ryu.services.protocols.bgp.base import OrderedDict
+from ryu.services.protocols.bgp.rtconf.base import ADVERTISE_PEER_AS
+from ryu.services.protocols.bgp.rtconf.base import BaseConf
+from ryu.services.protocols.bgp.rtconf.base import BaseConfListener
+from ryu.services.protocols.bgp.rtconf.base import CAP_ENHANCED_REFRESH
+from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV4
+from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV6
+from ryu.services.protocols.bgp.rtconf.base import CAP_REFRESH
+from ryu.services.protocols.bgp.rtconf.base import CAP_RTC
+from ryu.services.protocols.bgp.rtconf.base import compute_optional_conf
+from ryu.services.protocols.bgp.rtconf.base import ConfigTypeError
+from ryu.services.protocols.bgp.rtconf.base import ConfigValueError
+from ryu.services.protocols.bgp.rtconf.base import ConfWithId
+from ryu.services.protocols.bgp.rtconf.base import ConfWithIdListener
+from ryu.services.protocols.bgp.rtconf.base import ConfWithStats
+from ryu.services.protocols.bgp.rtconf.base import ConfWithStatsListener
+from ryu.services.protocols.bgp.rtconf.base import HOLD_TIME
+from ryu.services.protocols.bgp.rtconf.base import MAX_PREFIXES
+from ryu.services.protocols.bgp.rtconf.base import MULTI_EXIT_DISC
+from ryu.services.protocols.bgp.rtconf.base import RTC_AS
+from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
+from ryu.services.protocols.bgp.rtconf.base import SITE_OF_ORIGINS
+from ryu.services.protocols.bgp.rtconf.base import validate
+from ryu.services.protocols.bgp.rtconf.base import validate_med
+from ryu.services.protocols.bgp.rtconf.base import validate_soo_list
+
+from ryu.services.protocols.bgp.protocols.bgp.capabilities import \
+ EnhancedRouteRefreshCap
+from ryu.services.protocols.bgp.protocols.bgp.capabilities import \
+ MultiprotocolExtentionCap
+from ryu.services.protocols.bgp.protocols.bgp.capabilities import \
+ RouteRefreshCap
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv4_VPN
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv6_VPN
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_RTC_UC
+from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4
+from ryu.services.protocols.bgp.utils.validation import is_valid_old_asn
+
+LOG = logging.getLogger('bgpspeaker.rtconf.neighbor')
+
+# Various neighbor settings.
+REMOTE_AS = 'remote_as'
+IP_ADDRESS = 'ip_address'
+ENABLED = 'enabled'
+CHANGES = 'changes'
+LOCAL_ADDRESS = 'local_address'
+LOCAL_PORT = 'local_port'
+
+# Default value constants.
+DEFAULT_CAP_GR_NULL = True
+DEFAULT_CAP_REFRESH = True
+DEFAULT_CAP_ENHANCED_REFRESH = True
+DEFAULT_CAP_MBGP_VPNV4 = True
+DEFAULT_CAP_MBGP_VPNV6 = False
+DEFAULT_HOLD_TIME = 40
+DEFAULT_ENABLED = True
+DEFAULT_CAP_RTC = True
+
+# Default value for *MAX_PREFIXES* setting is set to 0.
+DEFAULT_MAX_PREFIXES = 0
+DEFAULT_ADVERTISE_PEER_AS = False
+
+
+@validate(name=ENABLED)
+def validate_enabled(enabled):
+ if not isinstance(enabled, bool):
+ raise ConfigValueError(desc='Enable property is not an instance of '
+ 'boolean')
+ return enabled
+
+
+@validate(name=CHANGES)
+def validate_changes(changes):
+ for k, v in changes.iteritems():
+ if k not in (MULTI_EXIT_DISC, ENABLED):
+ raise ConfigValueError(desc="Unknown field to change: %s" % k)
+
+ if k == MULTI_EXIT_DISC:
+ validate_med(v)
+ elif k == ENABLED:
+ validate_enabled(v)
+ return changes
+
+
+@validate(name=IP_ADDRESS)
+def validate_ip_address(ip_address):
+ if not is_valid_ipv4(ip_address):
+ raise ConfigValueError(desc='Invalid neighbor ip_address: %s' %
+ ip_address)
+ return ip_address
+
+
+@validate(name=LOCAL_ADDRESS)
+def validate_local_address(ip_address):
+ if not is_valid_ipv4(ip_address):
+ raise ConfigValueError(desc='Invalid local ip_address: %s' %
+ ip_address)
+ return ip_address
+
+
+@validate(name=LOCAL_PORT)
+def validate_local_port(port):
+ if not isinstance(port, (int, long)):
+ raise ConfigTypeError(desc='Invalid local port: %s' % port)
+ if port < 1025 or port > 65535:
+ raise ConfigValueError(desc='Invalid local port value: %s, has to be'
+ ' between 1025 and 65535' % port)
+ return port
+
+
+@validate(name=REMOTE_AS)
+def validate_remote_as(asn):
+ if not is_valid_old_asn(asn):
+ raise ConfigValueError(desc='Invalid remote as value %s' % asn)
+ return asn
+
+
+class NeighborConf(ConfWithId, ConfWithStats):
+ """Class that encapsulates one neighbors' configuration."""
+
+ UPDATE_ENABLED_EVT = 'update_enabled_evt'
+ UPDATE_MED_EVT = 'update_med_evt'
+
+ VALID_EVT = frozenset([UPDATE_ENABLED_EVT, UPDATE_MED_EVT])
+ REQUIRED_SETTINGS = frozenset([REMOTE_AS, IP_ADDRESS, LOCAL_ADDRESS,
+ LOCAL_PORT])
+ OPTIONAL_SETTINGS = frozenset([CAP_REFRESH,
+ CAP_ENHANCED_REFRESH, CAP_MBGP_VPNV4,
+ CAP_MBGP_VPNV6, CAP_RTC, RTC_AS, HOLD_TIME,
+ ENABLED, MULTI_EXIT_DISC, MAX_PREFIXES,
+ ADVERTISE_PEER_AS, SITE_OF_ORIGINS])
+
+ def __init__(self, **kwargs):
+ super(NeighborConf, self).__init__(**kwargs)
+
+ def _init_opt_settings(self, **kwargs):
+ self._settings[CAP_REFRESH] = compute_optional_conf(
+ CAP_REFRESH, DEFAULT_CAP_REFRESH, **kwargs)
+ self._settings[CAP_ENHANCED_REFRESH] = compute_optional_conf(
+ CAP_ENHANCED_REFRESH, DEFAULT_CAP_ENHANCED_REFRESH, **kwargs)
+ self._settings[CAP_MBGP_VPNV4] = compute_optional_conf(
+ CAP_MBGP_VPNV4, DEFAULT_CAP_MBGP_VPNV4, **kwargs)
+ self._settings[CAP_MBGP_VPNV6] = compute_optional_conf(
+ CAP_MBGP_VPNV6, DEFAULT_CAP_MBGP_VPNV6, **kwargs)
+ self._settings[HOLD_TIME] = compute_optional_conf(
+ HOLD_TIME, DEFAULT_HOLD_TIME, **kwargs)
+ self._settings[ENABLED] = compute_optional_conf(
+ ENABLED, DEFAULT_ENABLED, **kwargs)
+ self._settings[MAX_PREFIXES] = compute_optional_conf(
+ MAX_PREFIXES, DEFAULT_MAX_PREFIXES, **kwargs)
+ self._settings[ADVERTISE_PEER_AS] = compute_optional_conf(
+ ADVERTISE_PEER_AS, DEFAULT_ADVERTISE_PEER_AS, **kwargs)
+
+ # We do not have valid default MED value.
+ # If no MED attribute is provided then we do not have to use MED.
+ # If MED attribute is provided we have to validate it and use it.
+ med = kwargs.pop(MULTI_EXIT_DISC, None)
+ if med and validate_med(med):
+ self._settings[MULTI_EXIT_DISC] = med
+
+ # We do not have valid default SOO value.
+ # If no SOO attribute is provided then we do not have to use SOO.
+ # If SOO attribute is provided we have to validate it and use it.
+ soos = kwargs.pop(SITE_OF_ORIGINS, None)
+ if soos and validate_soo_list(soos):
+ self._settings[SITE_OF_ORIGINS] = soos
+
+ # RTC configurations.
+ self._settings[CAP_RTC] = \
+ compute_optional_conf(CAP_RTC, DEFAULT_CAP_RTC, **kwargs)
+ # Default RTC_AS is local (router) AS.
+ from ryu.services.protocols.bgp.speaker.core_manager import \
+ CORE_MANAGER
+ default_rt_as = CORE_MANAGER.common_conf.local_as
+ self._settings[RTC_AS] = \
+ compute_optional_conf(RTC_AS, default_rt_as, **kwargs)
+
+        # Since ConfWithId's default values use str(self) and repr(self), we
+ # call super method after we have initialized other settings.
+ super(NeighborConf, self)._init_opt_settings(**kwargs)
+
+ @classmethod
+ def get_opt_settings(cls):
+ self_confs = super(NeighborConf, cls).get_opt_settings()
+ self_confs.update(NeighborConf.OPTIONAL_SETTINGS)
+ return self_confs
+
+ @classmethod
+ def get_req_settings(cls):
+ self_confs = super(NeighborConf, cls).get_req_settings()
+ self_confs.update(NeighborConf.REQUIRED_SETTINGS)
+ return self_confs
+
+ @classmethod
+ def get_valid_evts(cls):
+ self_valid_evts = super(NeighborConf, cls).get_valid_evts()
+ self_valid_evts.update(NeighborConf.VALID_EVT)
+ return self_valid_evts
+
+ #==========================================================================
+ # Required attributes
+ #==========================================================================
+
+ @property
+ def remote_as(self):
+ return self._settings[REMOTE_AS]
+
+ @property
+ def ip_address(self):
+ return self._settings[IP_ADDRESS]
+
+ @property
+ def host_bind_ip(self):
+ return self._settings[LOCAL_ADDRESS]
+
+ @property
+ def host_bind_port(self):
+ return self._settings[LOCAL_PORT]
+
+ #==========================================================================
+ # Optional attributes with valid defaults.
+ #==========================================================================
+
+ @property
+ def hold_time(self):
+ return self._settings[HOLD_TIME]
+
+ @property
+ def cap_refresh(self):
+ return self._settings[CAP_REFRESH]
+
+ @property
+ def cap_enhanced_refresh(self):
+ return self._settings[CAP_ENHANCED_REFRESH]
+
+ @property
+ def cap_mbgp_vpnv4(self):
+ return self._settings[CAP_MBGP_VPNV4]
+
+ @property
+ def cap_mbgp_vpnv6(self):
+ return self._settings[CAP_MBGP_VPNV6]
+
+ @property
+ def cap_rtc(self):
+ return self._settings[CAP_RTC]
+
+ @property
+ def enabled(self):
+ return self._settings[ENABLED]
+
+ @enabled.setter
+ def enabled(self, enable):
+ # Update enabled flag and notify listeners.
+ if self._settings[ENABLED] != enable:
+ self._settings[ENABLED] = enable
+ self._notify_listeners(NeighborConf.UPDATE_ENABLED_EVT,
+ enable)
+
+ #==========================================================================
+ # Optional attributes with no valid defaults.
+ #==========================================================================
+
+ @property
+ def multi_exit_disc(self):
+ # This property does not have any valid default. Hence if not set we
+ # return None.
+ return self._settings.get(MULTI_EXIT_DISC)
+
+ @multi_exit_disc.setter
+ def multi_exit_disc(self, value):
+ if self._settings.get(MULTI_EXIT_DISC) != value:
+ self._settings[MULTI_EXIT_DISC] = value
+ self._notify_listeners(NeighborConf.UPDATE_MED_EVT, value)
+
+ @property
+ def soo_list(self):
+ soos = self._settings.get(SITE_OF_ORIGINS)
+ if soos:
+ soos = list(soos)
+ else:
+ soos = []
+ return soos
+
+ @property
+ def rtc_as(self):
+ return self._settings[RTC_AS]
+
+ def exceeds_max_prefix_allowed(self, prefix_count):
+ allowed_max = self._settings[MAX_PREFIXES]
+ does_exceed = False
+ # Check if allowed max. is unlimited.
+ if allowed_max != 0:
+ # If max. prefix is limited, check if given exceeds this limit.
+ if prefix_count > allowed_max:
+ does_exceed = True
+
+ return does_exceed
+
+ def get_configured_capabilites(self):
+ """Returns configured capabilities."""
+
+ capabilities = OrderedDict()
+ mbgp_caps = []
+ if self.cap_mbgp_vpnv4:
+ mbgp_caps.append(MultiprotocolExtentionCap(RF_IPv4_VPN))
+
+ if self.cap_mbgp_vpnv6:
+ mbgp_caps.append(MultiprotocolExtentionCap(RF_IPv6_VPN))
+
+ if self.cap_rtc:
+ mbgp_caps.append(MultiprotocolExtentionCap(RF_RTC_UC))
+
+ if mbgp_caps:
+ capabilities[MultiprotocolExtentionCap.CODE] = mbgp_caps
+
+ if self.cap_refresh:
+ capabilities[RouteRefreshCap.CODE] = [
+ RouteRefreshCap.get_singleton()]
+
+ if self.cap_enhanced_refresh:
+ capabilities[EnhancedRouteRefreshCap.CODE] = [
+ EnhancedRouteRefreshCap.get_singleton()]
+
+ return capabilities
+
+ def __repr__(self):
+ return '<%s(%r, %r, %r)>' % (self.__class__.__name__,
+ self.remote_as,
+ self.ip_address,
+ self.enabled)
+
+ def __str__(self):
+ return 'Neighbor: %s' % (self.ip_address)
+
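An illustrative sketch of building a neighbor configuration (not part of the
patch). The 'id' key comes from ConfWithId; note that the RTC AS default is
read from the running speaker's common configuration, so this assumes a
speaker with a local AS has already been set up.

    neigh = NeighborConf(id='neigh-192.168.1.1',
                         remote_as=65001,
                         ip_address='192.168.1.1',
                         local_address='192.168.1.2',
                         local_port=20179,
                         cap_mbgp_vpnv4=True)
    neigh.hold_time                        # -> 40 (DEFAULT_HOLD_TIME)
    neigh.exceeds_max_prefix_allowed(500)  # -> False, 0 means unlimited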
+
+class NeighborsConf(BaseConf):
+ """Container of all neighbor configurations."""
+
+ ADD_NEIGH_CONF_EVT = 'add_neigh_conf_evt'
+ REMOVE_NEIGH_CONF_EVT = 'remove_neigh_conf_evt'
+
+ VALID_EVT = frozenset([ADD_NEIGH_CONF_EVT, REMOVE_NEIGH_CONF_EVT])
+
+ def __init__(self):
+ super(NeighborsConf, self).__init__()
+ self._neighbors = {}
+
+ def _init_opt_settings(self, **kwargs):
+ pass
+
+ def update(self, **kwargs):
+ raise NotImplementedError('Use either add/remove_neighbor_conf'
+ ' methods instead.')
+
+ @property
+ def rtc_as_set(self):
+ """Returns current RTC AS configured for current neighbors.
+ """
+ rtc_as_set = set()
+ for neigh in self._neighbors.itervalues():
+ rtc_as_set.add(neigh.rtc_as)
+ return rtc_as_set
+
+ @classmethod
+ def get_valid_evts(cls):
+ self_valid_evts = super(NeighborsConf, cls).get_valid_evts()
+ self_valid_evts.update(NeighborsConf.VALID_EVT)
+ return self_valid_evts
+
+ def add_neighbor_conf(self, neigh_conf):
+ # Check if we already know this neighbor
+ if neigh_conf.ip_address in self._neighbors.keys():
+ message = 'Neighbor with given ip address already exists'
+ raise RuntimeConfigError(desc=message)
+
+ # Check if this neighbor's host address overlaps with other neighbors
+ for nconf in self._neighbors.itervalues():
+ if ((neigh_conf.host_bind_ip, neigh_conf.host_bind_port) ==
+ (nconf.host_bind_ip, nconf.host_bind_port)):
+ raise RuntimeConfigError(desc='Given host_bind_ip and '
+ 'host_bind_port already taken')
+
+ # Add this neighbor to known configured neighbors and generate update
+ # event
+ self._neighbors[neigh_conf.ip_address] = neigh_conf
+ self._notify_listeners(NeighborsConf.ADD_NEIGH_CONF_EVT, neigh_conf)
+
+ def remove_neighbor_conf(self, neigh_ip_address):
+ neigh_conf = self._neighbors.pop(neigh_ip_address, None)
+ if not neigh_conf:
+ raise RuntimeConfigError(desc='Tried to remove a neighbor that '
+                                     'does not exist')
+ else:
+ self._notify_listeners(NeighborsConf.REMOVE_NEIGH_CONF_EVT,
+ neigh_conf)
+ return neigh_conf
+
+ def get_neighbor_conf(self, neigh_ip_address):
+ return self._neighbors.get(neigh_ip_address, None)
+
+ def __repr__(self):
+ return '<%s(%r)>' % (self.__class__.__name__, self._neighbors)
+
+ def __str__(self):
+ return '\'Neighbors\': %s' % self._neighbors
+
+ @property
+ def settings(self):
+ return [neighbor.settings for _, neighbor in
+ self._neighbors.iteritems()]
+
+
+class NeighborConfListener(ConfWithIdListener, ConfWithStatsListener):
+ """Base listener for change events to a specific neighbors' configurations.
+ """
+ def __init__(self, neigh_conf):
+ super(NeighborConfListener, self).__init__(neigh_conf)
+ neigh_conf.add_listener(NeighborConf.UPDATE_ENABLED_EVT,
+ self.on_update_enabled)
+ neigh_conf.add_listener(NeighborConf.UPDATE_MED_EVT,
+ self.on_update_med)
+
+ @abstractmethod
+ def on_update_enabled(self, evt):
+ raise NotImplementedError('This method should be overridden.')
+
+ def on_update_med(self, evt):
+ raise NotImplementedError('This method should be overridden.')
+
+
+class NeighborsConfListener(BaseConfListener):
+ """Base listener for change events to neighbor configuration container."""
+
+ def __init__(self, neighbors_conf):
+ super(NeighborsConfListener, self).__init__(neighbors_conf)
+ neighbors_conf.add_listener(NeighborsConf.ADD_NEIGH_CONF_EVT,
+ self.on_add_neighbor_conf)
+ neighbors_conf.add_listener(NeighborsConf.REMOVE_NEIGH_CONF_EVT,
+ self.on_remove_neighbor_conf)
+
+ @abstractmethod
+ def on_add_neighbor_conf(self, evt):
+ raise NotImplementedError('This method should be overridden.')
+
+ @abstractmethod
+ def on_remove_neighbor_conf(self, evt):
+ raise NotImplementedError('This method should be overridden.')
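Continuing the sketch above, the container tracks neighbors by IP address and
notifies its listeners on add/remove (illustrative only):

    neighbors = NeighborsConf()
    neighbors.add_neighbor_conf(neigh)                   # ADD_NEIGH_CONF_EVT
    neighbors.get_neighbor_conf('192.168.1.1') is neigh  # -> True
    neighbors.remove_neighbor_conf('192.168.1.1')        # REMOVE_NEIGH_CONF_EVT
    # Adding another NeighborConf with the same ip_address, or with the same
    # (local_address, local_port) pair, raises RuntimeConfigError.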
diff --git a/ryu/services/protocols/bgp/rtconf/vrfs.py b/ryu/services/protocols/bgp/rtconf/vrfs.py
new file mode 100644
index 00000000..6b7b60c4
--- /dev/null
+++ b/ryu/services/protocols/bgp/rtconf/vrfs.py
@@ -0,0 +1,551 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Running or runtime configuration related to Virtual Routing and Forwarding
+ tables (VRFs).
+"""
+import abc
+import logging
+
+from ryu.services.protocols.bgp.protocols.bgp.pathattr import ExtCommunity
+from ryu.services.protocols.bgp.utils import validation
+
+from ryu.services.protocols.bgp.base import get_validator
+from ryu.services.protocols.bgp.rtconf.base import BaseConf
+from ryu.services.protocols.bgp.rtconf.base import BaseConfListener
+from ryu.services.protocols.bgp.rtconf.base import ConfigTypeError
+from ryu.services.protocols.bgp.rtconf.base import ConfigValueError
+from ryu.services.protocols.bgp.rtconf.base import ConfWithId
+from ryu.services.protocols.bgp.rtconf.base import ConfWithIdListener
+from ryu.services.protocols.bgp.rtconf.base import ConfWithStats
+from ryu.services.protocols.bgp.rtconf.base import ConfWithStatsListener
+from ryu.services.protocols.bgp.rtconf.base import MAX_NUM_EXPORT_RT
+from ryu.services.protocols.bgp.rtconf.base import MAX_NUM_IMPORT_RT
+from ryu.services.protocols.bgp.rtconf.base import MULTI_EXIT_DISC
+from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
+from ryu.services.protocols.bgp.rtconf.base import SITE_OF_ORIGINS
+from ryu.services.protocols.bgp.rtconf.base import validate
+from ryu.services.protocols.bgp.rtconf.base import validate_med
+from ryu.services.protocols.bgp.rtconf.base import validate_soo_list
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv4_UC
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv6_UC
+
+
+LOG = logging.getLogger('bgpspeaker.rtconf.vrfs')
+
+# Configuration setting names.
+ROUTE_DISTINGUISHER = 'route_dist'
+IMPORT_RTS = 'import_rts'
+EXPORT_RTS = 'export_rts'
+VRF_NAME = 'vrf_name'
+VRF_DESC = 'vrf_desc'
+VRF_RF = 'route_family'
+IMPORT_MAPS = 'import_maps'
+
+# Two supported VRF route-families
+VRF_RF_IPV6 = 'ipv6'
+VRF_RF_IPV4 = 'ipv4'
+SUPPORTED_VRF_RF = (VRF_RF_IPV4, VRF_RF_IPV6)
+
+
+# Default configuration values.
+DEFAULT_VRF_NAME = 'no-vrf-name'
+DEFAULT_VRF_DESC = 'no-vrf-desc'
+
+
+@validate(name=IMPORT_RTS)
+def validate_import_rts(import_rts):
+ if not isinstance(import_rts, list):
+ raise ConfigTypeError(conf_name=IMPORT_RTS, conf_value=import_rts)
+ if not (len(import_rts) <= MAX_NUM_IMPORT_RT):
+ raise ConfigValueError(desc='Max. import RT is limited to %s' %
+ MAX_NUM_IMPORT_RT)
+ try:
+ ExtCommunity.validate_supported_attributes(import_rts)
+ except ValueError:
+ raise ConfigValueError(conf_name=IMPORT_RTS,
+ conf_value=import_rts)
+ # Check if we have duplicates
+ unique_rts = set(import_rts)
+ if len(unique_rts) != len(import_rts):
+        raise ConfigValueError(desc='Duplicate value provided in %s' %
+ (import_rts))
+
+ return import_rts
+
+
+@validate(name=EXPORT_RTS)
+def validate_export_rts(export_rts):
+ if not isinstance(export_rts, list):
+ raise ConfigTypeError(conf_name=EXPORT_RTS, conf_value=export_rts)
+ if not (len(export_rts) <= MAX_NUM_EXPORT_RT):
+        raise ConfigValueError(desc='Max. export RT is limited to %s' %
+ MAX_NUM_EXPORT_RT)
+ try:
+ ExtCommunity.validate_supported_attributes(export_rts)
+ except Exception:
+ raise ConfigValueError(conf_name=EXPORT_RTS, conf_value=export_rts)
+ # Check if we have duplicates
+ unique_rts = set(export_rts)
+ if len(unique_rts) != len(export_rts):
+ raise ConfigValueError(desc='Duplicate value provided in %s' %
+ (export_rts))
+ return export_rts
+
+
+@validate(name=ROUTE_DISTINGUISHER)
+def validate_rd(route_disc):
+ if not isinstance(route_disc, str):
+ raise ConfigTypeError(conf_name=ROUTE_DISTINGUISHER,
+ conf_value=route_disc)
+
+ if not validation.is_valid_route_disc(route_disc):
+ raise ConfigValueError(conf_name=ROUTE_DISTINGUISHER,
+ conf_value=route_disc)
+ return route_disc
+
+
+@validate(name=VRF_RF)
+def validate_vrf_rf(vrf_rf):
+ if vrf_rf not in SUPPORTED_VRF_RF:
+        raise ConfigValueError(desc='Given VRF route family %s is not '
+ 'supported.' % vrf_rf)
+ return vrf_rf
+
+
+class VrfConf(ConfWithId, ConfWithStats):
+ """Class that encapsulates configurations for one VRF."""
+
+ VRF_CHG_EVT = 'vrf_chg_evt'
+
+ VALID_EVT = frozenset([VRF_CHG_EVT])
+
+ REQUIRED_SETTINGS = frozenset([ROUTE_DISTINGUISHER,
+ IMPORT_RTS,
+ EXPORT_RTS])
+
+ OPTIONAL_SETTINGS = frozenset(
+ [MULTI_EXIT_DISC, SITE_OF_ORIGINS, VRF_RF, IMPORT_MAPS]
+ )
+
+ def __init__(self, **kwargs):
+ """Create an instance of VRF runtime configuration."""
+ super(VrfConf, self).__init__(**kwargs)
+
+ def _init_opt_settings(self, **kwargs):
+ super(VrfConf, self)._init_opt_settings(**kwargs)
+ # We do not have valid default MED value.
+ # If no MED attribute is provided then we do not have to use MED.
+ # If MED attribute is provided we have to validate it and use it.
+ med = kwargs.pop(MULTI_EXIT_DISC, None)
+ if med and validate_med(med):
+ self._settings[MULTI_EXIT_DISC] = med
+
+ # We do not have valid default SOO value.
+ # If no SOO attribute is provided then we do not have to use SOO.
+ # If SOO attribute is provided we have to validate it and use it.
+ soos = kwargs.pop(SITE_OF_ORIGINS, None)
+ if soos and validate_soo_list(soos):
+ self._settings[SITE_OF_ORIGINS] = soos
+
+        # Currently we only support VRFs for IPv4 and IPv6, with IPv4 as the
+        # default.
+ vrf_rf = kwargs.pop(VRF_RF, VRF_RF_IPV4)
+ if vrf_rf and validate_vrf_rf(vrf_rf):
+ self._settings[VRF_RF] = vrf_rf
+
+ import_maps = kwargs.pop(IMPORT_MAPS, [])
+ self._settings[IMPORT_MAPS] = import_maps
+
+ #==========================================================================
+ # Required attributes
+ #==========================================================================
+
+ @property
+ def route_dist(self):
+ return self._settings[ROUTE_DISTINGUISHER]
+
+ #==========================================================================
+ # Optional attributes with valid defaults.
+ #==========================================================================
+
+ @property
+ def import_rts(self):
+ return list(self._settings[IMPORT_RTS])
+
+ @property
+ def export_rts(self):
+ return list(self._settings[EXPORT_RTS])
+
+ @property
+ def soo_list(self):
+ soos = self._settings.get(SITE_OF_ORIGINS)
+ if soos:
+ soos = list(soos)
+ else:
+ soos = []
+ return soos
+
+ @property
+ def multi_exit_disc(self):
+ """Returns configured value of MED, else None.
+
+        This configuration does not have a default value.
+ """
+ return self._settings.get(MULTI_EXIT_DISC)
+
+ @property
+ def route_family(self):
+ """Returns configured route family for this VRF
+
+ This configuration does not change.
+ """
+ return self._settings.get(VRF_RF)
+
+ @property
+ def rd_rf_id(self):
+ return VrfConf.create_rd_rf_id(self.route_dist, self.route_family)
+
+ @property
+ def import_maps(self):
+ return self._settings.get(IMPORT_MAPS)
+
+ @staticmethod
+ def create_rd_rf_id(route_dist, route_family):
+ return route_dist, route_family
+
+ @staticmethod
+ def vrf_rf_2_rf(vrf_rf):
+ if vrf_rf == VRF_RF_IPV4:
+ return RF_IPv4_UC
+ elif vrf_rf == VRF_RF_IPV6:
+ return RF_IPv6_UC
+ else:
+ raise ValueError('Unsupported VRF route family given %s' % vrf_rf)
+
+ @staticmethod
+ def rf_2_vrf_rf(route_family):
+ if route_family == RF_IPv4_UC:
+ return VRF_RF_IPV4
+ elif route_family == RF_IPv6_UC:
+ return VRF_RF_IPV6
+ else:
+ raise ValueError('No supported mapping for route family '
+ 'to vrf_route_family exists for %s' %
+ route_family)
+
+ @property
+ def settings(self):
+ """Returns a copy of current settings.
+
+ As some of the attributes are themselves containers, we clone the
+ settings to provide clones for those containers as well.
+ """
+ # Shallow copy first
+ cloned_setting = self._settings.copy()
+ # Don't want clone to link to same RT containers
+ cloned_setting[IMPORT_RTS] = self.import_rts
+ cloned_setting[EXPORT_RTS] = self.export_rts
+ cloned_setting[SITE_OF_ORIGINS] = self.soo_list
+ return cloned_setting
+
+ @classmethod
+ def get_opt_settings(cls):
+ self_confs = super(VrfConf, cls).get_opt_settings()
+ self_confs.update(VrfConf.OPTIONAL_SETTINGS)
+ return self_confs
+
+ @classmethod
+ def get_req_settings(cls):
+ self_confs = super(VrfConf, cls).get_req_settings()
+ self_confs.update(VrfConf.REQUIRED_SETTINGS)
+ return self_confs
+
+ @classmethod
+ def get_valid_evts(cls):
+ self_valid_evts = super(VrfConf, cls).get_valid_evts()
+ self_valid_evts.update(VrfConf.VALID_EVT)
+ return self_valid_evts
+
+ def update(self, **kwargs):
+ """Updates this `VrfConf` settings.
+
+ Notifies listeners if any settings changed. Returns `True` if update
+ was successful. This vrfs' route family, id and route dist settings
+ cannot be updated/changed.
+ """
+ # Update inherited configurations
+ super(VrfConf, self).update(**kwargs)
+ vrf_id = kwargs.get(ConfWithId.ID)
+ vrf_rd = kwargs.get(ROUTE_DISTINGUISHER)
+ vrf_rf = kwargs.get(VRF_RF)
+ if (vrf_id != self.id or
+ vrf_rd != self.route_dist or
+ vrf_rf != self.route_family):
+ raise ConfigValueError(desc='id/route-distinguisher/route-family'
+ ' do not match configured value.')
+
+ # Validate and update individual settings
+ new_imp_rts, old_imp_rts = \
+ self._update_import_rts(**kwargs)
+ export_rts_changed = self._update_export_rts(**kwargs)
+ soos_list_changed = self._update_soo_list(**kwargs)
+ med_changed = self._update_med(**kwargs)
+ re_export_needed = (export_rts_changed or
+ soos_list_changed or
+ med_changed)
+ import_maps = kwargs.get(IMPORT_MAPS, [])
+ re_import_needed = self._update_importmaps(import_maps)
+
+ # If we did have any change in value of any settings, we notify
+ # listeners
+ if (new_imp_rts is not None or
+ old_imp_rts is not None or
+ re_export_needed or re_import_needed):
+ evt_value = (
+ new_imp_rts,
+ old_imp_rts,
+ import_maps,
+ re_export_needed,
+ re_import_needed
+ )
+ self._notify_listeners(VrfConf.VRF_CHG_EVT, evt_value)
+ return True
+
+ def _update_import_rts(self, **kwargs):
+ import_rts = kwargs.get(IMPORT_RTS)
+ get_validator(IMPORT_RTS)(import_rts)
+ curr_import_rts = set(self._settings[IMPORT_RTS])
+
+ import_rts = set(import_rts)
+ if not import_rts.symmetric_difference(curr_import_rts):
+ return (None, None)
+
+ # Get the difference between current and new RTs
+ new_import_rts = import_rts - curr_import_rts
+ old_import_rts = curr_import_rts - import_rts
+
+ # Update current RTs and notify listeners.
+ self._settings[IMPORT_RTS] = import_rts
+ return (new_import_rts, old_import_rts)
+
+ def _update_export_rts(self, **kwargs):
+ export_rts = kwargs.get(EXPORT_RTS)
+ get_validator(EXPORT_RTS)(export_rts)
+ curr_export_rts = set(self._settings[EXPORT_RTS])
+
+ if curr_export_rts.symmetric_difference(export_rts):
+ # Update current RTs and notify listeners.
+ self._settings[EXPORT_RTS] = list(export_rts)
+ return True
+
+ return False
+
+ def _update_soo_list(self, **kwargs):
+ soo_list = kwargs.get(SITE_OF_ORIGINS, [])
+ get_validator(SITE_OF_ORIGINS)(soo_list)
+ curr_soos = set(self.soo_list)
+
+ # If given list is different from existing settings, we update it
+ if curr_soos.symmetric_difference(soo_list):
+ self._settings[SITE_OF_ORIGINS] = soo_list[:]
+ return True
+
+ return False
+
+ def _update_med(self, **kwargs):
+ multi_exit_disc = kwargs.get(MULTI_EXIT_DISC, None)
+ if multi_exit_disc:
+ get_validator(MULTI_EXIT_DISC)(multi_exit_disc)
+
+ if multi_exit_disc != self.multi_exit_disc:
+ self._settings[MULTI_EXIT_DISC] = multi_exit_disc
+ return True
+
+ return False
+
+ def _update_importmaps(self, import_maps):
+ if set(self._settings[IMPORT_MAPS]).symmetric_difference(import_maps):
+ self._settings[IMPORT_MAPS] = import_maps
+ return True
+
+ return False
+
+ def __repr__(self):
+ return ('<%s(route_dist: %r, import_rts: %r, export_rts: %r, '
+ 'soo_list: %r)>' % (self.__class__.__name__,
+ self.route_dist, self.import_rts,
+ self.export_rts, self.soo_list))
+
+ def __str__(self):
+ return ('VrfConf-%s' % (self.route_dist))
+
+
+class VrfsConf(BaseConf):
+ """Container for all VRF configurations."""
+
+ ADD_VRF_CONF_EVT, REMOVE_VRF_CONF_EVT = xrange(2)
+
+ VALID_EVT = frozenset([ADD_VRF_CONF_EVT, REMOVE_VRF_CONF_EVT])
+
+ def __init__(self):
+ super(VrfsConf, self).__init__()
+ self._vrfs_by_rd_rf = {}
+ self._vrfs_by_id = {}
+
+ def _init_opt_settings(self, **kwargs):
+ pass
+
+ @property
+ def vrf_confs(self):
+ """Returns a list of configured `VrfConf`s
+ """
+ return self._vrfs_by_rd_rf.values()
+
+ @property
+ def vrf_interested_rts(self):
+ interested_rts = set()
+ for vrf_conf in self._vrfs_by_id.values():
+ interested_rts.update(vrf_conf.import_rts)
+ return interested_rts
+
+ def update(self, **kwargs):
+ raise NotImplementedError('Use either add/remove_vrf_conf'
+ ' methods instead.')
+
+ def add_vrf_conf(self, vrf_conf):
+ if vrf_conf.rd_rf_id in self._vrfs_by_rd_rf.keys():
+ raise RuntimeConfigError(
+ desc='VrfConf with rd_rf %s already exists'
+ % str(vrf_conf.rd_rf_id)
+ )
+ if vrf_conf.id in self._vrfs_by_id:
+ raise RuntimeConfigError(
+ desc='VrfConf with id %s already exists' % str(vrf_conf.id)
+ )
+
+ self._vrfs_by_rd_rf[vrf_conf.rd_rf_id] = vrf_conf
+ self._vrfs_by_id[vrf_conf.id] = vrf_conf
+ self._notify_listeners(VrfsConf.ADD_VRF_CONF_EVT, vrf_conf)
+
+ def remove_vrf_conf(self, route_dist=None, vrf_id=None,
+ vrf_rf=None):
+ """Removes any matching `VrfConf` for given `route_dist` or `vrf_id`
+
+        Parameters:
+            - `route_dist`: (str) route distinguisher of a configured VRF
+            - `vrf_id`: (str) vrf ID
+            - `vrf_rf`: (str) route family of the VRF configuration
+        If only `route_dist` is given, removes `VrfConf`s for all supported
+        address families for this `route_dist`. If `vrf_rf` is given, then
+        only the `VrfConf` for that specific route family is removed. If only
+        `vrf_id` is given, the matching `VrfConf` is removed.
+ """
+ if route_dist is None and vrf_id is None:
+ raise RuntimeConfigError(desc='To delete supply route_dist or id.')
+
+ # By default we remove all VRFs for given Id or RD
+ vrf_rfs = SUPPORTED_VRF_RF
+ # If asked to delete specific route family vrf conf.
+ if vrf_rf:
+            vrf_rfs = (vrf_rf,)
+
+ # For all vrf route family asked to be deleted, we collect all deleted
+ # VrfConfs
+ removed_vrf_confs = []
+ for route_family in vrf_rfs:
+ if route_dist is not None:
+ rd_rf_id = VrfConf.create_rd_rf_id(route_dist, route_family)
+ vrf_conf = self._vrfs_by_rd_rf.pop(rd_rf_id, None)
+ if vrf_conf:
+ self._vrfs_by_id.pop(vrf_conf.id, None)
+ removed_vrf_confs.append(vrf_conf)
+ else:
+ vrf_conf = self._vrfs_by_id.pop(vrf_id, None)
+ if vrf_conf:
+                    self._vrfs_by_rd_rf.pop(vrf_conf.rd_rf_id, None)
+ removed_vrf_confs.append(vrf_conf)
+
+ # We do not raise any exception if we cannot find asked VRF.
+ for vrf_conf in removed_vrf_confs:
+ self._notify_listeners(VrfsConf.REMOVE_VRF_CONF_EVT, vrf_conf)
+ return removed_vrf_confs
+
+ def get_vrf_conf(self, route_dist, vrf_rf, vrf_id=None):
+ if route_dist is None and vrf_id is None:
+ raise RuntimeConfigError(desc='To get VRF supply route_dist '
+ 'or vrf_id.')
+ vrf = None
+ if route_dist is not None and vrf_id is not None:
+ vrf1 = self._vrfs_by_id.get(vrf_id)
+ rd_rf_id = VrfConf.create_rd_rf_id(route_dist, vrf_rf)
+ vrf2 = self._vrfs_by_rd_rf.get(rd_rf_id)
+ if vrf1 is not vrf2:
+ raise RuntimeConfigError(desc='Given VRF ID (%s) and RD (%s)'
+ ' are not of same VRF.' %
+ (vrf_id, route_dist))
+ vrf = vrf1
+ elif route_dist is not None:
+ rd_rf_id = VrfConf.create_rd_rf_id(route_dist, vrf_rf)
+ vrf = self._vrfs_by_rd_rf.get(rd_rf_id)
+ else:
+ vrf = self._vrfs_by_id.get(vrf_id)
+ return vrf
+
+ @property
+ def vrfs_by_rd_rf_id(self):
+ return dict(self._vrfs_by_rd_rf)
+
+ @classmethod
+ def get_valid_evts(self):
+ self_valid_evts = super(VrfsConf, self).get_valid_evts()
+ self_valid_evts.update(VrfsConf.VALID_EVT)
+ return self_valid_evts
+
+ def __repr__(self):
+ return '<%s(%r)>' % (self.__class__.__name__, self._vrfs_by_id)
+
+ @property
+ def settings(self):
+ return [vrf.settings for vrf in self._vrfs_by_id.values()]
+
+
+class VrfConfListener(ConfWithIdListener, ConfWithStatsListener):
+ """Base listener for various VRF configuration change event."""
+
+ def __init__(self, vrf_conf):
+ super(VrfConfListener, self).__init__(vrf_conf)
+ vrf_conf.add_listener(VrfConf.VRF_CHG_EVT, self.on_chg_vrf_conf)
+
+ def on_chg_vrf_conf(self, evt):
+ raise NotImplementedError('This method should be overridden')
+
+
+class VrfsConfListener(BaseConfListener):
+ """Base listener for VRF container change events."""
+
+ def __init__(self, vrfs_conf):
+ super(VrfsConfListener, self).__init__(vrfs_conf)
+ vrfs_conf.add_listener(VrfsConf.ADD_VRF_CONF_EVT, self.on_add_vrf_conf)
+ vrfs_conf.add_listener(VrfsConf.REMOVE_VRF_CONF_EVT,
+ self.on_remove_vrf_conf)
+
+ @abc.abstractmethod
+ def on_add_vrf_conf(self, evt):
+ raise NotImplementedError('This method should be overridden')
+
+ @abc.abstractmethod
+ def on_remove_vrf_conf(self, evt):
+ raise NotImplementedError('This method should be overridden')
diff --git a/ryu/services/protocols/bgp/signals/__init__.py b/ryu/services/protocols/bgp/signals/__init__.py
new file mode 100644
index 00000000..0b69c99d
--- /dev/null
+++ b/ryu/services/protocols/bgp/signals/__init__.py
@@ -0,0 +1,5 @@
+__author__ = 'yak'
+
+from ryu.services.protocols.bgp.signals.base import SignalBus
+
+__all__ = ['SignalBus']
diff --git a/ryu/services/protocols/bgp/signals/base.py b/ryu/services/protocols/bgp/signals/base.py
new file mode 100644
index 00000000..9a0b0e16
--- /dev/null
+++ b/ryu/services/protocols/bgp/signals/base.py
@@ -0,0 +1,33 @@
+import logging
+LOG = logging.getLogger('bgpspeaker.signals.base')
+
+
+class SignalBus(object):
+ def __init__(self):
+ self._listeners = {}
+
+ def emit_signal(self, identifier, data):
+ identifier = _to_tuple(identifier)
+        LOG.debug('SIGNAL: %s emitted with data: %s' % (identifier, data))
+ for func, filter_func in self._listeners.get(identifier, []):
+ if not filter_func or filter_func(data):
+ func(identifier, data)
+
+ def register_listener(self, identifier, func, filter_func=None):
+ identifier = _to_tuple(identifier)
+ substrings = (identifier[:i] for i in xrange(1, len(identifier) + 1))
+ for partial_id in substrings:
+ self._listeners.setdefault(
+ partial_id,
+ []
+ ).append((func, filter_func))
+
+ def unregister_all(self):
+ self._listeners = {}
+
+
+def _to_tuple(tuple_or_not):
+ if not isinstance(tuple_or_not, tuple):
+ return (tuple_or_not, )
+ else:
+ return tuple_or_not
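+
+
+# A minimal usage sketch (the listener and identifier below are illustrative,
+# not part of this module):
+#
+#   bus = SignalBus()
+#
+#   def on_error(identifier, data):
+#       LOG.debug('%s: %s', identifier, data)
+#
+#   bus.register_listener(('error', 'bgp'), on_error)
+#   bus.emit_signal(('error', 'bgp'), {'code': 6, 'subcode': 2})
+#
+# register_listener() also records the listener under every prefix of the
+# given identifier, so on_error above would also fire for a signal emitted
+# with the identifier ('error',).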
diff --git a/ryu/services/protocols/bgp/signals/emit.py b/ryu/services/protocols/bgp/signals/emit.py
new file mode 100644
index 00000000..7f41c93f
--- /dev/null
+++ b/ryu/services/protocols/bgp/signals/emit.py
@@ -0,0 +1,55 @@
+from ryu.services.protocols.bgp.signals import SignalBus
+
+
+class BgpSignalBus(SignalBus):
+ BGP_ERROR = ('error', 'bgp')
+ BGP_DEST_CHANGED = ('core', 'dest', 'changed')
+ BGP_VRF_REMOVED = ('core', 'vrf', 'removed')
+ BGP_VRF_ADDED = ('core', 'vrf', 'added')
+ BGP_NOTIFICATION_RECEIVED = ('bgp', 'notification_received')
+ BGP_NOTIFICATION_SENT = ('bgp', 'notification_sent')
+ BGP_VRF_STATS_CONFIG_CHANGED = (
+ 'core', 'vrf', 'config', 'stats', 'changed'
+ )
+
+ def bgp_error(self, peer, code, subcode, reason):
+ return self.emit_signal(
+ self.BGP_ERROR + (peer, ),
+ {'code': code, 'subcode': subcode, 'reason': reason, 'peer': peer}
+ )
+
+ def bgp_notification_received(self, peer, notification):
+ return self.emit_signal(
+ self.BGP_NOTIFICATION_RECEIVED + (peer,),
+ notification
+ )
+
+ def bgp_notification_sent(self, peer, notification):
+ return self.emit_signal(
+ self.BGP_NOTIFICATION_SENT + (peer,),
+ notification
+ )
+
+ def dest_changed(self, dest):
+ return self.emit_signal(
+ self.BGP_DEST_CHANGED,
+ dest
+ )
+
+ def vrf_removed(self, route_dist):
+ return self.emit_signal(
+ self.BGP_VRF_REMOVED,
+ route_dist
+ )
+
+ def vrf_added(self, vrf_conf):
+ return self.emit_signal(
+ self.BGP_VRF_ADDED,
+ vrf_conf
+ )
+
+ def stats_config_changed(self, vrf_conf):
+ return self.emit_signal(
+ self.BGP_VRF_STATS_CONFIG_CHANGED,
+ vrf_conf
+ )
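+
+    # A minimal usage sketch (the callback below is illustrative):
+    #
+    #   bus = BgpSignalBus()
+    #
+    #   def on_dest_changed(identifier, dest):
+    #       pass  # react to the changed destination here
+    #
+    #   bus.register_listener(BgpSignalBus.BGP_DEST_CHANGED, on_dest_changed)
+    #   bus.dest_changed(dest)  # emits BGP_DEST_CHANGED to the listener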
diff --git a/ryu/services/protocols/bgp/speaker.py b/ryu/services/protocols/bgp/speaker.py
new file mode 100644
index 00000000..320db03e
--- /dev/null
+++ b/ryu/services/protocols/bgp/speaker.py
@@ -0,0 +1,596 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ BGP protocol implementation.
+"""
+import logging
+import socket
+import struct
+import traceback
+
+from ryu.services.protocols.bgp.base import Activity
+from ryu.services.protocols.bgp.base import add_bgp_error_metadata
+from ryu.services.protocols.bgp.base import BGPSException
+from ryu.services.protocols.bgp.base import CORE_ERROR_CODE
+from ryu.services.protocols.bgp.constants import BGP_FSM_CONNECT
+from ryu.services.protocols.bgp.constants import BGP_FSM_OPEN_CONFIRM
+from ryu.services.protocols.bgp.constants import BGP_FSM_OPEN_SENT
+from ryu.services.protocols.bgp.constants import BGP_VERSION_NUM
+from ryu.services.protocols.bgp.protocol import Protocol
+from ryu.services.protocols.bgp.protocols.bgp.capabilities import \
+ EnhancedRouteRefreshCap
+from ryu.services.protocols.bgp.protocols.bgp.capabilities import \
+ MultiprotocolExtentionCap
+from ryu.services.protocols.bgp.protocols.bgp.capabilities import \
+ RouteRefreshCap
+import ryu.services.protocols.bgp.protocols.bgp.exceptions as exceptions
+from ryu.services.protocols.bgp.protocols.bgp.exceptions import BgpExc
+from ryu.services.protocols.bgp.protocols.bgp import messages
+from ryu.services.protocols.bgp.protocols.bgp.messages import Keepalive
+from ryu.services.protocols.bgp.protocols.bgp.messages import Notification
+from ryu.services.protocols.bgp.protocols.bgp.messages import Open
+from ryu.services.protocols.bgp.protocols.bgp.messages import RouteRefresh
+from ryu.services.protocols.bgp.protocols.bgp.messages import Update
+from ryu.services.protocols.bgp.protocols.bgp import nlri
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_RTC_UC
+from ryu.services.protocols.bgp.utils.validation import is_valid_old_asn
+
+
+LOG = logging.getLogger('bgpspeaker.speaker')
+
+# BGP min. and max. message lengths as per RFC.
+BGP_MIN_MSG_LEN = 19
+BGP_MAX_MSG_LEN = 4096
+
+# Keep-alive singleton.
+_KEEP_ALIVE = Keepalive()
+
+
+@add_bgp_error_metadata(code=CORE_ERROR_CODE, sub_code=2,
+ def_desc='Unknown error occurred related to Speaker.')
+class BgpProtocolException(BGPSException):
+ """Base exception related to peer connection management.
+ """
+ pass
+
+
+def nofitication_factory(code, subcode):
+ """Returns a `Notification` message corresponding to given codes.
+
+ Parameters:
+ - `code`: (int) BGP error code
+ - `subcode`: (int) BGP error sub-code
+ """
+ reason = Notification.REASONS.get((code, subcode))
+ if not reason:
+ raise ValueError('Invalid code/sub-code.')
+
+ return Notification(code, subcode)
+
+
+class BgpProtocol(Protocol, Activity):
+ """Protocol that handles BGP messages.
+ """
+ MESSAGE_MARKER = ('\xff\xff\xff\xff\xff\xff\xff\xff'
+ '\xff\xff\xff\xff\xff\xff\xff\xff')
+
+ def __init__(self, socket, signal_bus, is_reactive_conn=False):
+ # Validate input.
+ if socket is None:
+ raise ValueError('Invalid arguments passed.')
+ activity_name = ('BgpProtocol %s, %s, %s' % (
+ is_reactive_conn, socket.getpeername(), socket.getsockname())
+ )
+ Activity.__init__(self, name=activity_name)
+        # Initialize instance variables.
+ self._peer = None
+ self._recv_buff = ''
+ self._socket = socket
+ self._signal_bus = signal_bus
+ self._holdtime = None
+ self._keepalive = None
+ self._expiry = None
+ # Add socket to Activity's socket container for managing it.
+ if is_reactive_conn:
+ self._asso_socket_map['passive_conn'] = self._socket
+ else:
+ self._asso_socket_map['active_conn'] = self._socket
+ self._open_msg = None
+ self.state = BGP_FSM_CONNECT
+ self._is_reactive = is_reactive_conn
+ self.sent_open_msg = None
+ self.recv_open_msg = None
+ self._is_bound = False
+
+ def get_peername(self):
+ return self._socket.getpeername()
+
+ def get_sockname(self):
+ return self._socket.getsockname()
+
+ @property
+ def is_reactive(self):
+ return self._is_reactive
+
+ @property
+ def holdtime(self):
+ return self._holdtime
+
+ @property
+ def keepalive(self):
+ return self._keepalive
+
+ def is_colliding(self, other_protocol):
+ if not isinstance(other_protocol, BgpProtocol):
+ raise ValueError('Currently only support comparing with '
+ '`BgpProtocol`')
+
+ # Compare protocol connection end point's addresses
+ if (self.get_peername()[0] == other_protocol.get_peername()[0] and
+ self.get_sockname()[0] == other_protocol.get_sockname()[0]):
+ return True
+
+ return False
+
+ def is_local_router_id_greater(self):
+ """Compares *True* if local router id is greater when compared to peer
+ bgp id.
+
+ Should only be called after protocol has reached OpenConfirm state.
+ """
+        from ryu.services.protocols.bgp.utils.bgp import from_inet_ptoi
+
+ if not self.state == BGP_FSM_OPEN_CONFIRM:
+ raise BgpProtocolException(desc='Can access remote router id only'
+ ' after open message is received')
+ remote_id = self.recv_open_msg.bgpid
+ local_id = self.sent_open_msg.bgpid
+ return from_inet_ptoi(local_id) > from_inet_ptoi(remote_id)
+
+ def is_enhanced_rr_cap_valid(self):
+ """Checks is enhanced route refresh capability is enabled/valid.
+
+ Checks sent and received `Open` messages to see if this session with
+ peer is capable of enhanced route refresh capability.
+ """
+ if not self.recv_open_msg:
+            raise ValueError("Did not yet receive the peer's open message.")
+
+ err_cap_enabled = False
+ local_cap = self.sent_open_msg.caps
+ peer_cap = self.recv_open_msg.caps
+ # Both local and peer should advertise ERR capability for it to be
+ # enabled.
+ if (local_cap.get(EnhancedRouteRefreshCap.CODE) and
+ peer_cap.get(EnhancedRouteRefreshCap.CODE)):
+ err_cap_enabled = True
+
+ return err_cap_enabled
+
+ def _check_route_fmly_adv(self, open_msg, route_family):
+ match_found = False
+
+ local_caps = open_msg.caps
+ mbgp_cap = local_caps.get(MultiprotocolExtentionCap.CODE)
+ # Check MP_BGP capability was advertised.
+ if mbgp_cap:
+ # Iterate over all advertised mp_bgp caps to find a match.
+ for peer_cap in mbgp_cap:
+ if (route_family.afi == peer_cap.route_family.afi and
+ route_family.safi == peer_cap.route_family.safi):
+ match_found = True
+
+ return match_found
+
+ def is_route_family_adv(self, route_family):
+ """Checks if `route_family` was advertised to peer as per MP_BGP cap.
+
+ Returns:
+ - True: if given address family was advertised.
+ - False: if given address family was not advertised.
+ """
+ return self._check_route_fmly_adv(self.sent_open_msg, route_family)
+
+ def is_route_family_adv_recv(self, route_family):
+ """Checks if `route_family` was advertised by peer as per MP_BGP cap.
+
+ Returns:
+ - True: if given address family was advertised.
+ - False: if given address family was not advertised.
+ """
+ return self._check_route_fmly_adv(self.recv_open_msg, route_family)
+
+ @property
+ def negotiated_afs(self):
+ local_caps = self.sent_open_msg.caps
+ remote_caps = self.recv_open_msg.caps
+
+ local_mbgp_cap = local_caps.get(MultiprotocolExtentionCap.CODE)
+ remote_mbgp_cap = remote_caps.get(MultiprotocolExtentionCap.CODE)
+ # Check MP_BGP capabilities were advertised.
+ if local_mbgp_cap and remote_mbgp_cap:
+ local_families = {
+ (peer_cap.route_family.afi, peer_cap.route_family.safi)
+ for peer_cap in local_mbgp_cap
+ }
+ remote_families = {
+ (peer_cap.route_family.afi, peer_cap.route_family.safi)
+ for peer_cap in remote_mbgp_cap
+ }
+ afi_safi = local_families.intersection(remote_families)
+ else:
+ afi_safi = set()
+
+ afs = []
+ for afi, safi in afi_safi:
+ afs.append(nlri.get_rf(afi, safi))
+ return afs
+
+ def is_mbgp_cap_valid(self, route_family):
+ """Returns true if both sides of this protocol have advertise
+ capability for this address family.
+ """
+ return (self.is_route_family_adv(route_family) and
+ self.is_route_family_adv_recv(route_family))
+
+ def _run(self, peer):
+ """Sends open message to peer and handles received messages.
+
+ Parameters:
+        - `peer`: the peer to which this protocol instance is connected.
+ """
+ # We know the peer we are connected to, we send open message.
+ self._peer = peer
+ self.connection_made()
+
+ # We wait for peer to send messages.
+ self._recv_loop()
+
+ def data_received(self, next_bytes):
+ try:
+ self._data_received(next_bytes)
+ except BgpExc as exc:
+ LOG.error(
+ "BGPExc Exception while receiving data: "
+ "%s \n Traceback %s \n"
+ % (str(exc), traceback.format_exc())
+ )
+ if exc.SEND_ERROR:
+ self.send_notification(exc.CODE, exc.SUB_CODE)
+ else:
+ self._socket.close()
+ raise exc
+
+ @staticmethod
+ def parse_msg_header(buff):
+ """Parses given `buff` into bgp message header format.
+
+ Returns a tuple of marker, length, type of bgp message.
+ """
+ return struct.unpack('!16sHB', buff)
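+
+    # Header layout unpacked above (per RFC 4271): a 16-octet marker, a
+    # 2-octet length and a 1-octet type, hence '!16sHB' and a minimum
+    # message length of 19. A small illustrative round-trip:
+    #
+    #   hdr = BgpProtocol.MESSAGE_MARKER + struct.pack('!HB', 19, 4)
+    #   marker, length, ptype = BgpProtocol.parse_msg_header(hdr)
+    #   # marker == MESSAGE_MARKER, length == 19, ptype == 4 (KEEPALIVE)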
+
+ def _data_received(self, next_bytes):
+ """Maintains buffer of bytes received from peer and extracts bgp
+ message from this buffer if enough data is received.
+
+ Validates bgp message marker, length, type and data and constructs
+ appropriate bgp message instance and calls handler.
+
+ :Parameters:
+ - `next_bytes`: next set of bytes received from peer.
+ """
+ # Append buffer with received bytes.
+ self._recv_buff += next_bytes
+
+ while True:
+            # If current buffer size is less than minimum bgp message size, we
+ # return as we do not have a complete bgp message to work with.
+ if len(self._recv_buff) < BGP_MIN_MSG_LEN:
+ return
+
+ # Parse message header into elements.
+ auth, length, ptype = BgpProtocol.parse_msg_header(
+ self._recv_buff[:BGP_MIN_MSG_LEN])
+
+ # Check if we have valid bgp message marker.
+ # We should get default marker since we are not supporting any
+ # authentication.
+ if (auth != BgpProtocol.MESSAGE_MARKER):
+ LOG.error('Invalid message marker received: %s' % auth)
+ raise exceptions.NotSync()
+
+ # Check if we have valid bgp message length.
+ check = lambda: length < BGP_MIN_MSG_LEN\
+ or length > BGP_MAX_MSG_LEN
+
+ # RFC says: The minimum length of the OPEN message is 29
+ # octets (including the message header).
+ check2 = lambda: ptype == Open.TYPE_CODE\
+ and length < Open.MIN_LENGTH
+
+ # RFC says: A KEEPALIVE message consists of only the
+ # message header and has a length of 19 octets.
+ check3 = lambda: ptype == Keepalive.TYPE_CODE\
+ and length != BGP_MIN_MSG_LEN
+
+ # RFC says: The minimum length of the UPDATE message is 23
+ # octets.
+ check4 = lambda: ptype == Update.TYPE_CODE\
+ and length < Update.MIN_LENGTH
+
+ if check() or check2() or check3() or check4():
+ raise exceptions.BadLen(ptype, length)
+
+ # If we have partial message we wait for rest of the message.
+ if len(self._recv_buff) < length:
+ return
+
+ # If we have full message, we get its payload/data.
+ payload = self._recv_buff[BGP_MIN_MSG_LEN:length]
+
+ # Update buffer to not contain any part of the current message.
+ self._recv_buff = self._recv_buff[length:]
+
+ # Try to decode payload into specified message type.
+ # If we have any error parsing the message, we send appropriate
+ # bgp notification message.
+ msg = messages.decode(ptype, payload, length)
+
+ # If we have a valid bgp message we call message handler.
+ self._handle_msg(msg)
+
+ def send_notification(self, code, subcode):
+ """Utility to send notification message.
+
+ Closes the socket after sending the message.
+ :Parameters:
+ - `socket`: (socket) - socket over which to send notification
+ message.
+ - `code`: (int) - BGP Notification code
+ - `subcode`: (int) - BGP Notification sub-code
+
+ RFC ref: http://tools.ietf.org/html/rfc4486
+ http://www.iana.org/assignments/bgp-parameters/bgp-parameters.xhtml
+ """
+ reason = Notification.REASONS.get((code, subcode))
+ if not reason:
+ # Not checking for type of parameters to allow some flexibility
+            # via duck-typing.
+ raise ValueError('Unsupported code/sub-code given.')
+
+ notification = Notification(code, subcode, reason)
+ self._socket.sendall(notification.encode())
+ self._signal_bus.bgp_error(self._peer, code, subcode, reason)
+ LOG.error(
+ 'Sent notification to %r>> %s' %
+ (self._socket.getpeername(), notification)
+ )
+ self._socket.close()
+
+ def send(self, msg):
+ if not self.started:
+            raise BgpProtocolException('Tried to send message to peer when '
+                                       'this protocol instance is not started'
+                                       ' or is no longer in started state.')
+ self._socket.sendall(msg.encode())
+ if msg.MSG_NAME == Notification.MSG_NAME:
+ LOG.error('Sent notification to %s>> %s' %
+ (self.get_peername(), msg))
+
+ self._signal_bus.bgp_notification_sent(self._peer, msg)
+
+ else:
+ LOG.debug('Sent msg. %s to %s>> %s' %
+ (msg.MSG_NAME, self.get_peername(), msg))
+
+ def stop(self):
+ Activity.stop(self)
+
+ def _validate_open_msg(self, open_msg):
+ """Validates BGP OPEN message according from application context.
+
+ Parsing modules takes care of validating OPEN message that need no
+ context. But here we validate it according to current application
+ settings. RTC or RR/ERR are MUST capability if peer does not support
+ either one of them we have to end session.
+ """
+ assert open_msg.TYPE_CODE == Open.TYPE_CODE
+ # Validate remote ASN.
+ remote_asnum = open_msg.asnum
+ # Since 4byte AS is not yet supported, we validate AS as old style AS.
+ if (not is_valid_old_asn(remote_asnum) or
+ remote_asnum != self._peer.remote_as):
+ raise exceptions.BadPeerAs()
+
+ # Validate bgp version number.
+ if open_msg.version != BGP_VERSION_NUM:
+ raise exceptions.UnsupportedVersion(BGP_VERSION_NUM)
+
+ adv_caps = open_msg.caps
+ rr_cap_adv = adv_caps.get(RouteRefreshCap.CODE)
+ err_cap_adv = adv_caps.get(EnhancedRouteRefreshCap.CODE)
+        # RTC or RR/ERR capability is a MUST; if the peer supports none of
+        # them we have to end the session, as we need to be able to request
+        # the peer to send prefixes for new VPNs created automatically.
+ # TODO(PH): Check with experts if error is suitable in this case
+ if not (rr_cap_adv or err_cap_adv or
+ self._check_route_fmly_adv(open_msg, RF_RTC_UC)):
+ raise exceptions.UnsupportedOptParam()
+
+ def _handle_msg(self, msg):
+ """When a BGP message is received, send it to peer.
+
+ Open messages are validated here. Peer handler is called to handle each
+ message except for *Open* and *Notification* message. On receiving
+ *Notification* message we close connection with peer.
+ """
+ LOG.debug('Received %s msg. from %s<< \n%s' %
+ (msg.MSG_NAME, str(self.get_peername()), msg))
+
+ # If we receive open message we try to bind to protocol
+ if (msg.MSG_NAME == Open.MSG_NAME):
+ if self.state == BGP_FSM_OPEN_SENT:
+ # Validate open message.
+ self._validate_open_msg(msg)
+ self.recv_open_msg = msg
+ self.state = BGP_FSM_OPEN_CONFIRM
+ self._peer.state.bgp_state = self.state
+
+ # Try to bind this protocol to peer.
+ self._is_bound = self._peer.bind_protocol(self)
+
+ # If this protocol failed to bind to peer.
+ if not self._is_bound:
+ # Failure to bind to peer indicates connection collision
+ # resolution choose different instance of protocol and this
+ # instance has to close. Before closing it sends
+ # appropriate notification msg. to peer.
+ raise exceptions.CollisionResolution()
+
+ # If peer sends Hold Time as zero, then according to RFC we do
+ # not set Hold Time and Keep Alive timer.
+ if msg.holdtime == 0:
+ LOG.info('The Hold Time sent by the peer is zero, hence '
+ 'not setting any Hold Time and Keep Alive'
+ ' timers.')
+ else:
+ # Start Keep Alive timer considering Hold Time preference
+ # of the peer.
+ self._start_timers(msg.holdtime)
+ self._send_keepalive()
+
+                # The open message is not passed on to the peer handler.
+ return
+ else:
+                # If we receive an Open message out of order
+ LOG.error('Open message received when current state is not '
+ 'OpenSent')
+ # Received out-of-order open message
+ # We raise Finite state machine error
+ raise exceptions.FiniteStateMachineError()
+ elif msg.MSG_NAME == Notification.MSG_NAME:
+ if self._peer:
+ self._signal_bus.bgp_notification_received(self._peer, msg)
+ # If we receive notification message
+ LOG.error('Received notification message, hence closing '
+ 'connection %s' % msg)
+ self._socket.close()
+ return
+
+ # If we receive keepalive or update message, we reset expire timer.
+ if (msg.MSG_NAME == Keepalive.MSG_NAME or
+ msg.MSG_NAME == Update.MSG_NAME):
+ if self._expiry:
+ self._expiry.reset()
+
+ # Call peer message handler for appropriate messages.
+ if (msg.MSG_NAME in
+ (Keepalive.MSG_NAME, Update.MSG_NAME, RouteRefresh.MSG_NAME)):
+ self._peer.handle_msg(msg)
+ # We give chance to other threads to run.
+ self.pause(0)
+
+ def _start_timers(self, peer_holdtime):
+ """Starts keepalive and expire timers.
+
+ Hold time is set to min. of peer and configured/default hold time.
+ Starts keep alive timer and expire timer based on this value.
+ """
+ neg_timer = min(self._holdtime, peer_holdtime)
+ if neg_timer < self._holdtime:
+            LOG.info('Negotiated hold time (%s) is lower than '
+ 'configured/default (%s).' % (neg_timer, self._holdtime))
+ # We use negotiated timer value.
+ self._holdtime = neg_timer
+ self._keepalive = self._create_timer('Keepalive Timer',
+ self._send_keepalive)
+ interval = self._holdtime / 3
+ self._keepalive.start(interval, now=False)
+ # Setup the expire timer.
+ self._expiry = self._create_timer('Holdtime Timer', self._expired)
+ self._expiry.start(self._holdtime, now=False)
+        LOG.debug('Started keep-alive and expire timer for negotiated hold '
+ 'time %s' % self._holdtime)
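+
+        # For example, with a configured hold time of 240 and a peer hold
+        # time of 90, the negotiated hold time becomes 90, keepalives are
+        # sent every 30 seconds and the hold timer expires after 90 seconds
+        # without a keepalive or update from the peer.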
+
+ def _expired(self):
+ """Hold timer expired event handler.
+ """
+ LOG.info('Negotiated hold time %s expired.' % self._holdtime)
+ code = exceptions.HoldTimerExpired.CODE
+ subcode = exceptions.HoldTimerExpired.SUB_CODE
+ self.send_notification(code, subcode)
+ self.connection_lost('Negotiated hold time %s expired.' %
+ self._holdtime)
+ self.stop()
+
+ def _send_keepalive(self):
+ self.send(_KEEP_ALIVE)
+
+ def _recv_loop(self):
+ """Sits in tight loop collecting data received from peer and
+ processing it.
+ """
+ required_len = BGP_MIN_MSG_LEN
+ conn_lost_reason = "Connection lost as protocol is no longer active"
+ try:
+ while True:
+ next_bytes = self._socket.recv(required_len)
+ if len(next_bytes) == 0:
+ conn_lost_reason = 'Peer closed connection'
+ break
+ self.data_received(next_bytes)
+ except socket.error as err:
+ conn_lost_reason = 'Connection to peer lost: %s.' % err
+ except BgpExc as ex:
+ conn_lost_reason = 'Connection to peer lost, reason: %s.' % ex
+ except Exception as e:
+ LOG.debug(traceback.format_exc())
+ conn_lost_reason = str(e)
+ finally:
+ self.connection_lost(conn_lost_reason)
+
+ def connection_made(self):
+ """Connection to peer handler.
+
+        We send bgp open message to peer and initialize related attributes.
+ """
+ assert self.state == BGP_FSM_CONNECT
+ # We have a connection with peer we send open message.
+ open_msg = self._peer.create_open_msg()
+ self._holdtime = open_msg.holdtime
+ self.state = BGP_FSM_OPEN_SENT
+ if not self.is_reactive:
+ self._peer.state.bgp_state = self.state
+ self.sent_open_msg = open_msg
+ self.send(open_msg)
+ self._peer.connection_made()
+ LOG.debug('Sent open message %s' % open_msg)
+
+ def connection_lost(self, reason):
+ """Stops all timers and notifies peer that connection is lost.
+ """
+
+ if self._peer:
+ state = self._peer.state.bgp_state
+ if self._is_bound or state == BGP_FSM_OPEN_SENT:
+ self._peer.connection_lost(reason)
+
+ self._peer = None
+
+ if reason:
+ LOG.info(reason)
+ else:
+ LOG.info('Connection to peer closed for unknown reasons.')
diff --git a/ryu/services/protocols/bgp/utils/bgp.py b/ryu/services/protocols/bgp/utils/bgp.py
new file mode 100644
index 00000000..bee4d3b9
--- /dev/null
+++ b/ryu/services/protocols/bgp/utils/bgp.py
@@ -0,0 +1,121 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Utilities related to bgp data types and models.
+"""
+import logging
+import socket
+
+from ryu.services.protocols.bgp.protocols.bgp.messages import Update
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv4_VPN
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_IPv6_VPN
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_RTC_UC
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RtNlri
+from ryu.services.protocols.bgp.protocols.bgp import pathattr
+from ryu.services.protocols.bgp.protocols.bgp.pathattr import Med
+from ryu.services.protocols.bgp.info_base.rtc import RtcPath
+from ryu.services.protocols.bgp.info_base.vpnv4 import Vpnv4Path
+from ryu.services.protocols.bgp.info_base.vpnv6 import Vpnv6Path
+
+
+LOG = logging.getLogger('utils.bgp')
+
+# RouteFamily to path sub-class mapping.
+_ROUTE_FAMILY_TO_PATH_MAP = {RF_IPv4_VPN: Vpnv4Path,
+ RF_IPv6_VPN: Vpnv6Path,
+ RF_RTC_UC: RtcPath}
+
+
+def create_path(src_peer, nlri, **kwargs):
+ route_family = nlri.route_family
+ assert route_family in _ROUTE_FAMILY_TO_PATH_MAP.keys()
+ path_cls = _ROUTE_FAMILY_TO_PATH_MAP.get(route_family)
+ return path_cls(src_peer, nlri, src_peer.version_num, **kwargs)
+
+
+def clone_path_and_update_med_for_target_neighbor(path, med):
+ assert path and med
+ route_family = path.route_family
+ if route_family not in _ROUTE_FAMILY_TO_PATH_MAP.keys():
+ raise ValueError('Clone is not supported for address-family %s' %
+ route_family)
+ path_cls = _ROUTE_FAMILY_TO_PATH_MAP.get(route_family)
+ pattrs = path.pathattr_map
+ pattrs[Med.ATTR_NAME] = Med(med)
+ return path_cls(
+ path.source, path.nlri, path.source_version_num,
+ pattrs=pattrs, nexthop=path.nexthop,
+ is_withdraw=path.is_withdraw,
+ med_set_by_target_neighbor=True
+ )
+
+
+def clone_rtcpath_update_rt_as(path, new_rt_as):
+ """Clones given RT NLRI `path`, and updates it with new RT_NLRI AS.
+
+ Parameters:
+ - `path`: (Path) RT_NLRI path
+ - `new_rt_as`: AS value of cloned paths' RT_NLRI
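+
+    For example (illustrative values): cloning a path whose RT NLRI carries
+    origin AS 65000 and route target '65100:100' with `new_rt_as` 65001
+    yields a path whose RT NLRI carries origin AS 65001 and the same route
+    target.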
+ """
+ assert path and new_rt_as
+ if not path or path.route_family != RF_RTC_UC:
+ raise ValueError('Expected RT_NLRI path')
+ old_nlri = path.nlri
+ new_rt_nlri = RtNlri(new_rt_as, old_nlri.route_target)
+ return RtcPath(path.source, new_rt_nlri, path.source_version_num,
+ pattrs=path.pathattr_map, nexthop=path.nexthop,
+ is_withdraw=path.is_withdraw)
+
+
+def from_inet_ptoi(bgp_id):
+ """Convert an IPv4 address string format to a four byte long.
+ """
+ four_byte_id = None
+ try:
+ packed_byte = socket.inet_pton(socket.AF_INET, bgp_id)
+ four_byte_id = long(packed_byte.encode('hex'), 16)
+ except ValueError:
+ LOG.debug('Invalid bgp id given for conversion to integer value %s' %
+ bgp_id)
+
+ return four_byte_id
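+
+# For example, from_inet_ptoi('192.168.1.1') returns 0xC0A80101 (3232235777).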
+
+
+def get_unknow_opttrans_attr(path):
+ """Utility method that gives a `dict` of unknown optional transitive
+ path attributes of `path`.
+
+ Returns dict: <key> - attribute type code, <value> - unknown path-attr.
+ """
+ path_attrs = path.pathattr_map
+ unknown_opt_tran_attrs = {}
+ for _, attr in path_attrs.iteritems():
+ if (isinstance(attr, pathattr.UnRcgPathAttr) and
+ attr.is_optional_transitive()):
+ unknown_opt_tran_attrs[attr.type_code] = attr
+ return unknown_opt_tran_attrs
+
+
+def create_end_of_rib_update():
+ """Construct end-of-rib (EOR) Update instance."""
+ mpunreach_attr = pathattr.MpUnreachNlri(RF_IPv4_VPN, [])
+ pathattr_map = {pathattr.MpUnreachNlri.ATTR_NAME: mpunreach_attr}
+ eor = Update(pathattr_map)
+ return eor
+
+
+# Bgp update message instance that can be used as End of RIB marker.
+UPDATE_EOR = create_end_of_rib_update()
diff --git a/ryu/services/protocols/bgp/utils/circlist.py b/ryu/services/protocols/bgp/utils/circlist.py
new file mode 100644
index 00000000..df92ff77
--- /dev/null
+++ b/ryu/services/protocols/bgp/utils/circlist.py
@@ -0,0 +1,265 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class CircularListType(object):
+ """Instances of this class represent a specific type of list.
+ Nodes are linked in a circular fashion, using attributes on the
+ nodes themselves.
+
+ Example:
+
+       ItemList = CircularListType(next_attr_name='_next',
+                                   prev_attr_name='_prev')
+
+ l = ItemList()
+ l.prepend(item)
+
+ The created list has the following properties:
+
+ - A node can be inserted O(1) time at the head, tail, or
+ after/before another specified node.
+
+ - A node can be removed in O(1) time from any list it may be on,
+ without providing a reference to the list.
+
+ - The current node in an iteration can be deleted safely.
+
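+    For example, continuing the sketch above, the current node can be
+    removed while iterating (`stale` is any predicate on the nodes; it is
+    illustrative only):
+
+       for node in l:
+           if stale(node):
+               l.remove(node)
+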
+ """
+
+ class List(object):
+ """An object that represents a list.
+
+ This class is not expected to be used directly by clients. Rather, they
+ would use the 'create' method of a CircularListType object to create an
+ instance.
+ """
+
+ # Define the set of valid attributes so as to make the list
+ # head lightweight.
+ #
+ # We override __getattr__ and __setattr__ so as to store the
+ # the next and previous references on the list head in
+ # _next_slot_ and _prev_slot_ respectively.
+ __slots__ = ["list_type", "head", "_next_slot_",
+ "_prev_slot_"]
+
+ def __init__(self, list_type):
+ self.list_type = list_type
+
+ # Save memory by using the List object itself as the head.
+ self.head = self
+ self.list_type.node_init(self.head)
+
+ def __getattr__(self, name):
+ if(name == self.list_type.next_name):
+ return self._next_slot_
+
+ if(name == self.list_type.prev_name):
+ return self._prev_slot_
+
+ raise AttributeError(name)
+
+ def __setattr__(self, name, value):
+ if(name in CircularListType.List.__slots__):
+ object.__setattr__(self, name, value)
+ return
+
+ if(name == self.list_type.next_name):
+ self._next_slot_ = value
+ return
+
+ if(name == self.list_type.prev_name):
+ self._prev_slot_ = value
+ return
+
+ raise AttributeError(name)
+
+ def is_empty(self):
+ return not self.list_type.node_is_on_list(self.head)
+
+ def clear(self):
+ """Remove all items from the list."""
+
+ # Make sure that all items are unlinked.
+ for node in self:
+ self.remove(node)
+
+ def is_on_list(self, node):
+ return self.list_type.node_is_on_list(node)
+
+ def append(self, node):
+ self.list_type.node_insert_before(self.head, node)
+
+ def prepend(self, node):
+ self.list_type.node_insert_after(self.head, node)
+
+ def __iter__(self):
+ return self.generator()
+
+ def remove(self, node):
+ """List the given node from the list.
+
+ Note that this does not verify that the node is on this
+ list. It could even be on a different list.
+ """
+ self.list_type.node_unlink(node)
+
+ self.list_type.node_del_attrs(node)
+
+ def pop_first(self):
+ """Remove the first item in the list and return it."""
+ node = self.list_type.node_next(self.head)
+ if(node is self.head):
+ return None
+
+ self.remove(node)
+ return node
+
+ def generator(self):
+ """Enables iteration over the list.
+
+ The current item can safely be removed from the list during
+ iteration.
+ """
+ # Keep a pointer to the next node when returning the
+ # current node. This allows the caller to remove the
+ # current node safely.
+ node = self.list_type.node_next(self.head)
+ next = self.list_type.node_next(node)
+ while(node is not self.head):
+ yield node
+
+ node = next
+ next = self.list_type.node_next(node)
+
+ #
+ # CircularListType methods
+ #
+
+ def __init__(self, next_attr_name=None, prev_attr_name=None):
+ """Initializes this list.
+
+ next_attr_name: The name of the attribute that holds a reference
+ to the next item in the list.
+
+ prev_attr_name: the name of the attribute that holds a reference
+ to the previous item in the list.
+ """
+
+ # Keep an interned version of the attribute names. This should
+ # speed up the process of looking up the attributes.
+ self.next_name = intern(next_attr_name)
+ self.prev_name = intern(prev_attr_name)
+
+ def create(self):
+ return CircularListType.List(self)
+
+ def __call__(self):
+ """Make a CircularListType instance look like a class by
+ creating a list object.
+ """
+ return self.create()
+
+ def node_init(self, node):
+ assert(not self.node_is_on_list(node))
+
+ # Set the node to point to itself as the previous and next
+ # entries.
+ self.node_set_next(node, node)
+ self.node_set_prev(node, node)
+
+ def node_next(self, node):
+ try:
+ return getattr(node, self.next_name)
+ except AttributeError:
+ return None
+
+ def node_set_next(self, node, next):
+ setattr(node, self.next_name, next)
+
+ def node_prev(self, node):
+ try:
+ return getattr(node, self.prev_name)
+ except AttributeError:
+ return None
+
+ def node_set_prev(self, node, prev):
+ setattr(node, self.prev_name, prev)
+
+ def node_del_attrs(self, node):
+ """Remove all attributes that are used for putting this node
+ on this type of list.
+ """
+ try:
+ delattr(node, self.next_name)
+ delattr(node, self.prev_name)
+ except AttributeError:
+ pass
+
+ def node_is_on_list(self, node):
+ """Returns True if this node is on *some* list.
+
+ A node is not on any list if it is linked to itself, or if it
+        does not have the next and/or prev attributes at all.
+ """
+ next = self.node_next(node)
+ if next == node or next is None:
+ assert(self.node_prev(node) is next)
+ return False
+
+ return True
+
+ def node_insert_after(self, node, new_node):
+ """Insert the new node after node."""
+
+ assert(not self.node_is_on_list(new_node))
+ assert(node is not new_node)
+
+ next = self.node_next(node)
+ assert(next is not None)
+ self.node_set_next(node, new_node)
+ self.node_set_prev(new_node, node)
+
+ self.node_set_next(new_node, next)
+ self.node_set_prev(next, new_node)
+
+ def node_insert_before(self, node, new_node):
+ """Insert the new node before node."""
+
+ assert(not self.node_is_on_list(new_node))
+ assert(node is not new_node)
+
+ prev = self.node_prev(node)
+ assert(prev is not None)
+ self.node_set_prev(node, new_node)
+ self.node_set_next(new_node, node)
+
+ self.node_set_prev(new_node, prev)
+ self.node_set_next(prev, new_node)
+
+ def node_unlink(self, node):
+
+ if not self.node_is_on_list(node):
+ return
+
+ prev = self.node_prev(node)
+ next = self.node_next(node)
+
+ self.node_set_next(prev, next)
+ self.node_set_prev(next, prev)
+
+ self.node_set_next(node, node)
+ self.node_set_prev(node, node)
diff --git a/ryu/services/protocols/bgp/utils/dictconfig.py b/ryu/services/protocols/bgp/utils/dictconfig.py
new file mode 100644
index 00000000..c430d881
--- /dev/null
+++ b/ryu/services/protocols/bgp/utils/dictconfig.py
@@ -0,0 +1,562 @@
+# Copyright 2009-2010 by Vinay Sajip. All Rights Reserved.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose and without fee is hereby granted,
+# provided that the above copyright notice appear in all copies and that
+# both that copyright notice and this permission notice appear in
+# supporting documentation, and that the name of Vinay Sajip
+# not be used in advertising or publicity pertaining to distribution
+# of the software without specific, written prior permission.
+# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
+# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
+# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
+# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# Source: https://bitbucket.org/vinay.sajip/dictconfig/raw/53b3c32dea4694cd3fb2f14b3159d66d3da10bc0/src/dictconfig.py
+# flake8: noqa
+import logging.handlers
+import re
+import sys
+import types
+
+IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
+
+
+def valid_ident(s):
+ m = IDENTIFIER.match(s)
+ if not m:
+ raise ValueError('Not a valid Python identifier: %r' % s)
+ return True
+
+#
+# This function is defined in logging only in recent versions of Python
+#
+try:
+ from logging import _checkLevel
+except ImportError:
+ def _checkLevel(level):
+ if isinstance(level, int):
+ rv = level
+ elif str(level) == level:
+ if level not in logging._levelNames:
+ raise ValueError('Unknown level: %r' % level)
+ rv = logging._levelNames[level]
+ else:
+ raise TypeError('Level not an integer or a '
+ 'valid string: %r' % level)
+ return rv
+
+# The ConvertingXXX classes are wrappers around standard Python containers,
+# and they serve to convert any suitable values in the container. The
+# conversion converts base dicts, lists and tuples to their wrapped
+# equivalents, whereas strings which match a conversion format are converted
+# appropriately.
+#
+# Each wrapper should have a configurator attribute holding the actual
+# configurator to use for conversion.
+
+
+class ConvertingDict(dict):
+ """A converting dictionary wrapper."""
+
+ def __getitem__(self, key):
+ value = dict.__getitem__(self, key)
+ result = self.configurator.convert(value)
+ # If the converted value is different, save for next time
+ if value is not result:
+ self[key] = result
+ if type(result) in (ConvertingDict, ConvertingList,
+ ConvertingTuple):
+ result.parent = self
+ result.key = key
+ return result
+
+ def get(self, key, default=None):
+ value = dict.get(self, key, default)
+ result = self.configurator.convert(value)
+ # If the converted value is different, save for next time
+ if value is not result:
+ self[key] = result
+ if type(result) in (ConvertingDict, ConvertingList,
+ ConvertingTuple):
+ result.parent = self
+ result.key = key
+ return result
+
+ def pop(self, key, default=None):
+ value = dict.pop(self, key, default)
+ result = self.configurator.convert(value)
+ if value is not result:
+ if type(result) in (ConvertingDict, ConvertingList,
+ ConvertingTuple):
+ result.parent = self
+ result.key = key
+ return result
+
+
+class ConvertingList(list):
+ """A converting list wrapper."""
+ def __getitem__(self, key):
+ value = list.__getitem__(self, key)
+ result = self.configurator.convert(value)
+ # If the converted value is different, save for next time
+ if value is not result:
+ self[key] = result
+ if type(result) in (ConvertingDict, ConvertingList,
+ ConvertingTuple):
+ result.parent = self
+ result.key = key
+ return result
+
+ def pop(self, idx= -1):
+ value = list.pop(self, idx)
+ result = self.configurator.convert(value)
+ if value is not result:
+ if type(result) in (ConvertingDict, ConvertingList,
+ ConvertingTuple):
+ result.parent = self
+ return result
+
+
+class ConvertingTuple(tuple):
+ """A converting tuple wrapper."""
+ def __getitem__(self, key):
+ value = tuple.__getitem__(self, key)
+ result = self.configurator.convert(value)
+ if value is not result:
+ if type(result) in (ConvertingDict, ConvertingList,
+ ConvertingTuple):
+ result.parent = self
+ result.key = key
+ return result
+
+
+class BaseConfigurator(object):
+ """
+ The configurator base class which defines some useful defaults.
+ """
+
+ CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
+
+ WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
+ DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
+ INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
+ DIGIT_PATTERN = re.compile(r'^\d+$')
+
+ value_converters = {
+ 'ext': 'ext_convert',
+ 'cfg': 'cfg_convert',
+ }
+
+ # We might want to use a different one, e.g. importlib
+ importer = __import__
+
+ def __init__(self, config):
+ self.config = ConvertingDict(config)
+ self.config.configurator = self
+
+ def resolve(self, s):
+ """
+ Resolve strings to objects using standard import and attribute
+ syntax.
+ """
+ name = s.split('.')
+ used = name.pop(0)
+ try:
+ found = self.importer(used)
+ for frag in name:
+ used += '.' + frag
+ try:
+ found = getattr(found, frag)
+ except AttributeError:
+ self.importer(used)
+ found = getattr(found, frag)
+ return found
+ except ImportError:
+ e, tb = sys.exc_info()[1:]
+ v = ValueError('Cannot resolve %r: %s' % (s, e))
+ v.__cause__, v.__traceback__ = e, tb
+ raise v
+
+ def ext_convert(self, value):
+ """Default converter for the ext:// protocol."""
+ return self.resolve(value)
+
+ def cfg_convert(self, value):
+ """Default converter for the cfg:// protocol."""
+ rest = value
+ m = self.WORD_PATTERN.match(rest)
+ if m is None:
+ raise ValueError("Unable to convert %r" % value)
+ else:
+ rest = rest[m.end():]
+ d = self.config[m.groups()[0]]
+ # print d, rest
+ while rest:
+ m = self.DOT_PATTERN.match(rest)
+ if m:
+ d = d[m.groups()[0]]
+ else:
+ m = self.INDEX_PATTERN.match(rest)
+ if m:
+ idx = m.groups()[0]
+ if not self.DIGIT_PATTERN.match(idx):
+ d = d[idx]
+ else:
+ try:
+ # try as number first (most likely)
+ n = int(idx)
+ d = d[n]
+ except TypeError:
+ d = d[idx]
+ if m:
+ rest = rest[m.end():]
+ else:
+ raise ValueError('Unable to convert '
+ '%r at %r' % (value, rest))
+ # rest should be empty
+ return d
+
+ def convert(self, value):
+ """
+ Convert values to an appropriate type. dicts, lists and tuples are
+ replaced by their converting alternatives. Strings are checked to
+ see if they have a conversion format and are converted if they do.
+ """
+ if not isinstance(value, ConvertingDict) and isinstance(value, dict):
+ value = ConvertingDict(value)
+ value.configurator = self
+ elif not isinstance(value, ConvertingList) and isinstance(value, list):
+ value = ConvertingList(value)
+ value.configurator = self
+ elif not isinstance(value, ConvertingTuple) and\
+ isinstance(value, tuple):
+ value = ConvertingTuple(value)
+ value.configurator = self
+ elif isinstance(value, basestring): # str for py3k
+ m = self.CONVERT_PATTERN.match(value)
+ if m:
+ d = m.groupdict()
+ prefix = d['prefix']
+ converter = self.value_converters.get(prefix, None)
+ if converter:
+ suffix = d['suffix']
+ converter = getattr(self, converter)
+ value = converter(suffix)
+ return value
+
+ def configure_custom(self, config):
+ """Configure an object with a user-supplied factory."""
+ c = config.pop('()')
+ if (not hasattr(c, '__call__') and hasattr(types, 'ClassType') and
+ type(c) != types.ClassType):
+ c = self.resolve(c)
+ props = config.pop('.', None)
+ # Check for valid identifiers
+ kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
+ result = c(**kwargs)
+ if props:
+ for name, value in props.items():
+ setattr(result, name, value)
+ return result
+
+ def as_tuple(self, value):
+ """Utility function which converts lists to tuples."""
+ if isinstance(value, list):
+ value = tuple(value)
+ return value
+
+
+class DictConfigurator(BaseConfigurator):
+ """
+ Configure logging using a dictionary-like object to describe the
+ configuration.
+ """
+
+ def configure(self):
+ """Do the configuration."""
+
+ config = self.config
+ if 'version' not in config:
+ raise ValueError("dictionary doesn't specify a version")
+ if config['version'] != 1:
+ raise ValueError("Unsupported version: %s" % config['version'])
+ incremental = config.pop('incremental', False)
+ EMPTY_DICT = {}
+ logging._acquireLock()
+ try:
+ if incremental:
+ handlers = config.get('handlers', EMPTY_DICT)
+ # incremental handler config only if handler name
+ # ties in to logging._handlers (Python 2.7)
+ if sys.version_info[:2] == (2, 7):
+ for name in handlers:
+ if name not in logging._handlers:
+ raise ValueError('No handler found with '
+ 'name %r' % name)
+ else:
+ try:
+ handler = logging._handlers[name]
+ handler_config = handlers[name]
+ level = handler_config.get('level', None)
+ if level:
+ handler.setLevel(_checkLevel(level))
+ except StandardError, e:
+ raise ValueError('Unable to configure handler '
+ '%r: %s' % (name, e))
+ loggers = config.get('loggers', EMPTY_DICT)
+ for name in loggers:
+ try:
+ self.configure_logger(name, loggers[name], True)
+ except StandardError, e:
+ raise ValueError('Unable to configure logger '
+ '%r: %s' % (name, e))
+ root = config.get('root', None)
+ if root:
+ try:
+ self.configure_root(root, True)
+ except StandardError, e:
+ raise ValueError('Unable to configure root '
+ 'logger: %s' % e)
+ else:
+ disable_existing = config.pop('disable_existing_loggers', True)
+
+ logging._handlers.clear()
+ del logging._handlerList[:]
+
+ # Do formatters first - they don't refer to anything else
+ formatters = config.get('formatters', EMPTY_DICT)
+ for name in formatters:
+ try:
+ formatters[name] = self.configure_formatter(
+ formatters[name])
+ except StandardError, e:
+ raise ValueError('Unable to configure '
+ 'formatter %r: %s' % (name, e))
+ # Next, do filters - they don't refer to anything else, either
+ filters = config.get('filters', EMPTY_DICT)
+ for name in filters:
+ try:
+ filters[name] = self.configure_filter(filters[name])
+ except StandardError, e:
+ raise ValueError('Unable to configure '
+ 'filter %r: %s' % (name, e))
+
+ # Next, do handlers - they refer to formatters and filters
+ # As handlers can refer to other handlers, sort the keys
+ # to allow a deterministic order of configuration
+ handlers = config.get('handlers', EMPTY_DICT)
+ for name in sorted(handlers):
+ try:
+ handler = self.configure_handler(handlers[name])
+ handler.name = name
+ handlers[name] = handler
+ except StandardError, e:
+ raise ValueError('Unable to configure handler '
+ '%r: %s' % (name, e))
+ # Next, do loggers - they refer to handlers and filters
+
+ # we don't want to lose the existing loggers,
+ # since other threads may have pointers to them.
+ # existing is set to contain all existing loggers,
+ # and as we go through the new configuration we
+ # remove any which are configured. At the end,
+ # what's left in existing is the set of loggers
+ # which were in the previous configuration but
+ # which are not in the new configuration.
+ root = logging.root
+ existing = root.manager.loggerDict.keys()
+ # The list needs to be sorted so that we can
+ # avoid disabling child loggers of explicitly
+ # named loggers. With a sorted list it is easier
+ # to find the child loggers.
+ existing.sort()
+ # We'll keep the list of existing loggers
+ # which are children of named loggers here...
+ child_loggers = []
+ # now set up the new ones...
+ loggers = config.get('loggers', EMPTY_DICT)
+ for name in loggers:
+ if name in existing:
+ i = existing.index(name)
+ prefixed = name + "."
+ pflen = len(prefixed)
+ num_existing = len(existing)
+ i = i + 1 # look at the entry after name
+ while (i < num_existing) and\
+ (existing[i][:pflen] == prefixed):
+ child_loggers.append(existing[i])
+ i = i + 1
+ existing.remove(name)
+ try:
+ self.configure_logger(name, loggers[name])
+ except StandardError, e:
+ raise ValueError('Unable to configure logger '
+ '%r: %s' % (name, e))
+
+ # Disable any old loggers. There's no point deleting
+ # them as other threads may continue to hold references
+ # and by disabling them, you stop them doing any logging.
+ # However, don't disable children of named loggers, as that's
+ # probably not what was intended by the user.
+ for log in existing:
+ logger = root.manager.loggerDict[log]
+ if log in child_loggers:
+ logger.level = logging.NOTSET
+ logger.handlers = []
+ logger.propagate = True
+ elif disable_existing:
+ logger.disabled = True
+
+ # And finally, do the root logger
+ root = config.get('root', None)
+ if root:
+ try:
+ self.configure_root(root)
+ except StandardError, e:
+ raise ValueError('Unable to configure root '
+ 'logger: %s' % e)
+ finally:
+ logging._releaseLock()
+
+ def configure_formatter(self, config):
+ """Configure a formatter from a dictionary."""
+ if '()' in config:
+ factory = config['()'] # for use in exception handler
+ try:
+ result = self.configure_custom(config)
+ except TypeError, te:
+ if "'format'" not in str(te):
+ raise
+ # Name of parameter changed from fmt to format.
+ # Retry with old name.
+ # This is so that code can be used with older Python versions
+ # (e.g. by Django)
+ config['fmt'] = config.pop('format')
+ config['()'] = factory
+ result = self.configure_custom(config)
+ else:
+ fmt = config.get('format', None)
+ dfmt = config.get('datefmt', None)
+ result = logging.Formatter(fmt, dfmt)
+ return result
+
+ def configure_filter(self, config):
+ """Configure a filter from a dictionary."""
+ if '()' in config:
+ result = self.configure_custom(config)
+ else:
+ name = config.get('name', '')
+ result = logging.Filter(name)
+ return result
+
+ def add_filters(self, filterer, filters):
+ """Add filters to a filterer from a list of names."""
+ for f in filters:
+ try:
+ filterer.addFilter(self.config['filters'][f])
+ except StandardError, e:
+ raise ValueError('Unable to add filter %r: %s' % (f, e))
+
+ def configure_handler(self, config):
+ """Configure a handler from a dictionary."""
+ formatter = config.pop('formatter', None)
+ if formatter:
+ try:
+ formatter = self.config['formatters'][formatter]
+ except StandardError, e:
+ raise ValueError('Unable to set formatter '
+ '%r: %s' % (formatter, e))
+ level = config.pop('level', None)
+ filters = config.pop('filters', None)
+ if '()' in config:
+ c = config.pop('()')
+ if (not hasattr(c, '__call__') and
+ hasattr(types, 'ClassType') and
+ type(c) != types.ClassType):
+ c = self.resolve(c)
+ factory = c
+ else:
+ klass = self.resolve(config.pop('class'))
+ # Special case for handler which refers to another handler
+ if issubclass(klass, logging.handlers.MemoryHandler) and\
+ 'target' in config:
+ try:
+ trgt = self.config['handlers'][config['target']]
+ config['target'] = trgt
+ except StandardError, e:
+ raise ValueError('Unable to set target handler '
+ '%r: %s' % (config['target'], e))
+ elif issubclass(klass, logging.handlers.SMTPHandler) and\
+ 'mailhost' in config:
+ config['mailhost'] = self.as_tuple(config['mailhost'])
+ elif issubclass(klass, logging.handlers.SysLogHandler) and\
+ 'address' in config:
+ config['address'] = self.as_tuple(config['address'])
+ factory = klass
+ kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
+ try:
+ result = factory(**kwargs)
+ except TypeError, te:
+ if "'stream'" not in str(te):
+ raise
+ # The argument name changed from strm to stream
+ # Retry with old name.
+ # This is so that code can be used with older Python versions
+ # (e.g. by Django)
+ kwargs['strm'] = kwargs.pop('stream')
+ result = factory(**kwargs)
+ if formatter:
+ result.setFormatter(formatter)
+ if level is not None:
+ result.setLevel(_checkLevel(level))
+ if filters:
+ self.add_filters(result, filters)
+ return result
+
+ def add_handlers(self, logger, handlers):
+ """Add handlers to a logger from a list of names."""
+ for h in handlers:
+ try:
+ logger.addHandler(self.config['handlers'][h])
+ except StandardError, e:
+ raise ValueError('Unable to add handler %r: %s' % (h, e))
+
+ def common_logger_config(self, logger, config, incremental=False):
+ """
+ Perform configuration which is common to root and non-root loggers.
+ """
+ level = config.get('level', None)
+ if level is not None:
+ logger.setLevel(_checkLevel(level))
+ if not incremental:
+ # Remove any existing handlers
+ for h in logger.handlers[:]:
+ logger.removeHandler(h)
+ handlers = config.get('handlers', None)
+ if handlers:
+ self.add_handlers(logger, handlers)
+ filters = config.get('filters', None)
+ if filters:
+ self.add_filters(logger, filters)
+
+ def configure_logger(self, name, config, incremental=False):
+ """Configure a non-root logger from a dictionary."""
+ logger = logging.getLogger(name)
+ self.common_logger_config(logger, config, incremental)
+ propagate = config.get('propagate', None)
+ if propagate is not None:
+ logger.propagate = propagate
+
+ def configure_root(self, config, incremental=False):
+ """Configure a root logger from a dictionary."""
+ root = logging.getLogger()
+ self.common_logger_config(root, config, incremental)
+
+dictConfigClass = DictConfigurator
+
+
+def dictConfig(config):
+ """Configure logging using a dictionary."""
+ dictConfigClass(config).configure()
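+
+
+# A minimal usage sketch (handler and level values are illustrative):
+#
+#   dictConfig({
+#       'version': 1,
+#       'handlers': {
+#           'console': {'class': 'logging.StreamHandler'},
+#       },
+#       'root': {'level': 'INFO', 'handlers': ['console']},
+#   })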
diff --git a/ryu/services/protocols/bgp/utils/evtlet.py b/ryu/services/protocols/bgp/utils/evtlet.py
new file mode 100644
index 00000000..4dc8a943
--- /dev/null
+++ b/ryu/services/protocols/bgp/utils/evtlet.py
@@ -0,0 +1,140 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Utility classes based on Eventlet, a concurrent networking library.
+"""
+import eventlet
+from eventlet import event
+import logging
+
+LOG = logging.getLogger('utils.evtlet')
+
+
+class EventletIOFactory(object):
+
+ @staticmethod
+ def create_custom_event():
+ LOG.debug('Create CustomEvent called')
+ return CustomEvent()
+
+ @staticmethod
+ def create_looping_call(funct, *args, **kwargs):
+ LOG.debug('create_looping_call called')
+ return LoopingCall(funct, *args, **kwargs)
+
+
+class CustomEvent(object):
+    """Encapsulates an eventlet event to provide an event that can recur.
+
+ It has the same interface as threading.Event but works for eventlet.
+ """
+ def __init__(self,):
+ self._event = event.Event()
+ self._is_set = False
+
+ def is_set(self):
+ """Return true if and only if the internal flag is true."""
+ return self._is_set
+
+ def set(self):
+ """Set the internal flag to true.
+
+ All threads waiting for it to become true are awakened.
+ Threads that call wait() once the flag is true will not block at all.
+ """
+ if self._event and not self._event.ready():
+ self._event.send()
+ self._is_set = True
+
+ def clear(self):
+ """Reset the internal flag to false.
+
+ Subsequently, threads calling wait() will block until set() is called
+ to set the internal flag to true again.
+ """
+ if self._is_set:
+ self._is_set = False
+ self._event = event.Event()
+
+ def wait(self):
+ """Block until the internal flag is true.
+
+        If the internal flag is true on entry, return immediately. Otherwise,
+        block until another thread calls set() to set the flag to true.
+ """
+ if not self._is_set:
+ self._event.wait()
+
+
+class LoopingCall(object):
+ """Call a function repeatedly.
+ """
+ def __init__(self, funct, *args, **kwargs):
+ self._funct = funct
+ self._args = args
+ self._kwargs = kwargs
+ self._running = False
+ self._interval = 0
+ self._self_thread = None
+
+ @property
+ def running(self):
+ return self._running
+
+ @property
+ def interval(self):
+ return self._interval
+
+ def __call__(self):
+ if self._running:
+ # Schedule next iteration of the call.
+ self._self_thread = eventlet.spawn_after(self._interval, self)
+ self._funct(*self._args, **self._kwargs)
+
+ def start(self, interval, now=True):
+        """Start running the pre-set function every interval seconds.
+ """
+ if interval < 0:
+ raise ValueError('interval must be >= 0')
+
+ if self._running:
+ self.stop()
+
+ self._running = True
+ self._interval = interval
+ if now:
+ self._self_thread = eventlet.spawn_after(0, self)
+ else:
+ self._self_thread = eventlet.spawn_after(self._interval, self)
+
+ def stop(self):
+ """Stop running scheduled function.
+ """
+ self._running = False
+ if self._self_thread is not None:
+ self._self_thread.cancel()
+ self._self_thread = None
+
+ def reset(self):
+ """Skip the next iteration and reset timer.
+ """
+ if self._self_thread is not None:
+ # Cancel currently scheduled call
+ self._self_thread.cancel()
+ self._self_thread = None
+ # Schedule a new call
+ self._self_thread = eventlet.spawn_after(self._interval, self)
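A small usage sketch for the two helpers above, assuming eventlet is installed and the module is importable (the callback name is made up):

    import eventlet

    def _tick():
        print('tick')

    # Run _tick immediately and then once per second, then stop it.
    call = EventletIOFactory.create_looping_call(_tick)
    call.start(1)
    eventlet.sleep(3.5)
    call.stop()

    # CustomEvent behaves like threading.Event but can be reused via clear().
    evt = EventletIOFactory.create_custom_event()
    evt.set()
    evt.wait()    # returns immediately, the flag is already set
    evt.clear()   # the event can now be waited on again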
diff --git a/ryu/services/protocols/bgp/utils/internable.py b/ryu/services/protocols/bgp/utils/internable.py
new file mode 100644
index 00000000..ae39798c
--- /dev/null
+++ b/ryu/services/protocols/bgp/utils/internable.py
@@ -0,0 +1,102 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import weakref
+
+dict_name = intern('_internable_dict')
+
+
+#
+# Internable
+#
+class Internable(object):
+ """Class that allows instances to be 'interned'. That is, given an
+ instance of this class, one can obtain a canonical (interned)
+ copy.
+
+ This saves memory when there are likely to be many identical
+ instances of the class -- users hold references to a single
+ interned object instead of references to different objects that
+ are identical.
+
+ The interned version of a given instance is created on demand if
+ necessary, and automatically cleaned up when nobody holds a
+ reference to it.
+
+ Instances of sub-classes must be usable as dictionary keys for
+ Internable to work.
+ """
+
+ class Stats(object):
+
+ def __init__(self):
+ self.d = {}
+
+ def incr(self, name):
+ self.d[name] = self.d.get(name, 0) + 1
+
+ def __repr__(self):
+ return repr(self.d)
+
+ def __str__(self):
+ return str(self.d)
+
+ @classmethod
+ def _internable_init(kls):
+ # Objects to be interned are held as keys in a dictionary that
+ # only holds weak references to keys. As a result, when the
+ # last reference to an interned object goes away, the object
+ # will be removed from the dictionary.
+ kls._internable_dict = weakref.WeakKeyDictionary()
+ kls._internable_stats = Internable.Stats()
+
+ @classmethod
+ def intern_stats(kls):
+ return kls._internable_stats
+
+ def intern(self):
+ """Returns either itself or a canonical copy of itself."""
+
+ # If this is an interned object, return it
+ if hasattr(self, '_interned'):
+            self._internable_stats.incr('self')
+            return self
+
+ #
+ # Got to find or create an interned object identical to this
+ # one. Auto-initialize the class if need be.
+ #
+ kls = self.__class__
+
+ if not hasattr(kls, dict_name):
+ kls._internable_init()
+
+        ref = kls._internable_dict.get(self)
+        if ref is not None:
+            # Found an interned copy; dereference the stored weak reference.
+            kls._internable_stats.incr('found')
+            return ref()
+
+ # Create an interned copy. Take care to only keep a weak
+ # reference to the object itself.
+ def object_collected(obj):
+ kls._internable_stats.incr('collected')
+ # print("Object %s garbage collected" % obj)
+ pass
+
+ ref = weakref.ref(self, object_collected)
+ kls._internable_dict[self] = ref
+ self._interned = True
+ kls._internable_stats.incr('inserted')
+ return self
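A toy subclass to illustrate the documented contract above, relying on the weak-reference lookup returning the referenced canonical object (the class name and attribute are hypothetical; a subclass only needs to be hashable and comparable so it can serve as a dictionary key):

    class RouteTargetValue(Internable):
        def __init__(self, value):
            self.value = value

        def __eq__(self, other):
            return isinstance(other, RouteTargetValue) and self.value == other.value

        def __hash__(self):
            return hash(self.value)

    a = RouteTargetValue('65000:100').intern()
    b = RouteTargetValue('65000:100').intern()
    assert a is b                      # equal instances share one canonical copy
    print(RouteTargetValue.intern_stats())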
diff --git a/ryu/services/protocols/bgp/utils/logs.py b/ryu/services/protocols/bgp/utils/logs.py
new file mode 100644
index 00000000..6d94ce19
--- /dev/null
+++ b/ryu/services/protocols/bgp/utils/logs.py
@@ -0,0 +1,35 @@
+import json
+import logging
+import time
+
+from datetime import datetime
+
+
+class ApgwFormatter(logging.Formatter):
+ LOG_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
+ COMPONENT_NAME = 'BGPSpeaker'
+
+ def format(self, record):
+ msg = {
+ 'component_name': self.COMPONENT_NAME,
+ 'timestamp': datetime.utcfromtimestamp(
+ time.time()
+ ).strftime(self.LOG_TIME_FORMAT),
+ 'msg': unicode(record.msg),
+            'level': record.levelname
+        }
+
+ if hasattr(record, 'log_type'):
+ assert record.log_type in ('log', 'stats', 'state')
+ msg['log_type'] = record.log_type
+ else:
+ msg['log_type'] = 'log'
+ if hasattr(record, 'resource_id'):
+ msg['resource_id'] = record.resource_id
+ if hasattr(record, 'resource_name'):
+ msg['resource_name'] = record.resource_name
+
+ record.msg = json.dumps(msg)
+
+ return super(ApgwFormatter, self).format(record)
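A minimal sketch of wiring the formatter above onto a handler (the logger and resource names are made up):

    import logging

    handler = logging.StreamHandler()
    handler.setFormatter(ApgwFormatter())

    log = logging.getLogger('bgpspeaker.demo')
    log.addHandler(handler)
    log.setLevel(logging.INFO)

    # Attributes passed via `extra` end up in the JSON payload.
    log.info('neighbor up', extra={'log_type': 'state', 'resource_id': 'neigh-1'})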
diff --git a/ryu/services/protocols/bgp/utils/other.py b/ryu/services/protocols/bgp/utils/other.py
new file mode 100644
index 00000000..94f849a8
--- /dev/null
+++ b/ryu/services/protocols/bgp/utils/other.py
@@ -0,0 +1,11 @@
+def bytes2hex(given_bytes):
+ return ''.join(["%02X " % ord(x) for x in given_bytes]).strip()
+
+
+def hex2byte(given_hex):
+ given_hex = ''.join(given_hex.split())
+ result = []
+ for offset in range(0, len(given_hex), 2):
+ result.append(chr(int(given_hex[offset:offset + 2], 16)))
+
+ return ''.join(result)
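The two helpers are inverses of each other on Python 2 byte strings, for example:

    raw = '\x00\x1d\xfa'
    assert bytes2hex(raw) == '00 1D FA'
    assert hex2byte('00 1D FA') == raw
    assert hex2byte(bytes2hex(raw)) == raw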
diff --git a/ryu/services/protocols/bgp/utils/rtfilter.py b/ryu/services/protocols/bgp/utils/rtfilter.py
new file mode 100644
index 00000000..5209dd01
--- /dev/null
+++ b/ryu/services/protocols/bgp/utils/rtfilter.py
@@ -0,0 +1,219 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Module for RT Filter related functionality.
+"""
+import logging
+
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RF_RTC_UC
+from ryu.services.protocols.bgp.protocols.bgp.nlri import RtNlri
+from ryu.services.protocols.bgp.protocols.bgp.pathattr import AsPath
+from ryu.services.protocols.bgp.protocols.bgp.pathattr import Origin
+from ryu.services.protocols.bgp.base import OrderedDict
+from ryu.services.protocols.bgp.info_base.rtc import RtcPath
+
+LOG = logging.getLogger('bgpspeaker.util.rtfilter')
+
+
+class RouteTargetManager(object):
+ def __init__(self, core_service, neighbors_conf, vrfs_conf):
+ self._core_service = core_service
+ # TODO(PH): Consider extending VrfsConfListener and
+ # NeighborsConfListener
+ self._neighbors_conf = neighbors_conf
+ self._vrfs_conf = vrfs_conf
+
+ # Peer to its current RT filter map
+ # <key>/value = <peer_ip>/<rt filter set>
+ self._peer_to_rtfilter_map = {}
+
+ # Collection of import RTs of all configured VRFs
+ self._all_vrfs_import_rts_set = set()
+
+ # Collection of current RTC AS for all configured Neighbors
+ self._all_rtc_as_set = set()
+ # Interested RTs according to current entries in RTC global table
+ self._global_interested_rts = set()
+
+ @property
+ def peer_to_rtfilter_map(self):
+ return self._peer_to_rtfilter_map.copy()
+
+ @peer_to_rtfilter_map.setter
+ def peer_to_rtfilter_map(self, new_map):
+ self._peer_to_rtfilter_map = new_map.copy()
+
+ @property
+ def global_interested_rts(self):
+ return set(self._global_interested_rts)
+
+ def add_rt_nlri(self, route_target, is_withdraw=False):
+ assert route_target
+        # Since we allow an RTC AS setting for each neighbor, we collect all
+        # RTC AS settings and add an RT NLRI using each AS number.
+ rtc_as_set = set()
+ # Add RT NLRI with local AS
+ rtc_as_set.add(self._core_service.asn)
+ # Collect RTC AS from neighbor settings
+ rtc_as_set.update(self._neighbors_conf.rtc_as_set)
+ # Add RT NLRI path (withdraw) for each RTC AS
+ for rtc_as in rtc_as_set:
+ self._add_rt_nlri_for_as(rtc_as, route_target, is_withdraw)
+
+ def _add_rt_nlri_for_as(self, rtc_as, route_target, is_withdraw=False):
+ from ryu.services.protocols.bgp.speaker.core import EXPECTED_ORIGIN
+ rt_nlri = RtNlri(rtc_as, route_target)
+ # Create a dictionary for path-attrs.
+ pattrs = OrderedDict()
+ if not is_withdraw:
+ # MpReachNlri and/or MpUnReachNlri attribute info. is contained
+ # in the path. Hence we do not add these attributes here.
+ pattrs[Origin.ATTR_NAME] = Origin(EXPECTED_ORIGIN)
+ pattrs[AsPath.ATTR_NAME] = AsPath([])
+
+ # Create Path instance and initialize appropriately.
+ path = RtcPath(None, rt_nlri, 0, is_withdraw=is_withdraw,
+ pattrs=pattrs)
+ tm = self._core_service.table_manager
+ tm.learn_path(path)
+
+ def update_rtc_as_set(self):
+ """Syncs RT NLRIs for new and removed RTC_ASes.
+
+ This method should be called when a neighbor is added or removed.
+ """
+ # Compute the diffs in RTC_ASes
+ curr_rtc_as_set = self._neighbors_conf.rtc_as_set
+ # Always add local AS to RTC AS set
+ curr_rtc_as_set.add(self._core_service.asn)
+ removed_rtc_as_set = self._all_rtc_as_set - curr_rtc_as_set
+ new_rtc_as_set = curr_rtc_as_set - self._all_rtc_as_set
+
+ # Update to new RTC_AS set
+ self._all_rtc_as_set = curr_rtc_as_set
+
+ # Sync RT NLRI by adding/withdrawing as appropriate
+ for new_rtc_as in new_rtc_as_set:
+ for import_rt in self._all_vrfs_import_rts_set:
+ self._add_rt_nlri_for_as(new_rtc_as, import_rt)
+ for removed_rtc_as in removed_rtc_as_set:
+ for import_rt in self._all_vrfs_import_rts_set:
+ self._add_rt_nlri_for_as(removed_rtc_as, import_rt,
+ is_withdraw=True)
+
+ def update_local_rt_nlris(self):
+ """Does book-keeping of local RT NLRIs based on all configured VRFs.
+
+ Syncs all import RTs and RT NLRIs.
+ The method should be called when any VRFs are added/removed/changed.
+ """
+ current_conf_import_rts = set()
+ for vrf in self._vrfs_conf.vrf_confs:
+ current_conf_import_rts.update(vrf.import_rts)
+
+ removed_rts = self._all_vrfs_import_rts_set - current_conf_import_rts
+ new_rts = current_conf_import_rts - self._all_vrfs_import_rts_set
+ self._all_vrfs_import_rts_set = current_conf_import_rts
+
+ # Add new and withdraw removed local RtNlris
+ for new_rt in new_rts:
+ self.add_rt_nlri(new_rt)
+ for removed_rt in removed_rts:
+ self.add_rt_nlri(removed_rt, is_withdraw=True)
+
+ def on_rt_filter_chg_sync_peer(self, peer, new_rts, old_rts, table):
+ LOG.debug('RT Filter changed for peer %s, new_rts %s, old_rts %s ' %
+ (peer, new_rts, old_rts))
+ for dest in table.itervalues():
+ # If this destination does not have best path, we ignore it
+ if not dest.best_path:
+ continue
+
+ desired_rts = set(dest.best_path.get_rts())
+
+            # If this path was sent to the peer and none of the path's RTs
+            # are of interest any more, we need to send a withdraw for this
+            # path to this peer.
+ if dest.was_sent_to(peer):
+ if not (desired_rts - old_rts):
+ dest.withdraw_if_sent_to(peer)
+ else:
+                # The new RT could be the default RT, which means we need to
+                # share this path.
+                desired_rts.add(RtNlri.DEFAULT_RT)
+                # If the RT filter's new RTs have any in common with the
+                # path's RTs, we send this path to the peer.
+ if desired_rts.intersection(new_rts):
+ peer.communicate_path(dest.best_path)
+
+ def _compute_global_intrested_rts(self):
+ """Computes current global interested RTs for global tables.
+
+        Computes interested RTs based on the current RT filters of peers.
+        This filter should be used to check the RTs of a path that is
+        installed in any global table (except the RT table).
+ """
+ interested_rts = set()
+ for rtfilter in self._peer_to_rtfilter_map.values():
+ interested_rts.update(rtfilter)
+
+ interested_rts.update(self._vrfs_conf.vrf_interested_rts)
+ # Remove default RT as it is not a valid RT for paths
+ # TODO(PH): Check if we have better alternative than add and remove
+ interested_rts.add(RtNlri.DEFAULT_RT)
+ interested_rts.remove(RtNlri.DEFAULT_RT)
+ return interested_rts
+
+ def update_interested_rts(self):
+ """Updates interested RT list.
+
+        Checks if interested RTs have changed since the previous check.
+        Takes appropriate action for newly interesting RTs and for RTs that
+        are no longer interesting.
+ """
+ prev_global_rts = self._global_interested_rts
+ curr_global_rts = self._compute_global_intrested_rts()
+
+ new_global_rts = curr_global_rts - prev_global_rts
+ removed_global_rts = prev_global_rts - curr_global_rts
+
+ # Update current interested RTs for next iteration
+ self._global_interested_rts = curr_global_rts
+
+ LOG.debug('Global Interested RT changed, new RTs %s, removed RTs %s' %
+ (new_global_rts, removed_global_rts))
+ tm = self._core_service.table_manager
+ tm.on_interesting_rts_change(new_global_rts, removed_global_rts)
+
+ def filter_by_origin_as(self, new_best_path, qualified_peers):
+ path_rf = new_best_path.route_family
+
+        # We need not filter any peers if this is not an RT NLRI path or if
+        # the source of this path is a remote peer (i.e. this is not a local
+        # path).
+ if path_rf != RF_RTC_UC or new_best_path.source is not None:
+ return qualified_peers
+ else:
+ filtered_qualified_peers = []
+ rt_origin_as = new_best_path.nlri.origin_as
+            # Collect peers whose RTC_AS setting matches the path's RT
+            # Origin AS.
+ for qualified_peer in qualified_peers:
+ neigh_conf = \
+ self._neighbors_conf.get_neighbor_conf(
+ qualified_peer.ip_address)
+ if neigh_conf.rtc_as == rt_origin_as:
+ filtered_qualified_peers.append(qualified_peer)
+
+ # Update qualified peers to include only filtered peers
+ return filtered_qualified_peers
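The update_rtc_as_set(), update_local_rt_nlris() and update_interested_rts() methods above all follow the same set-difference bookkeeping pattern; a standalone illustration with made-up RT strings:

    prev_rts = {'65000:100', '65000:200'}
    curr_rts = {'65000:200', '65000:300'}

    new_rts = curr_rts - prev_rts        # {'65000:300'}  -> advertise
    removed_rts = prev_rts - curr_rts    # {'65000:100'}  -> withdraw

    for rt in new_rts:
        print('add RT NLRI for %s' % rt)
    for rt in removed_rts:
        print('withdraw RT NLRI for %s' % rt)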
diff --git a/ryu/services/protocols/bgp/utils/stats.py b/ryu/services/protocols/bgp/utils/stats.py
new file mode 100644
index 00000000..7cd2a116
--- /dev/null
+++ b/ryu/services/protocols/bgp/utils/stats.py
@@ -0,0 +1,100 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Module for stats related classes and utilities.
+"""
+import datetime
+import json
+import logging
+import time
+
+from ryu.services.protocols.bgp.rtconf.base import ConfWithId
+
+
+_STATS_LOGGER = logging.getLogger('stats')
+
+# Various stats related constants.
+DEFAULT_LOG_LEVEL = logging.INFO
+
+RESOURCE_ID = 'resource_id'
+RESOURCE_NAME = 'resource_name'
+TIMESTAMP = 'timestamp'
+LOG_LEVEL = 'log_level'
+
+STATS_RESOURCE = 'stats_resource'
+STATS_SOURCE = 'stats_source'
+
+# VRF related stat constants
+REMOTE_ROUTES = 'remote_routes'
+LOCAL_ROUTES = 'local_routes'
+
+# Peer related stat constant.
+UPDATE_MSG_IN = 'update_message_in'
+UPDATE_MSG_OUT = 'update_message_out'
+TOTAL_MSG_IN = 'total_message_in'
+TOTAL_MSG_OUT = 'total_message_out'
+FMS_EST_TRANS = 'fsm_established_transitions'
+UPTIME = 'uptime'
+
+
+def log(stats_resource=None, stats_source=None, log_level=DEFAULT_LOG_LEVEL,
+ **kwargs):
+ """Utility to log given stats to *stats* logger.
+
+    Stats to log are given by `stats_source`; in its absence we log
+    `kwargs`. The *stats* logger is configured independently of any other
+    logger, and only stats should be logged to it. The current timestamp is
+    added to the logged stats if not already present.
+
+    Parameters:
+        - `stats_resource`: any object that provides `id` and `name`
+        attributes.
+        - `stats_source`: any callable that returns a `dict` that will be
+        logged to the *stats* logger.
+        - `log_level`: logging level at which to log this stats message.
+ - `**kwargs`: if `stats_source` is not given, we log this `dict`.
+ """
+
+ # Get stats from source if given.
+ if stats_source is not None:
+ kwargs = stats_source()
+
+ if stats_resource is None:
+ if RESOURCE_ID not in kwargs or RESOURCE_NAME not in kwargs:
+ raise ValueError('Missing required stats labels.')
+ else:
+ if not (hasattr(stats_resource, ConfWithId.ID) and
+ hasattr(stats_resource, ConfWithId.NAME)):
+            raise ValueError('Given stats resource is missing id or name'
+                             ' attributes.')
+ kwargs[RESOURCE_ID] = stats_resource.id
+ kwargs[RESOURCE_NAME] = stats_resource.name
+
+ if TIMESTAMP not in kwargs:
+ kwargs[TIMESTAMP] = datetime.datetime.utcfromtimestamp(
+ time.time()).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+
+ _STATS_LOGGER.log(log_level,
+ json.dumps(kwargs))
+
+
+def logd(**kwargs):
+ log(log_level=logging.DEBUG, **kwargs)
+
+
+def logi(**kwargs):
+ log(log_level=logging.INFO, **kwargs)
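A usage sketch for the log()/logi() helpers above. The resource class and counter values are made up, and it is assumed that ConfWithId.ID and ConfWithId.NAME map to attribute names 'id' and 'name':

    import logging

    class _DemoResource(object):
        id = 'vrf-1'
        name = 'customer-a'

    def _peer_counters():
        return {UPDATE_MSG_IN: 12, UPDATE_MSG_OUT: 7}

    logging.basicConfig(level=logging.DEBUG)

    # Stats pulled from a callable source...
    log(stats_resource=_DemoResource(), stats_source=_peer_counters)
    # ...or passed directly as keyword arguments.
    logi(stats_resource=_DemoResource(), **{TOTAL_MSG_IN: 42, TOTAL_MSG_OUT: 40})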
diff --git a/ryu/services/protocols/bgp/utils/validation.py b/ryu/services/protocols/bgp/utils/validation.py
new file mode 100644
index 00000000..6359183d
--- /dev/null
+++ b/ryu/services/protocols/bgp/utils/validation.py
@@ -0,0 +1,234 @@
+# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+ Module provides utilities for validation.
+"""
+import socket
+
+
+def is_valid_ipv4(ipv4):
+    """Returns True if the given value is a valid IPv4 address.
+
+    The given value should be a string in dot-decimal notation.
+ """
+ valid = True
+
+ if not isinstance(ipv4, str):
+ valid = False
+ else:
+ try:
+ a, b, c, d = map(lambda x: int(x), ipv4.split('.'))
+ if (a < 0 or a > 255 or b < 0 or b > 255 or c < 0 or c > 255 or
+ d < 0 or d > 255):
+ valid = False
+ except ValueError:
+ valid = False
+
+ return valid
+
+
+def is_valid_ipv4_prefix(ipv4_prefix):
+ """Returns True if *ipv4_prefix* is a valid prefix with mask.
+
+ Samples:
+ - valid prefix: 1.1.1.0/32, 244.244.244.1/10
+        - invalid prefix: 2.2.2/22, 1.1.1.1/33, etc.
+ """
+ if not isinstance(ipv4_prefix, str):
+ return False
+
+ valid = True
+ tokens = ipv4_prefix.split('/')
+ if len(tokens) != 2:
+ valid = False
+ else:
+ if not is_valid_ipv4(tokens[0]):
+ valid = False
+ else:
+ # Validate mask
+ try:
+ # Mask is a number
+ mask = int(tokens[1])
+ # Mask is number between 0 to 32
+ if mask < 0 or mask > 32:
+ valid = False
+ except ValueError:
+ valid = False
+
+ return valid
+
+
+def is_valid_ipv6(ipv6):
+ """Returns True if given `ipv6` is a valid IPv6 address
+
+ Uses `socket.inet_pton` to determine validity.
+ """
+ valid = True
+ try:
+ socket.inet_pton(socket.AF_INET6, ipv6)
+ except socket.error:
+ valid = False
+
+ return valid
+
+
+def is_valid_ipv6_prefix(ipv6_prefix):
+ """Returns True if given `ipv6_prefix` is a valid IPv6 prefix."""
+
+ # Validate input type
+ if not isinstance(ipv6_prefix, str):
+ return False
+
+ valid = True
+ tokens = ipv6_prefix.split('/')
+ if len(tokens) != 2:
+ valid = False
+ else:
+ if not is_valid_ipv6(tokens[0]):
+ valid = False
+ else:
+ # Validate mask
+ try:
+ # Mask is a number
+ mask = int(tokens[1])
+ # Mask is number between 0 to 128
+ if mask < 0 or mask > 128:
+ valid = False
+ except ValueError:
+ valid = False
+
+ return valid
+
+
+def is_valid_old_asn(asn):
+    """Returns True if the given ASN is a 16-bit number.
+
+    Old AS numbers are 16-bit unsigned numbers.
+ """
+ valid = True
+ # AS number should be a 16 bit number
+ if (not isinstance(asn, (int, long)) or (asn < 0) or
+ (asn > ((2 ** 16) - 1))):
+ valid = False
+
+ return valid
+
+
+def is_valid_vpnv4_prefix(prefix):
+    """Returns True if the given prefix is a string representing a VPNv4
+    prefix.
+
+    A VPNv4 prefix is made up of RD:IPv4, where RD represents a route
+    distinguisher and IPv4 is a valid IPv4 prefix in dot-decimal notation.
+ """
+ valid = True
+
+ if not isinstance(prefix, str):
+ valid = False
+ else:
+ # Split the prefix into route distinguisher and IP
+ tokens = prefix.split(':')
+ if len(tokens) != 3:
+ valid = False
+ else:
+ # Check if first two tokens can form a valid RD
+ try:
+ # admin_subfield
+ int(tokens[0])
+ # assigned_subfield
+ int(tokens[1])
+ except ValueError:
+ valid = False
+
+ # Check if ip part is valid
+ valid = is_valid_ipv4_prefix(tokens[2])
+
+ return valid
+
+
+def is_valid_med(med):
+    """Returns True if the value of *med* is valid as per the RFC.
+
+    According to the RFC, MED is a four-octet non-negative integer.
+ """
+ valid = True
+
+ if not isinstance(med, (int, long)):
+ valid = False
+ else:
+ if med < 0 or med > (2 ** 32) - 1:
+ valid = False
+
+ return valid
+
+
+def is_valid_mpls_label(label):
+ """Validates `label` according to MPLS label rules
+
+    The RFC says:
+    The label is a 20-bit field.
+ A value of 0 represents the "IPv4 Explicit NULL Label".
+ A value of 1 represents the "Router Alert Label".
+ A value of 2 represents the "IPv6 Explicit NULL Label".
+ A value of 3 represents the "Implicit NULL Label".
+ Values 4-15 are reserved.
+ """
+ valid = True
+
+ if (not isinstance(label, (int, long)) or
+ (label >= 4 and label <= 15) or
+            (label < 0 or label > (2 ** 20) - 1)):
+ valid = False
+
+ return valid
+
+
+def is_valid_route_disc(route_disc):
+ """Validates *route_disc* as string representation of route distinguisher.
+
+ Returns True if *route_disc* is as per our convention of RD, else False.
+    Our convention is to represent RD as a string in the format
+    *admin_sub_field:assigned_num_field*, where *admin_sub_field* can be a
+    valid IPv4 string representation.
+ Valid examples: '65000:222', '1.2.3.4:4432'.
+ Invalid examples: '1.11.1: 333'
+ """
+ # TODO(PH): Provide complete implementation.
+ return is_valid_ext_comm_attr(route_disc)
+
+
+def is_valid_ext_comm_attr(attr):
+ """Validates *attr* as string representation of RT or SOO.
+
+    Returns True if *attr* is as per our convention for RT or SOO, else
+    False. Our convention is to represent RT/SOO as a string in the format
+    *global_admin_part:local_admin_part*.
+ """
+ is_valid = True
+
+ if not isinstance(attr, str):
+ is_valid = False
+ else:
+        try:
+            first, second = attr.split(':')
+ if '.' in first:
+ socket.inet_aton(first)
+ else:
+ int(first)
+ int(second)
+ except (ValueError, socket.error):
+ is_valid = False
+
+ return is_valid
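Quick sanity checks against the validators above (all values are illustrative):

    assert is_valid_ipv4('192.168.10.1')
    assert not is_valid_ipv4('192.168.10.256')
    assert is_valid_ipv4_prefix('10.0.0.0/8')
    assert not is_valid_ipv4_prefix('10.0.0.0/40')
    assert is_valid_old_asn(65000)
    assert not is_valid_old_asn(70000)
    assert is_valid_vpnv4_prefix('65000:100:10.0.0.0/24')
    assert is_valid_ext_comm_attr('65000:100')
    assert is_valid_ext_comm_attr('1.2.3.4:55')
    assert not is_valid_ext_comm_attr(100)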