summaryrefslogtreecommitdiffhomepage
path: root/tests/integrated
diff options
context:
space:
mode:
Diffstat (limited to 'tests/integrated')
-rw-r--r--tests/integrated/__init__.py0
-rw-r--r--tests/integrated/bgp/__init__.py0
-rw-r--r--tests/integrated/bgp/base.py84
-rw-r--r--tests/integrated/bgp/base_ip6.py84
-rw-r--r--tests/integrated/bgp/test_basic.py49
-rw-r--r--tests/integrated/bgp/test_ip6_basic.py49
-rw-r--r--tests/integrated/common/__init__.py0
-rw-r--r--tests/integrated/common/docker_base.py801
-rw-r--r--tests/integrated/common/install_docker_test_pkg.sh43
-rw-r--r--tests/integrated/common/install_docker_test_pkg_common.sh39
-rw-r--r--tests/integrated/common/install_docker_test_pkg_for_travis.sh12
-rw-r--r--tests/integrated/common/quagga.py332
-rw-r--r--tests/integrated/common/ryubgp.py212
-rw-r--r--tests/integrated/run_test.py54
-rwxr-xr-xtests/integrated/run_tests_with_ovs12.py110
-rw-r--r--tests/integrated/test_add_flow_v10.py258
-rw-r--r--tests/integrated/test_add_flow_v12_actions.py496
-rw-r--r--tests/integrated/test_add_flow_v12_matches.py1200
-rw-r--r--tests/integrated/test_of_config.py328
-rw-r--r--tests/integrated/test_request_reply_v12.py1055
-rw-r--r--tests/integrated/test_vrrp_linux_multi.py103
-rw-r--r--tests/integrated/test_vrrp_linux_multi.sh34
-rw-r--r--tests/integrated/test_vrrp_multi.py143
-rw-r--r--tests/integrated/test_vrrp_multi.sh42
-rw-r--r--tests/integrated/tester.py214
-rw-r--r--tests/integrated/vrrp_common.py221
26 files changed, 5963 insertions, 0 deletions
diff --git a/tests/integrated/__init__.py b/tests/integrated/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/integrated/__init__.py
diff --git a/tests/integrated/bgp/__init__.py b/tests/integrated/bgp/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/integrated/bgp/__init__.py
diff --git a/tests/integrated/bgp/base.py b/tests/integrated/bgp/base.py
new file mode 100644
index 00000000..9d057075
--- /dev/null
+++ b/tests/integrated/bgp/base.py
@@ -0,0 +1,84 @@
+# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
+# Copyright (C) 2016 Fumihiko Kakuma <kakuma at valinux co jp>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import sys
+import unittest
+
+from tests.integrated.common import docker_base as ctn_base
+from tests.integrated.common import ryubgp
+from tests.integrated.common import quagga
+
+
+LOG = logging.getLogger(__name__)
+
+
+class BgpSpeakerTestBase(unittest.TestCase):
+ images = []
+ containers = []
+ bridges = []
+ checktime = 120
+
+ @classmethod
+ def setUpClass(cls):
+ cls.brdc1 = ctn_base.Bridge(name='brdc1',
+ subnet='192.168.10.0/24')
+ cls.bridges.append(cls.brdc1)
+
+ cls.dockerimg = ctn_base.DockerImage()
+ image = 'python:%d.%d' % (
+ sys.version_info.major, sys.version_info.minor)
+ cls.r_img = cls.dockerimg.create_ryu(image=image, check_exist=True)
+ cls.images.append(cls.r_img)
+ cls.q_img = 'osrg/quagga'
+ cls.images.append(cls.q_img)
+
+ cls.r1 = ryubgp.RyuBGPContainer(name='r1', asn=64512,
+ router_id='192.168.0.1',
+ ctn_image_name=cls.r_img)
+ cls.containers.append(cls.r1)
+ cls.r1.add_route('10.10.0.0/28')
+ cls.r1.run(wait=True)
+ cls.r1_ip_cidr = cls.brdc1.addif(cls.r1)
+ cls.r1_ip = cls.r1_ip_cidr.split('/')[0]
+
+ cls.q1 = quagga.QuaggaBGPContainer(name='q1', asn=64522,
+ router_id='192.168.0.2',
+ ctn_image_name=cls.q_img)
+ cls.containers.append(cls.q1)
+ cls.q1.add_route('192.168.160.0/24')
+ cls.q1.run(wait=True)
+ cls.q1_ip_cidr = cls.brdc1.addif(cls.q1)
+ cls.q1_ip = cls.q1_ip_cidr.split('/')[0]
+
+ cls.r1.add_peer(cls.q1, bridge=cls.brdc1.name)
+ cls.q1.add_peer(cls.r1, bridge=cls.brdc1.name)
+
+ super(BgpSpeakerTestBase, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ for ctn in cls.containers:
+ try:
+ ctn.stop()
+ except ctn_base.CommandError as e:
+ LOG.exception('Exception when stopping containers: %s', e)
+ ctn.remove()
+ for br in cls.bridges:
+ br.delete()
+ super(BgpSpeakerTestBase, cls).tearDownClass()
diff --git a/tests/integrated/bgp/base_ip6.py b/tests/integrated/bgp/base_ip6.py
new file mode 100644
index 00000000..8d3ef419
--- /dev/null
+++ b/tests/integrated/bgp/base_ip6.py
@@ -0,0 +1,84 @@
+# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
+# Copyright (C) 2016 Fumihiko Kakuma <kakuma at valinux co jp>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import sys
+import unittest
+
+from tests.integrated.common import docker_base as ctn_base
+from tests.integrated.common import ryubgp
+from tests.integrated.common import quagga
+
+
+LOG = logging.getLogger(__name__)
+
+
+class BgpSpeakerTestBase(unittest.TestCase):
+ images = []
+ containers = []
+ bridges = []
+ checktime = 120
+
+ @classmethod
+ def setUpClass(cls):
+ cls.brdc1 = ctn_base.Bridge(name='brip6dc1',
+ subnet='2001:10::/32')
+ cls.bridges.append(cls.brdc1)
+
+ cls.dockerimg = ctn_base.DockerImage()
+ image = 'python:%d.%d' % (
+ sys.version_info.major, sys.version_info.minor)
+ cls.r_img = cls.dockerimg.create_ryu(image=image, check_exist=True)
+ cls.images.append(cls.r_img)
+ cls.q_img = 'osrg/quagga'
+ cls.images.append(cls.q_img)
+
+ cls.r1 = ryubgp.RyuBGPContainer(name='r1', asn=64512,
+ router_id='192.168.0.1',
+ ctn_image_name=cls.r_img)
+ cls.containers.append(cls.r1)
+ cls.r1.add_route('fc00:10::/64', route_info={'rf': 'ipv6'})
+ cls.r1.run(wait=True)
+ cls.r1_ip_cidr = cls.brdc1.addif(cls.r1)
+ cls.r1_ip = cls.r1_ip_cidr.split('/')[0]
+
+ cls.q1 = quagga.QuaggaBGPContainer(name='q1', asn=64522,
+ router_id='192.168.0.2',
+ ctn_image_name=cls.q_img)
+ cls.containers.append(cls.q1)
+ cls.q1.add_route('fc00:100::/64', route_info={'rf': 'ipv6'})
+ cls.q1.run(wait=True)
+ cls.q1_ip_cidr = cls.brdc1.addif(cls.q1)
+ cls.q1_ip = cls.q1_ip_cidr.split('/')[0]
+
+ cls.r1.add_peer(cls.q1, bridge=cls.brdc1.name, v6=True)
+ cls.q1.add_peer(cls.r1, bridge=cls.brdc1.name, v6=True)
+
+ super(BgpSpeakerTestBase, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ for ctn in cls.containers:
+ try:
+ ctn.stop()
+ except ctn_base.CommandError as e:
+ LOG.exception('Exception when stopping containers: %s', e)
+ ctn.remove()
+ for br in cls.bridges:
+ br.delete()
+ super(BgpSpeakerTestBase, cls).tearDownClass()
diff --git a/tests/integrated/bgp/test_basic.py b/tests/integrated/bgp/test_basic.py
new file mode 100644
index 00000000..9ef430c7
--- /dev/null
+++ b/tests/integrated/bgp/test_basic.py
@@ -0,0 +1,49 @@
+# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
+# Copyright (C) 2016 Fumihiko Kakuma <kakuma at valinux co jp>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import time
+
+from tests.integrated.common import docker_base as ctn_base
+from . import base
+
+
+class BgpSpeakerBasicTest(base.BgpSpeakerTestBase):
+ def setUp(self):
+ super(BgpSpeakerBasicTest, self).setUp()
+ self.r1.stop_ryubgp(retry=True)
+ self.r1.start_ryubgp(retry=True)
+
+ def test_check_neighbor_established(self):
+ neighbor_state = ctn_base.BGP_FSM_IDLE
+ for _ in range(0, self.checktime):
+ neighbor_state = self.q1.get_neighbor_state(self.r1)
+ if neighbor_state == ctn_base.BGP_FSM_ESTABLISHED:
+ break
+ time.sleep(1)
+ self.assertEqual(neighbor_state, ctn_base.BGP_FSM_ESTABLISHED)
+
+ def test_check_rib_nexthop(self):
+ neighbor_state = ctn_base.BGP_FSM_IDLE
+ for _ in range(0, self.checktime):
+ neighbor_state = self.q1.get_neighbor_state(self.r1)
+ if neighbor_state == ctn_base.BGP_FSM_ESTABLISHED:
+ break
+ time.sleep(1)
+ self.assertEqual(neighbor_state, ctn_base.BGP_FSM_ESTABLISHED)
+ rib = self.q1.get_global_rib(prefix='10.10.0.0/28')
+ self.assertEqual(self.r1_ip, rib[0]['nexthop'])
diff --git a/tests/integrated/bgp/test_ip6_basic.py b/tests/integrated/bgp/test_ip6_basic.py
new file mode 100644
index 00000000..a92e4edb
--- /dev/null
+++ b/tests/integrated/bgp/test_ip6_basic.py
@@ -0,0 +1,49 @@
+# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
+# Copyright (C) 2016 Fumihiko Kakuma <kakuma at valinux co jp>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import time
+
+from tests.integrated.common import docker_base as ctn_base
+from . import base_ip6 as base
+
+
+class BgpSpeakerBasicTest(base.BgpSpeakerTestBase):
+ def setUp(self):
+ super(BgpSpeakerBasicTest, self).setUp()
+ self.r1.stop_ryubgp(retry=True)
+ self.r1.start_ryubgp(retry=True)
+
+ def test_check_neighbor_established(self):
+ neighbor_state = ctn_base.BGP_FSM_IDLE
+ for _ in range(0, self.checktime):
+ neighbor_state = self.q1.get_neighbor_state(self.r1)
+ if neighbor_state == ctn_base.BGP_FSM_ESTABLISHED:
+ break
+ time.sleep(1)
+ self.assertEqual(neighbor_state, ctn_base.BGP_FSM_ESTABLISHED)
+
+ def test_check_rib_nexthop(self):
+ neighbor_state = ctn_base.BGP_FSM_IDLE
+ for _ in range(0, self.checktime):
+ neighbor_state = self.q1.get_neighbor_state(self.r1)
+ if neighbor_state == ctn_base.BGP_FSM_ESTABLISHED:
+ break
+ time.sleep(1)
+ self.assertEqual(neighbor_state, ctn_base.BGP_FSM_ESTABLISHED)
+ rib = self.q1.get_global_rib(prefix='fc00:10::/64', rf='ipv6')
+ self.assertEqual(self.r1_ip, rib[0]['nexthop'])
diff --git a/tests/integrated/common/__init__.py b/tests/integrated/common/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/integrated/common/__init__.py
diff --git a/tests/integrated/common/docker_base.py b/tests/integrated/common/docker_base.py
new file mode 100644
index 00000000..1ae2cc27
--- /dev/null
+++ b/tests/integrated/common/docker_base.py
@@ -0,0 +1,801 @@
+# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
+#
+# This is based on the following
+# https://github.com/osrg/gobgp/test/lib/base.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import itertools
+import logging
+import os
+import subprocess
+import time
+
+import netaddr
+import six
+
+LOG = logging.getLogger(__name__)
+
+DEFAULT_TEST_PREFIX = ''
+DEFAULT_TEST_BASE_DIR = '/tmp/ctn_docker/bgp'
+TEST_PREFIX = DEFAULT_TEST_PREFIX
+TEST_BASE_DIR = DEFAULT_TEST_BASE_DIR
+
+BGP_FSM_IDLE = 'BGP_FSM_IDLE'
+BGP_FSM_ACTIVE = 'BGP_FSM_ACTIVE'
+BGP_FSM_ESTABLISHED = 'BGP_FSM_ESTABLISHED'
+
+BGP_ATTR_TYPE_ORIGIN = 1
+BGP_ATTR_TYPE_AS_PATH = 2
+BGP_ATTR_TYPE_NEXT_HOP = 3
+BGP_ATTR_TYPE_MULTI_EXIT_DISC = 4
+BGP_ATTR_TYPE_LOCAL_PREF = 5
+BGP_ATTR_TYPE_COMMUNITIES = 8
+BGP_ATTR_TYPE_ORIGINATOR_ID = 9
+BGP_ATTR_TYPE_CLUSTER_LIST = 10
+BGP_ATTR_TYPE_MP_REACH_NLRI = 14
+BGP_ATTR_TYPE_EXTENDED_COMMUNITIES = 16
+
+BRIDGE_TYPE_DOCKER = 'docker'
+BRIDGE_TYPE_BRCTL = 'brctl'
+BRIDGE_TYPE_OVS = 'ovs'
+
+
+class CommandError(Exception):
+ def __init__(self, out):
+ super(CommandError, self).__init__()
+ self.out = out
+
+
+def try_several_times(f, t=3, s=1):
+ e = RuntimeError()
+ for _ in range(t):
+ try:
+ r = f()
+ except RuntimeError as e:
+ time.sleep(s)
+ else:
+ return r
+ raise e
+
+
+class CmdBuffer(list):
+ def __init__(self, delim='\n'):
+ super(CmdBuffer, self).__init__()
+ self.delim = delim
+
+ def __lshift__(self, value):
+ self.append(value)
+
+ def __str__(self):
+ return self.delim.join(self)
+
+
+class CommandOut(str):
+
+ def __new__(cls, stdout, stderr, command, returncode, **kwargs):
+ stdout = stdout or ''
+ obj = super(CommandOut, cls).__new__(cls, stdout, **kwargs)
+ obj.stderr = stderr or ''
+ obj.command = command
+ obj.returncode = returncode
+ return obj
+
+
+class Command(object):
+
+ def _execute(self, cmd, capture=False, executable=None):
+ """Execute a command using subprocess.Popen()
+ :Parameters:
+ - out: stdout from subprocess.Popen()
+ out has some attributes.
+ out.returncode: returncode of subprocess.Popen()
+ out.stderr: stderr from subprocess.Popen()
+ """
+ if capture:
+ p_stdout = subprocess.PIPE
+ p_stderr = subprocess.PIPE
+ else:
+ p_stdout = None
+ p_stderr = None
+ pop = subprocess.Popen(cmd, shell=True, executable=executable,
+ stdout=p_stdout,
+ stderr=p_stderr)
+ __stdout, __stderr = pop.communicate()
+ _stdout = six.text_type(__stdout, 'utf-8')
+ _stderr = six.text_type(__stderr, 'utf-8')
+ out = CommandOut(_stdout, _stderr, cmd, pop.returncode)
+ return out
+
+ def execute(self, cmd, capture=True, try_times=1, interval=1):
+ out = None
+ for i in range(try_times):
+ out = self._execute(cmd, capture=capture)
+ LOG.info(out.command)
+ if out.returncode == 0:
+ return out
+ LOG.error("stdout: %s", out)
+ LOG.error("stderr: %s", out.stderr)
+ if i + 1 >= try_times:
+ break
+ time.sleep(interval)
+ raise CommandError(out)
+
+ def sudo(self, cmd, capture=True, try_times=1, interval=1):
+ cmd = 'sudo %s' % cmd
+ return self.execute(cmd, capture=capture,
+ try_times=try_times, interval=interval)
+
+
+class DockerImage(object):
+ def __init__(self, baseimage='ubuntu:16.04'):
+ self.baseimage = baseimage
+ self.cmd = Command()
+
+ def get_images(self):
+ out = self.cmd.sudo('sudo docker images')
+ images = []
+ for line in out.splitlines()[1:]:
+ images.append(line.split()[0])
+ return images
+
+ def exist(self, name):
+ return name in self.get_images()
+
+ def build(self, tagname, dockerfile_dir):
+ self.cmd.sudo(
+ "docker build -t {0} {1}".format(tagname, dockerfile_dir),
+ try_times=3)
+
+ def remove(self, tagname, check_exist=False):
+ if check_exist and not self.exist(tagname):
+ return tagname
+ self.cmd.sudo("docker rmi -f %s" % tagname, try_times=3)
+
+ def create_quagga(self, tagname='quagga', image=None, check_exist=False):
+ if check_exist and self.exist(tagname):
+ return tagname
+ workdir = os.path.join(TEST_BASE_DIR, tagname)
+ pkges = ' '.join([
+ 'telnet',
+ 'tcpdump',
+ 'quagga',
+ ])
+ if image:
+ use_image = image
+ else:
+ use_image = self.baseimage
+ c = CmdBuffer()
+ c << 'FROM %s' % use_image
+ c << 'RUN apt-get update'
+ c << 'RUN apt-get install -qy --no-install-recommends %s' % pkges
+ c << 'CMD /usr/lib/quagga/bgpd'
+
+ self.cmd.sudo('rm -rf %s' % workdir)
+ self.cmd.execute('mkdir -p %s' % workdir)
+ self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
+ self.build(tagname, workdir)
+ return tagname
+
+ def create_ryu(self, tagname='ryu', image=None, check_exist=False):
+ if check_exist and self.exist(tagname):
+ return tagname
+ workdir = os.path.join(TEST_BASE_DIR, tagname)
+ workdir_ctn = '/root/osrg/ryu'
+ pkges = ' '.join([
+ 'tcpdump',
+ 'iproute2',
+ ])
+ if image:
+ use_image = image
+ else:
+ use_image = self.baseimage
+ c = CmdBuffer()
+ c << 'FROM %s' % use_image
+ c << 'ADD ryu %s' % workdir_ctn
+ install = ' '.join([
+ 'RUN apt-get update',
+ '&& apt-get install -qy --no-install-recommends %s' % pkges,
+ '&& cd %s' % workdir_ctn,
+ # Note: Clean previous builds, because "python setup.py install"
+ # might fail if the current directory contains the symlink to
+ # Docker host file systems.
+ '&& rm -rf *.egg-info/ build/ dist/ .tox/ *.log'
+ '&& pip install -r tools/pip-requires -r tools/optional-requires',
+ '&& python setup.py install',
+ ])
+ c << install
+
+ self.cmd.sudo('rm -rf %s' % workdir)
+ self.cmd.execute('mkdir -p %s' % workdir)
+ self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir))
+ self.cmd.execute('cp -r ../ryu %s/' % workdir)
+ self.build(tagname, workdir)
+ return tagname
+
+
+class Bridge(object):
+ def __init__(self, name, subnet='', start_ip=None, end_ip=None,
+ with_ip=True, self_ip=False,
+ fixed_ip=None, reuse=False,
+ br_type='docker'):
+ """Manage a bridge
+ :Parameters:
+ - name: bridge name
+ - subnet: network CIDR to be used in this bridge
+ - start_ip: start address of an ip to be used in the subnet
+ - end_ip: end address of an ip to be used in the subnet
+ - with_ip: specify if assign automatically an ip address
+ - self_ip: specify if assign an ip address for the bridge
+ - fixed_ip: an ip address to be assigned to the bridge
+ - reuse: specify if use an existing bridge
+ - br_type: one of 'docker', 'brctl' or 'ovs'
+ """
+ self.cmd = Command()
+ self.name = name
+ if br_type not in (BRIDGE_TYPE_DOCKER, BRIDGE_TYPE_BRCTL,
+ BRIDGE_TYPE_OVS):
+ raise Exception("argument error br_type: %s" % br_type)
+ self.br_type = br_type
+ self.docker_nw = bool(self.br_type == BRIDGE_TYPE_DOCKER)
+ if TEST_PREFIX != '':
+ self.name = '{0}_{1}'.format(TEST_PREFIX, name)
+ self.with_ip = with_ip
+ if with_ip:
+ self.subnet = netaddr.IPNetwork(subnet)
+ if start_ip:
+ self.start_ip = start_ip
+ else:
+ self.start_ip = netaddr.IPAddress(self.subnet.first)
+ if end_ip:
+ self.end_ip = end_ip
+ else:
+ self.end_ip = netaddr.IPAddress(self.subnet.last)
+
+ def _ip_gen():
+ for host in netaddr.IPRange(self.start_ip, self.end_ip):
+ yield host
+ self._ip_generator = _ip_gen()
+ # throw away first network address
+ self.next_ip_address()
+
+ self.self_ip = self_ip
+ if fixed_ip:
+ self.ip_addr = fixed_ip
+ else:
+ self.ip_addr = self.next_ip_address()
+ if not reuse:
+ def f():
+ if self.br_type == BRIDGE_TYPE_DOCKER:
+ gw = "--gateway %s" % self.ip_addr.split('/')[0]
+ v6 = ''
+ if self.subnet.version == 6:
+ v6 = '--ipv6'
+ cmd = ("docker network create --driver bridge %s "
+ "%s --subnet %s %s" % (v6, gw, subnet, self.name))
+ elif self.br_type == BRIDGE_TYPE_BRCTL:
+ cmd = "ip link add {0} type bridge".format(self.name)
+ elif self.br_type == BRIDGE_TYPE_OVS:
+ cmd = "ovs-vsctl add-br {0}".format(self.name)
+ else:
+ raise ValueError('Unsupported br_type: %s' % self.br_type)
+ self.delete()
+ self.execute(cmd, sudo=True, retry=True)
+ try_several_times(f)
+ if not self.docker_nw:
+ self.execute("ip link set up dev {0}".format(self.name),
+ sudo=True, retry=True)
+
+ if not self.docker_nw and self_ip:
+ ips = self.check_br_addr(self.name)
+ for key, ip in ips.items():
+ if self.subnet.version == key:
+ self.execute(
+ "ip addr del {0} dev {1}".format(ip, self.name),
+ sudo=True, retry=True)
+ self.execute(
+ "ip addr add {0} dev {1}".format(self.ip_addr, self.name),
+ sudo=True, retry=True)
+ self.ctns = []
+
+ def get_bridges_dc(self):
+ out = self.execute('docker network ls', sudo=True, retry=True)
+ bridges = []
+ for line in out.splitlines()[1:]:
+ bridges.append(line.split()[1])
+ return bridges
+
+ def get_bridges_brctl(self):
+ out = self.execute('brctl show', retry=True)
+ bridges = []
+ for line in out.splitlines()[1:]:
+ bridges.append(line.split()[0])
+ return bridges
+
+ def get_bridges_ovs(self):
+ out = self.execute('ovs-vsctl list-br', sudo=True, retry=True)
+ return out.splitlines()
+
+ def get_bridges(self):
+ if self.br_type == BRIDGE_TYPE_DOCKER:
+ return self.get_bridges_dc()
+ elif self.br_type == BRIDGE_TYPE_BRCTL:
+ return self.get_bridges_brctl()
+ elif self.br_type == BRIDGE_TYPE_OVS:
+ return self.get_bridges_ovs()
+
+ def exist(self):
+ return self.name in self.get_bridges()
+
+ def execute(self, cmd, capture=True, sudo=False, retry=False):
+ if sudo:
+ m = self.cmd.sudo
+ else:
+ m = self.cmd.execute
+ if retry:
+ return m(cmd, capture=capture, try_times=3, interval=1)
+ else:
+ return m(cmd, capture=capture)
+
+ def check_br_addr(self, br):
+ ips = {}
+ cmd = "ip a show dev %s" % br
+ for line in self.execute(cmd, sudo=True).split('\n'):
+ if line.strip().startswith("inet "):
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ips[4] = elems[1]
+ elif line.strip().startswith("inet6 "):
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ips[6] = elems[1]
+ return ips
+
+ def next_ip_address(self):
+ return "{0}/{1}".format(next(self._ip_generator),
+ self.subnet.prefixlen)
+
+ def addif(self, ctn):
+ name = ctn.next_if_name()
+ self.ctns.append(ctn)
+ ip_address = None
+ if self.docker_nw:
+ ipv4 = None
+ ipv6 = None
+ ip_address = self.next_ip_address()
+ ip_address_ip = ip_address.split('/')[0]
+ version = 4
+ if netaddr.IPNetwork(ip_address).version == 6:
+ version = 6
+ opt_ip = "--ip %s" % ip_address_ip
+ if version == 4:
+ ipv4 = ip_address
+ else:
+ opt_ip = "--ip6 %s" % ip_address_ip
+ ipv6 = ip_address
+ cmd = "docker network connect %s %s %s" % (
+ opt_ip, self.name, ctn.docker_name())
+ self.execute(cmd, sudo=True)
+ ctn.set_addr_info(bridge=self.name, ipv4=ipv4, ipv6=ipv6,
+ ifname=name)
+ else:
+ if self.with_ip:
+ ip_address = self.next_ip_address()
+ version = 4
+ if netaddr.IPNetwork(ip_address).version == 6:
+ version = 6
+ ctn.pipework(self, ip_address, name, version=version)
+ else:
+ ctn.pipework(self, '0/0', name)
+ return ip_address
+
+ def delete(self, check_exist=True):
+ if check_exist:
+ if not self.exist():
+ return
+ if self.br_type == BRIDGE_TYPE_DOCKER:
+ self.execute("docker network rm %s" % self.name,
+ sudo=True, retry=True)
+ elif self.br_type == BRIDGE_TYPE_BRCTL:
+ self.execute("ip link set down dev %s" % self.name,
+ sudo=True, retry=True)
+ self.execute(
+ "ip link delete %s type bridge" % self.name,
+ sudo=True, retry=True)
+ elif self.br_type == BRIDGE_TYPE_OVS:
+ self.execute(
+ "ovs-vsctl del-br %s" % self.name,
+ sudo=True, retry=True)
+
+
+class Container(object):
+ def __init__(self, name, image=None):
+ self.name = name
+ self.image = image
+ self.shared_volumes = []
+ self.ip_addrs = []
+ self.ip6_addrs = []
+ self.is_running = False
+ self.eths = []
+ self.id = None
+
+ self.cmd = Command()
+ self.remove()
+
+ def docker_name(self):
+ if TEST_PREFIX == DEFAULT_TEST_PREFIX:
+ return self.name
+ return '{0}_{1}'.format(TEST_PREFIX, self.name)
+
+ def get_docker_id(self):
+ if self.id:
+ return self.id
+ else:
+ return self.docker_name()
+
+ def next_if_name(self):
+ name = 'eth{0}'.format(len(self.eths) + 1)
+ self.eths.append(name)
+ return name
+
+ def set_addr_info(self, bridge, ipv4=None, ipv6=None, ifname='eth0'):
+ if ipv4:
+ self.ip_addrs.append((ifname, ipv4, bridge))
+ if ipv6:
+ self.ip6_addrs.append((ifname, ipv6, bridge))
+
+ def get_addr_info(self, bridge, ipv=4):
+ addrinfo = {}
+ if ipv == 4:
+ ip_addrs = self.ip_addrs
+ elif ipv == 6:
+ ip_addrs = self.ip6_addrs
+ else:
+ return None
+ for addr in ip_addrs:
+ if addr[2] == bridge:
+ addrinfo[addr[1]] = addr[0]
+ return addrinfo
+
+ def execute(self, cmd, capture=True, sudo=False, retry=False):
+ if sudo:
+ m = self.cmd.sudo
+ else:
+ m = self.cmd.execute
+ if retry:
+ return m(cmd, capture=capture, try_times=3, interval=1)
+ else:
+ return m(cmd, capture=capture)
+
+ def dcexec(self, cmd, capture=True, retry=False):
+ if retry:
+ return self.cmd.sudo(cmd, capture=capture, try_times=3, interval=1)
+ else:
+ return self.cmd.sudo(cmd, capture=capture)
+
+ def exec_on_ctn(self, cmd, capture=True, detach=False):
+ name = self.docker_name()
+ flag = '-d' if detach else ''
+ return self.dcexec('docker exec {0} {1} {2}'.format(
+ flag, name, cmd), capture=capture)
+
+ def get_containers(self, allctn=False):
+ cmd = 'docker ps --no-trunc=true'
+ if allctn:
+ cmd += ' --all=true'
+ out = self.dcexec(cmd, retry=True)
+ containers = []
+ for line in out.splitlines()[1:]:
+ containers.append(line.split()[-1])
+ return containers
+
+ def exist(self, allctn=False):
+ return self.docker_name() in self.get_containers(allctn=allctn)
+
+ def run(self):
+ c = CmdBuffer(' ')
+ c << "docker run --privileged=true"
+ for sv in self.shared_volumes:
+ c << "-v {0}:{1}".format(sv[0], sv[1])
+ c << "--name {0} --hostname {0} -id {1}".format(self.docker_name(),
+ self.image)
+ self.id = self.dcexec(str(c), retry=True)
+ self.is_running = True
+ self.exec_on_ctn("ip li set up dev lo")
+ ipv4 = None
+ ipv6 = None
+ for line in self.exec_on_ctn("ip a show dev eth0").split('\n'):
+ if line.strip().startswith("inet "):
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ipv4 = elems[1]
+ elif line.strip().startswith("inet6 "):
+ elems = [e.strip() for e in line.strip().split(' ')]
+ ipv6 = elems[1]
+ self.set_addr_info(bridge='docker0', ipv4=ipv4, ipv6=ipv6,
+ ifname='eth0')
+ return 0
+
+ def stop(self, check_exist=True):
+ if check_exist:
+ if not self.exist(allctn=False):
+ return
+ ctn_id = self.get_docker_id()
+ out = self.dcexec('docker stop -t 0 %s' % ctn_id, retry=True)
+ self.is_running = False
+ return out
+
+ def remove(self, check_exist=True):
+ if check_exist:
+ if not self.exist(allctn=True):
+ return
+ ctn_id = self.get_docker_id()
+ out = self.dcexec('docker rm -f %s' % ctn_id, retry=True)
+ self.is_running = False
+ return out
+
+ def pipework(self, bridge, ip_addr, intf_name="", version=4):
+ if not self.is_running:
+ LOG.warning('Call run() before pipeworking')
+ return
+ c = CmdBuffer(' ')
+ c << "pipework {0}".format(bridge.name)
+
+ if intf_name != "":
+ c << "-i {0}".format(intf_name)
+ else:
+ intf_name = "eth1"
+ ipv4 = None
+ ipv6 = None
+ if version == 4:
+ ipv4 = ip_addr
+ else:
+ c << '-a 6'
+ ipv6 = ip_addr
+ c << "{0} {1}".format(self.docker_name(), ip_addr)
+ self.set_addr_info(bridge=bridge.name, ipv4=ipv4, ipv6=ipv6,
+ ifname=intf_name)
+ self.execute(str(c), sudo=True, retry=True)
+
+ def get_pid(self):
+ if self.is_running:
+ cmd = "docker inspect -f '{{.State.Pid}}' %s" % self.docker_name()
+ return int(self.dcexec(cmd))
+ return -1
+
+ def start_tcpdump(self, interface=None, filename=None):
+ if not interface:
+ interface = "eth0"
+ if not filename:
+ filename = "{0}/{1}.dump".format(
+ self.shared_volumes[0][1], interface)
+ self.exec_on_ctn(
+ "tcpdump -i {0} -w {1}".format(interface, filename),
+ detach=True)
+
+
+class BGPContainer(Container):
+
+ WAIT_FOR_BOOT = 1
+ RETRY_INTERVAL = 5
+ DEFAULT_PEER_ARG = {'neigh_addr': '',
+ 'passwd': None,
+ 'vpn': False,
+ 'flowspec': False,
+ 'is_rs_client': False,
+ 'is_rr_client': False,
+ 'cluster_id': None,
+ 'policies': None,
+ 'passive': False,
+ 'local_addr': '',
+ 'as2': False,
+ 'graceful_restart': None,
+ 'local_as': None,
+ 'prefix_limit': None}
+ default_peer_keys = sorted(DEFAULT_PEER_ARG.keys())
+ DEFAULT_ROUTE_ARG = {'prefix': None,
+ 'rf': 'ipv4',
+ 'attr': None,
+ 'next-hop': None,
+ 'as-path': None,
+ 'community': None,
+ 'med': None,
+ 'local-pref': None,
+ 'extended-community': None,
+ 'matchs': None,
+ 'thens': None}
+ default_route_keys = sorted(DEFAULT_ROUTE_ARG.keys())
+
+ def __init__(self, name, asn, router_id, ctn_image_name=None):
+ self.config_dir = TEST_BASE_DIR
+ if TEST_PREFIX:
+ self.config_dir = os.path.join(self.config_dir, TEST_PREFIX)
+ self.config_dir = os.path.join(self.config_dir, name)
+ self.asn = asn
+ self.router_id = router_id
+ self.peers = {}
+ self.routes = {}
+ self.policies = {}
+ super(BGPContainer, self).__init__(name, ctn_image_name)
+ self.execute(
+ 'rm -rf {0}'.format(self.config_dir), sudo=True)
+ self.execute('mkdir -p {0}'.format(self.config_dir))
+ self.execute('chmod 777 {0}'.format(self.config_dir))
+
+ def __repr__(self):
+ return str({'name': self.name, 'asn': self.asn,
+ 'router_id': self.router_id})
+
+ def run(self, wait=False, w_time=WAIT_FOR_BOOT):
+ self.create_config()
+ super(BGPContainer, self).run()
+ if wait:
+ time.sleep(w_time)
+ return w_time
+
+ def add_peer(self, peer, bridge='', reload_config=True, v6=False,
+ peer_info=None):
+ peer_info = peer_info or {}
+ self.peers[peer] = self.DEFAULT_PEER_ARG.copy()
+ self.peers[peer].update(peer_info)
+ peer_keys = sorted(self.peers[peer].keys())
+ if peer_keys != self.default_peer_keys:
+ raise Exception("argument error peer_info: %s" % peer_info)
+
+ neigh_addr = ''
+ local_addr = ''
+ it = itertools.product(self.ip_addrs, peer.ip_addrs)
+ if v6:
+ it = itertools.product(self.ip6_addrs, peer.ip6_addrs)
+
+ for me, you in it:
+ if bridge != '' and bridge != me[2]:
+ continue
+ if me[2] == you[2]:
+ neigh_addr = you[1]
+ local_addr = me[1]
+ if v6:
+ addr, mask = local_addr.split('/')
+ local_addr = "{0}%{1}/{2}".format(addr, me[0], mask)
+ break
+
+ if neigh_addr == '':
+ raise Exception('peer {0} seems not ip reachable'.format(peer))
+
+ if not self.peers[peer]['policies']:
+ self.peers[peer]['policies'] = {}
+
+ self.peers[peer]['neigh_addr'] = neigh_addr
+ self.peers[peer]['local_addr'] = local_addr
+ if self.is_running and reload_config:
+ self.create_config()
+ self.reload_config()
+
+ def del_peer(self, peer, reload_config=True):
+ del self.peers[peer]
+ if self.is_running and reload_config:
+ self.create_config()
+ self.reload_config()
+
+ def disable_peer(self, peer):
+ raise NotImplementedError()
+
+ def enable_peer(self, peer):
+ raise NotImplementedError()
+
+ def log(self):
+ return self.execute('cat {0}/*.log'.format(self.config_dir))
+
+ def add_route(self, route, reload_config=True, route_info=None):
+ route_info = route_info or {}
+ self.routes[route] = self.DEFAULT_ROUTE_ARG.copy()
+ self.routes[route].update(route_info)
+ route_keys = sorted(self.routes[route].keys())
+ if route_keys != self.default_route_keys:
+ raise Exception("argument error route_info: %s" % route_info)
+ self.routes[route]['prefix'] = route
+ if self.is_running and reload_config:
+ self.create_config()
+ self.reload_config()
+
+ def add_policy(self, policy, peer, typ, default='accept',
+ reload_config=True):
+ self.set_default_policy(peer, typ, default)
+ self.define_policy(policy)
+ self.assign_policy(peer, policy, typ)
+ if self.is_running and reload_config:
+ self.create_config()
+ self.reload_config()
+
+ def set_default_policy(self, peer, typ, default):
+ if (typ in ['in', 'out', 'import', 'export'] and
+ default in ['reject', 'accept']):
+ if 'default-policy' not in self.peers[peer]:
+ self.peers[peer]['default-policy'] = {}
+ self.peers[peer]['default-policy'][typ] = default
+ else:
+ raise Exception('wrong type or default')
+
+ def define_policy(self, policy):
+ self.policies[policy['name']] = policy
+
+ def assign_policy(self, peer, policy, typ):
+ if peer not in self.peers:
+ raise Exception('peer {0} not found'.format(peer.name))
+ name = policy['name']
+ if name not in self.policies:
+ raise Exception('policy {0} not found'.format(name))
+ self.peers[peer]['policies'][typ] = policy
+
+ def get_local_rib(self, peer, rf):
+ raise NotImplementedError()
+
+ def get_global_rib(self, rf):
+ raise NotImplementedError()
+
+ def get_neighbor_state(self, peer_id):
+ raise NotImplementedError()
+
+ def get_reachablily(self, prefix, timeout=20):
+ version = netaddr.IPNetwork(prefix).version
+ addr = prefix.split('/')[0]
+ if version == 4:
+ ping_cmd = 'ping'
+ elif version == 6:
+ ping_cmd = 'ping6'
+ else:
+ raise Exception(
+ 'unsupported route family: {0}'.format(version))
+ cmd = '/bin/bash -c "/bin/{0} -c 1 -w 1 {1} | xargs echo"'.format(
+ ping_cmd, addr)
+ interval = 1
+ count = 0
+ while True:
+ res = self.exec_on_ctn(cmd)
+ LOG.info(res)
+ if '1 packets received' in res and '0% packet loss':
+ break
+ time.sleep(interval)
+ count += interval
+ if count >= timeout:
+ raise Exception('timeout')
+ return True
+
+ def wait_for(self, expected_state, peer, timeout=120):
+ interval = 1
+ count = 0
+ while True:
+ state = self.get_neighbor_state(peer)
+ LOG.info("%s's peer %s state: %s",
+ self.router_id, peer.router_id, state)
+ if state == expected_state:
+ return
+
+ time.sleep(interval)
+ count += interval
+ if count >= timeout:
+ raise Exception('timeout')
+
+ def add_static_route(self, network, next_hop):
+ cmd = '/sbin/ip route add {0} via {1}'.format(network, next_hop)
+ self.exec_on_ctn(cmd)
+
+ def set_ipv6_forward(self):
+ cmd = 'sysctl -w net.ipv6.conf.all.forwarding=1'
+ self.exec_on_ctn(cmd)
+
+ def create_config(self):
+ raise NotImplementedError()
+
+ def reload_config(self):
+ raise NotImplementedError()
diff --git a/tests/integrated/common/install_docker_test_pkg.sh b/tests/integrated/common/install_docker_test_pkg.sh
new file mode 100644
index 00000000..a771dfc1
--- /dev/null
+++ b/tests/integrated/common/install_docker_test_pkg.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+set -ex
+
+RYU_PATH=`dirname $0`
+
+source ${RYU_PATH}/install_docker_test_pkg_common.sh
+
# Add the upstream docker apt repository when the distribution archive
# does not already provide the docker-engine package.  Fails (return 1,
# which aborts the script under `set -e`) on an unsupported Ubuntu
# release.
function add_docker_aptline {
    sudo apt-get update
    if ! apt-cache search docker-engine | grep docker-engine; then
        VER=`lsb_release -r`
        if echo $VER | grep 12.04; then
            REL_NAME=precise
        elif echo $VER | grep 14.04; then
            REL_NAME=trusty
        elif echo $VER | grep 15.10; then
            REL_NAME=wily
        elif echo $VER | grep 16.04; then
            REL_NAME=xenial
        else
            # Bug fix: was misspelled "retrun", which only aborted the
            # script by accident as a command-not-found error.
            return 1
        fi
        RELEASE=ubuntu-$REL_NAME
        sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
        sudo sh -c "echo deb https://apt.dockerproject.org/repo $RELEASE main > /etc/apt/sources.list.d/docker.list"
    fi
}
+
# Main flow: parse options, optionally add the docker apt repository,
# then install docker (preferring upstream docker-engine over the
# distribution's docker.io) and the remaining test dependencies.
init_variables
process_options "$@"

if [ $APTLINE_DOCKER -eq 1 ]; then
    add_docker_aptline
fi

sudo apt-get update
if apt-cache search docker-engine | grep docker-engine; then
    DOCKER_PKG=docker-engine
else
    DOCKER_PKG=docker.io
fi
sudo apt-get install -y $DOCKER_PKG
install_depends_pkg
diff --git a/tests/integrated/common/install_docker_test_pkg_common.sh b/tests/integrated/common/install_docker_test_pkg_common.sh
new file mode 100644
index 00000000..44a3e107
--- /dev/null
+++ b/tests/integrated/common/install_docker_test_pkg_common.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+set -ex
+
+function init_variables {
+ APTLINE_DOCKER=0
+ DIR_BASE=/tmp
+}
+
# Parse command line options:
#   -a|--add-docker-aptline : set APTLINE_DOCKER=1
#   -d|--download-dir DIR   : set DIR_BASE to DIR
# The index i is tracked alongside shift because -d consumes an extra
# value argument.
function process_options {
    local max
    local i
    max=$#
    i=1
    while [ $i -le $max ]; do
        case "$1" in
            -a|--add-docker-aptline)
                APTLINE_DOCKER=1
                ;;
            -d|--download-dir)
                shift; ((i++))
                DIR_BASE=$1
                ;;
        esac
        shift; ((i++))
    done
}
+
+function install_pipework {
+ if ! which /usr/local/bin/pipework >/dev/null
+ then
+ sudo rm -rf $DIR_BASE/pipework
+ git clone https://github.com/jpetazzo/pipework.git $DIR_BASE/pipework
+ sudo install -m 0755 $DIR_BASE/pipework/pipework /usr/local/bin/pipework
+ fi
+}
+
+function install_depends_pkg {
+ install_pipework
+}
diff --git a/tests/integrated/common/install_docker_test_pkg_for_travis.sh b/tests/integrated/common/install_docker_test_pkg_for_travis.sh
new file mode 100644
index 00000000..d8c3b499
--- /dev/null
+++ b/tests/integrated/common/install_docker_test_pkg_for_travis.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -ex
+
+RYU_PATH=`dirname $0`
+
+source ${RYU_PATH}/install_docker_test_pkg_common.sh
+
+init_variables
+process_options "$@"
+
+sudo apt-get update
+install_depends_pkg
diff --git a/tests/integrated/common/quagga.py b/tests/integrated/common/quagga.py
new file mode 100644
index 00000000..9b6d2183
--- /dev/null
+++ b/tests/integrated/common/quagga.py
@@ -0,0 +1,332 @@
+# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
+#
+# This is based on the following
+# https://github.com/osrg/gobgp/test/lib/quagga.py
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import os
+
+import netaddr
+
+from . import docker_base as base
+
+LOG = logging.getLogger(__name__)
+
+
class QuaggaBGPContainer(base.BGPContainer):
    """BGP speaker container running Quagga bgpd (optionally with zebra)."""

    WAIT_FOR_BOOT = 1
    SHARED_VOLUME = '/etc/quagga'

    def __init__(self, name, asn, router_id, ctn_image_name, zebra=False):
        super(QuaggaBGPContainer, self).__init__(name, asn, router_id,
                                                 ctn_image_name)
        self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))
        self.zebra = zebra
        self._create_config_debian()

    def run(self, wait=False, w_time=WAIT_FOR_BOOT):
        # Always wait WAIT_FOR_BOOT seconds for the quagga daemons to
        # boot, regardless of the caller-supplied w_time.
        w_time = super(QuaggaBGPContainer,
                       self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
        return w_time

    def get_global_rib(self, prefix='', rf='ipv4'):
        """Parse `show bgp <rf> unicast` into a list of rib dicts.

        Each entry has 'prefix', 'nexthop' and 'ibgp' keys.  With a
        non-empty *prefix* the per-prefix form is used instead.
        """
        rib = []
        if prefix != '':
            return self.get_global_rib_with_prefix(prefix, rf)

        out = self.vtysh('show bgp {0} unicast'.format(rf), config=False)
        if out.startswith('No BGP network exists'):
            return rib

        # When a prefix is too long, vtysh wraps the entry: the prefix
        # appears alone on one line and the attributes on the next;
        # read_next tracks that continuation.
        read_next = False

        for line in out.split('\n'):
            ibgp = False
            if line[:2] == '*>':
                line = line[2:]
                if line[0] == 'i':
                    line = line[1:]
                    ibgp = True
            elif not read_next:
                continue

            elems = line.split()

            if len(elems) == 1:
                read_next = True
                prefix = elems[0]
                continue
            elif read_next:
                nexthop = elems[0]
            else:
                prefix = elems[0]
                nexthop = elems[1]
            read_next = False

            rib.append({'prefix': prefix, 'nexthop': nexthop,
                        'ibgp': ibgp})

        return rib

    def get_global_rib_with_prefix(self, prefix, rf):
        """Parse `show bgp <rf> unicast <prefix>` into rib dicts with
        'prefix', 'nexthop', 'aspath' and 'attrs' keys."""
        rib = []

        lines = [line.strip() for line in self.vtysh(
            'show bgp {0} unicast {1}'.format(rf, prefix),
            config=False).split('\n')]

        if lines[0] == '% Network not in table':
            return rib

        lines = lines[2:]

        if lines[0].startswith('Not advertised'):
            lines.pop(0)  # another useless line
        elif lines[0].startswith('Advertised to non peer-group peers:'):
            lines = lines[2:]  # other useless lines
        else:
            raise Exception('unknown output format {0}'.format(lines))

        if lines[0] == 'Local':
            aspath = []
        else:
            aspath = [int(asn) for asn in lines[0].split()]

        nexthop = lines[1].split()[0].strip()
        info = [s.strip(',') for s in lines[2].split()]
        attrs = []
        if 'metric' in info:
            med = info[info.index('metric') + 1]
            attrs.append({'type': base.BGP_ATTR_TYPE_MULTI_EXIT_DISC,
                          'metric': int(med)})
        if 'localpref' in info:
            localpref = info[info.index('localpref') + 1]
            attrs.append({'type': base.BGP_ATTR_TYPE_LOCAL_PREF,
                          'value': int(localpref)})

        rib.append({'prefix': prefix, 'nexthop': nexthop,
                    'aspath': aspath, 'attrs': attrs})

        return rib

    def get_neighbor_state(self, peer):
        """Return the BGP FSM state reported by `show bgp neighbors`."""
        if peer not in self.peers:
            raise Exception('not found peer {0}'.format(peer.router_id))

        neigh_addr = self.peers[peer]['neigh_addr'].split('/')[0]

        info = [l.strip() for l in self.vtysh(
            'show bgp neighbors {0}'.format(neigh_addr),
            config=False).split('\n')]

        if not info[0].startswith('BGP neighbor is'):
            raise Exception('unknown format')

        idx1 = info[0].index('BGP neighbor is ')
        idx2 = info[0].index(',')
        n_addr = info[0][idx1 + len('BGP neighbor is '):idx2]
        if n_addr == neigh_addr:
            # The state appears after '= ' on the third line of output.
            idx1 = info[2].index('= ')
            state = info[2][idx1 + len('= '):]
            if state.startswith('Idle'):
                return base.BGP_FSM_IDLE
            elif state.startswith('Active'):
                return base.BGP_FSM_ACTIVE
            elif state.startswith('Established'):
                return base.BGP_FSM_ESTABLISHED
            else:
                return state

        raise Exception('not found peer {0}'.format(peer.router_id))

    def send_route_refresh(self):
        """Soft-clear all BGP sessions, triggering route refresh."""
        self.vtysh('clear ip bgp * soft', config=False)

    def create_config(self):
        """Write bgpd (and optionally zebra) configs plus the daemons file."""
        zebra = 'no'
        self._create_config_bgp()
        if self.zebra:
            zebra = 'yes'
            self._create_config_zebra()
        self._create_config_daemons(zebra)

    def _create_config_debian(self):
        # Debian-style per-daemon option file expected by the quagga
        # init scripts inside the container image.
        c = base.CmdBuffer()
        c << 'vtysh_enable=yes'
        c << 'zebra_options=" --daemon -A 127.0.0.1"'
        c << 'bgpd_options=" --daemon -A 127.0.0.1"'
        c << 'ospfd_options=" --daemon -A 127.0.0.1"'
        c << 'ospf6d_options=" --daemon -A ::1"'
        c << 'ripd_options=" --daemon -A 127.0.0.1"'
        c << 'ripngd_options=" --daemon -A ::1"'
        c << 'isisd_options=" --daemon -A 127.0.0.1"'
        c << 'babeld_options=" --daemon -A 127.0.0.1"'
        c << 'watchquagga_enable=yes'
        c << 'watchquagga_options=(--daemon)'
        with open('{0}/debian.conf'.format(self.config_dir), 'w') as f:
            LOG.info("[%s's new config]", self.name)
            LOG.info(str(c))
            f.writelines(str(c))

    def _create_config_daemons(self, zebra='no'):
        # The daemons file selects which quagga daemons the init script
        # starts; only bgpd (and optionally zebra) are enabled.
        c = base.CmdBuffer()
        c << 'zebra=%s' % zebra
        c << 'bgpd=yes'
        c << 'ospfd=no'
        c << 'ospf6d=no'
        c << 'ripd=no'
        c << 'ripngd=no'
        c << 'isisd=no'
        c << 'babeld=no'
        with open('{0}/daemons'.format(self.config_dir), 'w') as f:
            LOG.info("[%s's new config]", self.name)
            LOG.info(str(c))
            f.writelines(str(c))

    def _create_config_bgp(self):
        """Render bgpd.conf from the peers/routes/policies registered
        on this container."""
        c = base.CmdBuffer()
        c << 'hostname bgpd'
        c << 'password zebra'
        c << 'router bgp {0}'.format(self.asn)
        c << 'bgp router-id {0}'.format(self.router_id)
        if any(info['graceful_restart'] for info in self.peers.values()):
            c << 'bgp graceful-restart'

        version = 4
        for peer, info in self.peers.items():
            version = netaddr.IPNetwork(info['neigh_addr']).version
            n_addr = info['neigh_addr'].split('/')[0]
            if version == 6:
                c << 'no bgp default ipv4-unicast'

            c << 'neighbor {0} remote-as {1}'.format(n_addr, peer.asn)
            if info['is_rs_client']:
                c << 'neighbor {0} route-server-client'.format(n_addr)
            for typ, p in info['policies'].items():
                c << 'neighbor {0} route-map {1} {2}'.format(n_addr, p['name'],
                                                             typ)
            if info['passwd']:
                c << 'neighbor {0} password {1}'.format(n_addr, info['passwd'])
            if info['passive']:
                c << 'neighbor {0} passive'.format(n_addr)
            if version == 6:
                c << 'address-family ipv6 unicast'
                c << 'neighbor {0} activate'.format(n_addr)
                c << 'exit-address-family'

        for route in self.routes.values():
            if route['rf'] == 'ipv4':
                c << 'network {0}'.format(route['prefix'])
            elif route['rf'] == 'ipv6':
                c << 'address-family ipv6 unicast'
                c << 'network {0}'.format(route['prefix'])
                c << 'exit-address-family'
            else:
                # Bug fix: error message misspelled "faily".
                raise Exception(
                    'unsupported route family: {0}'.format(route['rf']))

        if self.zebra:
            if version == 6:
                c << 'address-family ipv6 unicast'
                c << 'redistribute connected'
                c << 'exit-address-family'
            else:
                c << 'redistribute connected'

        for name, policy in self.policies.items():
            c << 'access-list {0} {1} {2}'.format(name, policy['type'],
                                                  policy['match'])
            c << 'route-map {0} permit 10'.format(name)
            c << 'match ip address {0}'.format(name)
            c << 'set metric {0}'.format(policy['med'])

        c << 'debug bgp as4'
        c << 'debug bgp fsm'
        c << 'debug bgp updates'
        c << 'debug bgp events'
        c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME)

        with open('{0}/bgpd.conf'.format(self.config_dir), 'w') as f:
            LOG.info("[%s's new config]", self.name)
            LOG.info(str(c))
            f.writelines(str(c))

    def _create_config_zebra(self):
        c = base.CmdBuffer()
        c << 'hostname zebra'
        c << 'password zebra'
        c << 'log file {0}/zebra.log'.format(self.SHARED_VOLUME)
        c << 'debug zebra packet'
        c << 'debug zebra kernel'
        c << 'debug zebra rib'
        c << ''

        with open('{0}/zebra.conf'.format(self.config_dir), 'w') as f:
            LOG.info("[%s's new config]", self.name)
            LOG.info(str(c))
            f.writelines(str(c))

    def vtysh(self, cmd, config=True):
        """Run one or more vtysh commands in the container.

        With config=True the commands are executed inside
        `conf t` / `router bgp <asn>` context; otherwise in exec mode.
        Returns the captured output.
        """
        if not isinstance(cmd, list):
            cmd = [cmd]
        cmd = ' '.join("-c '{0}'".format(c) for c in cmd)
        if config:
            return self.exec_on_ctn(
                "vtysh -d bgpd -c 'en' -c 'conf t' -c "
                "'router bgp {0}' {1}".format(self.asn, cmd),
                capture=True)
        else:
            return self.exec_on_ctn("vtysh -d bgpd {0}".format(cmd),
                                    capture=True)

    def reload_config(self):
        """SIGHUP the running daemons so they re-read their config files."""
        daemon = []
        daemon.append('bgpd')
        if self.zebra:
            daemon.append('zebra')
        for d in daemon:
            cmd = '/usr/bin/pkill {0} -SIGHUP'.format(d)
            self.exec_on_ctn(cmd, capture=True)
+
+
class RawQuaggaBGPContainer(QuaggaBGPContainer):
    """Quagga container configured from a caller-supplied raw bgpd.conf.

    The ASN and router-id are extracted from the config text instead of
    being passed explicitly.
    """

    def __init__(self, name, config, ctn_image_name,
                 zebra=False):
        asn = None
        router_id = None
        for raw_line in config.split('\n'):
            stripped = raw_line.strip()
            if stripped.startswith('router bgp'):
                asn = int(stripped[len('router bgp'):].strip())
            if stripped.startswith('bgp router-id'):
                router_id = stripped[len('bgp router-id'):].strip()
        if not asn:
            raise Exception('asn not in quagga config')
        if not router_id:
            raise Exception('router-id not in quagga config')
        # Stash the raw config before the base __init__ runs, since the
        # base class drives config creation.
        self.config = config
        super(RawQuaggaBGPContainer, self).__init__(name, asn, router_id,
                                                    ctn_image_name, zebra)

    def create_config(self):
        """Write the raw config verbatim as bgpd.conf."""
        with open(os.path.join(self.config_dir, 'bgpd.conf'), 'w') as f:
            LOG.info("[%s's new config]", self.name)
            LOG.info(self.config)
            f.writelines(self.config)
diff --git a/tests/integrated/common/ryubgp.py b/tests/integrated/common/ryubgp.py
new file mode 100644
index 00000000..8fe16f49
--- /dev/null
+++ b/tests/integrated/common/ryubgp.py
@@ -0,0 +1,212 @@
+# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import os
+import time
+
+from . import docker_base as base
+
+LOG = logging.getLogger(__name__)
+
+
class RyuBGPContainer(base.BGPContainer):
    """BGP speaker container running the Ryu BGP application."""

    WAIT_FOR_BOOT = 1
    SHARED_VOLUME = '/etc/ryu'

    def __init__(self, name, asn, router_id, ctn_image_name):
        super(RyuBGPContainer, self).__init__(name, asn, router_id,
                                              ctn_image_name)
        # Host-side paths vs. the paths seen inside the container.
        self.RYU_CONF = os.path.join(self.config_dir, 'ryu.conf')
        self.SHARED_RYU_CONF = os.path.join(self.SHARED_VOLUME, 'ryu.conf')
        self.SHARED_BGP_CONF = os.path.join(self.SHARED_VOLUME, 'bgp_conf.py')
        self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME))

    def _create_config_ryu(self):
        # ryu.conf: ryu-manager's own oslo-style config file.
        c = base.CmdBuffer()
        c << '[DEFAULT]'
        c << 'verbose=True'
        c << 'log_file=/etc/ryu/manager.log'
        with open(self.RYU_CONF, 'w') as f:
            # Consistency fix: use lazy logger arguments like the rest
            # of this module instead of eager %-formatting.
            LOG.info("[%s's new config]", self.name)
            LOG.info(str(c))
            f.writelines(str(c))

    def _create_config_ryu_bgp(self):
        """Render bgp_conf.py (the --bgp-app-config-file) from the
        registered peers and routes."""
        c = base.CmdBuffer()
        c << 'import os'
        c << ''
        c << 'BGP = {'
        c << "    'local_as': %s," % str(self.asn)
        c << "    'router_id': '%s'," % self.router_id
        c << "    'neighbors': ["
        c << "        {"
        for peer, info in self.peers.items():
            n_addr = info['neigh_addr'].split('/')[0]
            c << "            'address': '%s'," % n_addr
            c << "            'remote_as': %s," % str(peer.asn)
            c << "            'enable_ipv4': True,"
            c << "            'enable_ipv6': True,"
            c << "            'enable_vpnv4': True,"
            c << "            'enable_vpnv6': True,"
            c << '        },'
        c << '    ],'
        c << "    'routes': ["
        for route in self.routes.values():
            c << "        {"
            c << "            'prefix': '%s'," % route['prefix']
            c << "        },"
        c << "    ],"
        c << "}"
        # Bug fix: the 'bgpspeaker' logger below had a duplicate
        # 'handlers' key; the first entry (['console', 'log_file']) was
        # dead because the second always overrode it.  Only the
        # runtime-effective value is kept.
        log_conf = """LOGGING = {

    # We use python logging package for logging.
    'version': 1,
    'disable_existing_loggers': False,

    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s ' +
                      '[%(process)d %(thread)d] %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(asctime)s %(module)s %(lineno)s ' +
                      '%(message)s'
        },
        'stats': {
            'format': '%(message)s'
        },
    },

    'handlers': {
        # Outputs log to console.
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        'console_stats': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'stats'
        },
        # Rotates log file when its size reaches 10MB.
        'log_file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join('.', 'bgpspeaker.log'),
            'maxBytes': '10000000',
            'formatter': 'verbose'
        },
        'stats_file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join('.', 'statistics_bgps.log'),
            'maxBytes': '10000000',
            'formatter': 'stats'
        },
    },

    # Fine-grained control of logging per instance.
    'loggers': {
        'bgpspeaker': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'stats': {
            'handlers': ['stats_file', 'console_stats'],
            'level': 'INFO',
            'propagate': False,
            'formatter': 'stats',
        },
    },

    # Root loggers.
    'root': {
        'handlers': ['console', 'log_file'],
        'level': 'DEBUG',
        'propagate': True,
    },
}"""
        c << log_conf
        with open(os.path.join(self.config_dir, 'bgp_conf.py'), 'w') as f:
            LOG.info("[%s's new config]", self.name)
            LOG.info(str(c))
            f.writelines(str(c))

    def create_config(self):
        """Write both ryu.conf and bgp_conf.py under config_dir."""
        self._create_config_ryu()
        self._create_config_ryu_bgp()

    def is_running_ryu(self):
        """Return True if a ryu-manager process is visible in the container."""
        results = self.exec_on_ctn('ps ax')
        running = False
        for line in results.split('\n')[1:]:
            if 'ryu-manager' in line:
                running = True
                break  # one match is enough
        return running

    def start_ryubgp(self, check_running=True, retry=False):
        """Start ryu-manager with the BGP app; returns True on success.

        Retries up to 3 times when retry=True.
        """
        if check_running:
            if self.is_running_ryu():
                return True
        result = False
        if retry:
            try_times = 3
        else:
            try_times = 1
        cmd = "ryu-manager --verbose "
        cmd += "--config-file %s " % self.SHARED_RYU_CONF
        cmd += "--bgp-app-config-file %s " % self.SHARED_BGP_CONF
        cmd += "ryu.services.protocols.bgp.application"
        for _ in range(try_times):
            self.exec_on_ctn(cmd, detach=True)
            if self.is_running_ryu():
                result = True
                break
            time.sleep(1)
        return result

    def stop_ryubgp(self, check_running=True, retry=False):
        """Stop ryu-manager via SIGTERM; returns True on success.

        Retries up to 3 times when retry=True.
        """
        if check_running:
            if not self.is_running_ryu():
                return True
        result = False
        if retry:
            try_times = 3
        else:
            try_times = 1
        for _ in range(try_times):
            cmd = '/usr/bin/pkill ryu-manager -SIGTERM'
            self.exec_on_ctn(cmd)
            if not self.is_running_ryu():
                result = True
                break
            time.sleep(1)
        return result

    def run(self, wait=False, w_time=WAIT_FOR_BOOT):
        # Always wait WAIT_FOR_BOOT seconds, regardless of caller w_time.
        w_time = super(RyuBGPContainer,
                       self).run(wait=wait, w_time=self.WAIT_FOR_BOOT)
        return w_time

    def reload_config(self):
        """Restart ryu-manager so it picks up the regenerated configs."""
        self.stop_ryubgp(retry=True)
        self.start_ryubgp(retry=True)
diff --git a/tests/integrated/run_test.py b/tests/integrated/run_test.py
new file mode 100644
index 00000000..77d39345
--- /dev/null
+++ b/tests/integrated/run_test.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
+# Copyright (C) 2016 Fumihiko Kakuma <kakuma at valinux co jp>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+import os
+import sys
+import unittest
+
+from ryu import log
+
+
def load_tests(loader, tests, pattern):
    """unittest `load_tests` hook: discover the BGP integration tests.

    *pattern*, when given, restricts discovery to matching file names.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    top = os.path.abspath(here + '/../..')
    suite = unittest.TestSuite()
    for test_dir in ['tests/integrated/bgp']:
        discover_kwargs = {'top_level_dir': top}
        if pattern:
            discover_kwargs['pattern'] = pattern
        suite.addTests(loader.discover(test_dir, **discover_kwargs))
    return suite
+
+
if __name__ == '__main__':
    # Initialize ryu's logging before any test code runs.
    log.early_init_log(logging.DEBUG)
    log.init_log()
    LOG = logging.getLogger(__name__)
    # Optional single CLI argument: a discovery pattern for load_tests.
    pattern = None
    if len(sys.argv) == 2:
        pattern = sys.argv[1]
    loader = unittest.defaultTestLoader
    suite = load_tests(loader, None, pattern)
    res = unittest.TextTestRunner(verbosity=2).run(suite)
    # Exit non-zero when any test errored or failed (CI-friendly).
    ret = 0
    if res.errors or res.failures:
        ret = 1
    sys.exit(ret)
diff --git a/tests/integrated/run_tests_with_ovs12.py b/tests/integrated/run_tests_with_ovs12.py
new file mode 100755
index 00000000..b6393c43
--- /dev/null
+++ b/tests/integrated/run_tests_with_ovs12.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+from nose.tools import ok_, eq_, timed, nottest
+
+from subprocess import Popen, PIPE, STDOUT
+import time
+
+from mininet.net import Mininet
+from mininet.node import RemoteController, OVSKernelSwitch
+
+TIMEOUT = 60
+RYU_HOST = '127.0.0.1'
+RYU_PORT = 6633
+PYTHON_BIN = '.venv/bin/python'
+RYU_MGR = './bin/ryu-manager'
+
+
class OVS12KernelSwitch(OVSKernelSwitch):
    """OVS kernel switch that enables both OpenFlow 1.0 and 1.2.

    After the standard start, restricts the bridge's `protocols` to
    OpenFlow10/OpenFlow12 via ovs-vsctl.
    """
    def start(self, controllers):
        super(OVS12KernelSwitch, self).start(controllers)
        self.cmd('ovs-vsctl set Bridge', self,
                 "protocols='[OpenFlow10, OpenFlow12]'")
+
+
class TestWithOVS12(unittest.TestCase):
    """Run the ryu integration test apps against a mininet OVS switch.

    A single topology (remote controller + one switch + one host) is
    shared by all test methods.
    """
    @classmethod
    def setUpClass(cls):
        cls.mn = Mininet()
        # The controller is the ryu-manager instance spawned per test.
        c = cls.mn.addController(controller=RemoteController,
                                 ip=RYU_HOST, port=RYU_PORT)
        c.start()

        s1 = cls.mn.addSwitch('s1', cls=OVS12KernelSwitch)
        s1.start(cls.mn.controllers)

        h1 = cls.mn.addHost('h1', ip='0.0.0.0/0')

        link = cls.mn.addLink(h1, s1)
        s1.attach(link.intf2)

    @classmethod
    def tearDownClass(cls):
        cls.mn.stop()

    @timed(TIMEOUT)
    def test_add_flow_v10(self):
        app = 'test/integrated/test_add_flow_v10.py'
        self._run_ryu_manager_and_check_output(app)

    @timed(TIMEOUT)
    def test_request_reply_v12(self):
        app = 'test/integrated/test_request_reply_v12.py'
        self._run_ryu_manager_and_check_output(app)

    @timed(TIMEOUT)
    def test_add_flow_v12_actions(self):
        app = 'test/integrated/test_add_flow_v12_actions.py'
        self._run_ryu_manager_and_check_output(app)

    @timed(TIMEOUT)
    def test_add_flow_v12_matches(self):
        app = 'test/integrated/test_add_flow_v12_matches.py'
        self._run_ryu_manager_and_check_output(app)

    @nottest
    def test_of_config(self):
        # OVS 1.10 does not support of_config
        pass

    def _run_ryu_manager_and_check_output(self, app):
        # Launch ryu-manager with the given test app and scan its output
        # for the TEST_FINISHED marker, then assert the app reported
        # Completed=[True].
        cmd = [PYTHON_BIN, RYU_MGR, app]
        p = Popen(cmd, stdout=PIPE, stderr=STDOUT)

        while True:
            # If the process exits early, another controller likely owns
            # the listen port already.
            if p.poll() is not None:
                raise Exception('Another ryu-manager already running?')

            # NOTE(review): on Python 3 p.stdout yields bytes, so these
            # str comparisons assume Python 2 -- confirm before porting.
            line = p.stdout.readline().strip()
            if line == '':
                time.sleep(1)
                continue

            print("ryu-manager: %s" % line)
            if line.find('TEST_FINISHED') != -1:
                ok_(line.find('Completed=[True]') != -1)
                p.terminate()
                p.communicate()  # wait for subprocess is terminated
                break
+
+
if __name__ == '__main__':
    # Standard unittest CLI entry point.
    unittest.main()
diff --git a/tests/integrated/test_add_flow_v10.py b/tests/integrated/test_add_flow_v10.py
new file mode 100644
index 00000000..895cf15d
--- /dev/null
+++ b/tests/integrated/test_add_flow_v10.py
@@ -0,0 +1,258 @@
+# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+import logging
+
+from ryu.ofproto import ofproto_v1_0
+from ryu.ofproto import ether
+from ryu.ofproto import nx_match
+
+from tests.integrated import tester
+
+LOG = logging.getLogger(__name__)
+
+
class RunTest(tester.TestFlowBase):
    """ Test case for add flows of1.0

    Each test_* method installs one flow (action or match rule) and
    records in self._verify what verify_default should check against
    the flow stats the switch reports back.
    """
    OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(RunTest, self).__init__(*args, **kwargs)
        self._verify = []

    def add_action(self, dp, action):
        # Install a match-all flow carrying the given action list.
        rule = nx_match.ClsRule()
        self.send_flow_mod(
            dp, rule, 0, dp.ofproto.OFPFC_ADD, 0, 0, None,
            0xffffffff, None, dp.ofproto.OFPFF_SEND_FLOW_REM, action)

    def add_rule(self, dp, rule):
        # Install a flow with the given match rule and no actions.
        self.send_flow_mod(
            dp, rule, 0, dp.ofproto.OFPFC_ADD, 0, 0, None,
            0xffffffff, None, dp.ofproto.OFPFF_SEND_FLOW_REM, [])

    def send_flow_mod(self, dp, rule, cookie, command, idle_timeout,
                      hard_timeout, priority=None, buffer_id=0xffffffff,
                      out_port=None, flags=0, actions=None):
        """Build and send an OFPFlowMod for the given ClsRule."""
        if priority is None:
            priority = dp.ofproto.OFP_DEFAULT_PRIORITY
        if out_port is None:
            out_port = dp.ofproto.OFPP_NONE

        match_tuple = rule.match_tuple()
        match = dp.ofproto_parser.OFPMatch(*match_tuple)

        m = dp.ofproto_parser.OFPFlowMod(
            dp, match, cookie, command, idle_timeout, hard_timeout,
            priority, buffer_id, out_port, flags, actions)

        dp.send_msg(m)

    def _verify_action(self, actions, type_, name, value):
        """Check that actions[0] has the expected type and field value.

        *name* may be a list of attribute names (checked against a list
        in *value*), a single name, or None for type-only checks.
        Returns True or an error-description string.
        """
        try:
            action = actions[0]
            if action.cls_action_type != type_:
                return "Action type error. send:%s, val:%s" \
                    % (type_, action.cls_action_type)
        except IndexError:
            return "Action is not setting."

        f_value = None
        if name:
            try:
                if isinstance(name, list):
                    f_value = [getattr(action, n) for n in name]
                else:
                    f_value = getattr(action, name)
            except AttributeError:
                pass

        if f_value != value:
            return "Value error. send:%s=%s val:%s" \
                % (name, value, f_value)
        return True

    def _verify_rule(self, rule, name, value):
        """Check one match field; returns True or an error string."""
        f_value = getattr(rule, name)
        if f_value != value:
            return "Value error. send:%s=%s val:%s" \
                % (name, value, f_value)
        return True

    def verify_default(self, dp, stats):
        # self._verify holds [name, value] for a rule check or
        # [type, name, value] for an action check; consume and reset it.
        verify = self._verify
        self._verify = []
        match = stats[0].match
        actions = stats[0].actions

        if len(verify) == 2:
            return self._verify_rule(match, *verify)
        elif len(verify) == 3:
            return self._verify_action(actions, *verify)
        else:
            return "self._verify is invalid."

    # Test of Actions
    def test_action_output(self, dp):
        out_port = 2
        self._verify = [dp.ofproto.OFPAT_OUTPUT,
                        'port', out_port]
        action = dp.ofproto_parser.OFPActionOutput(out_port)
        self.add_action(dp, [action, ])

    def test_action_vlan_vid(self, dp):
        vlan_vid = 2
        self._verify = [dp.ofproto.OFPAT_SET_VLAN_VID,
                        'vlan_vid', vlan_vid]
        action = dp.ofproto_parser.OFPActionVlanVid(vlan_vid)
        self.add_action(dp, [action, ])

    def test_action_vlan_pcp(self, dp):
        vlan_pcp = 4
        self._verify = [dp.ofproto.OFPAT_SET_VLAN_PCP,
                        'vlan_pcp', vlan_pcp]
        action = dp.ofproto_parser.OFPActionVlanPcp(vlan_pcp)
        self.add_action(dp, [action, ])

    def test_action_strip_vlan(self, dp):
        # Cleanup: removed an unused local (vlan_pcp = 4) left over from
        # a copy of test_action_vlan_pcp; STRIP_VLAN takes no value.
        self._verify = [dp.ofproto.OFPAT_STRIP_VLAN,
                        None, None]
        action = dp.ofproto_parser.OFPActionStripVlan()
        self.add_action(dp, [action, ])

    def test_action_set_dl_src(self, dp):
        dl_src = '56:b3:42:04:b2:7a'
        dl_src_bin = self.haddr_to_bin(dl_src)
        self._verify = [dp.ofproto.OFPAT_SET_DL_SRC,
                        'dl_addr', dl_src_bin]
        action = dp.ofproto_parser.OFPActionSetDlSrc(dl_src_bin)
        self.add_action(dp, [action, ])

    def test_action_set_dl_dst(self, dp):
        dl_dst = 'c2:93:a2:fb:d0:f4'
        dl_dst_bin = self.haddr_to_bin(dl_dst)
        self._verify = [dp.ofproto.OFPAT_SET_DL_DST,
                        'dl_addr', dl_dst_bin]
        action = dp.ofproto_parser.OFPActionSetDlDst(dl_dst_bin)
        self.add_action(dp, [action, ])

    def test_action_set_nw_src(self, dp):
        nw_src = '216.132.81.105'
        nw_src_int = self.ipv4_to_int(nw_src)
        self._verify = [dp.ofproto.OFPAT_SET_NW_SRC,
                        'nw_addr', nw_src_int]
        action = dp.ofproto_parser.OFPActionSetNwSrc(nw_src_int)
        self.add_action(dp, [action, ])

    def test_action_set_nw_dst(self, dp):
        nw_dst = '223.201.206.3'
        nw_dst_int = self.ipv4_to_int(nw_dst)
        self._verify = [dp.ofproto.OFPAT_SET_NW_DST,
                        'nw_addr', nw_dst_int]
        action = dp.ofproto_parser.OFPActionSetNwDst(nw_dst_int)
        self.add_action(dp, [action, ])

    def test_action_set_nw_tos(self, dp):
        # lowest two bits must be zero
        nw_tos = 1 << 2
        self._verify = [dp.ofproto.OFPAT_SET_NW_TOS,
                        'tos', nw_tos]
        action = dp.ofproto_parser.OFPActionSetNwTos(nw_tos)
        self.add_action(dp, [action, ])

    def test_action_set_tp_src(self, dp):
        tp_src = 55420
        self._verify = [dp.ofproto.OFPAT_SET_TP_SRC,
                        'tp', tp_src]
        action = dp.ofproto_parser.OFPActionSetTpSrc(tp_src)
        self.add_action(dp, [action, ])

    def test_action_set_tp_dst(self, dp):
        tp_dst = 15430
        self._verify = [dp.ofproto.OFPAT_SET_TP_DST,
                        'tp', tp_dst]
        action = dp.ofproto_parser.OFPActionSetTpDst(tp_dst)
        self.add_action(dp, [action, ])

    def test_action_enqueue(self, dp):
        port = 207
        queue_id = 4287508753
        self._verify = [dp.ofproto.OFPAT_ENQUEUE,
                        ['port', 'queue_id'], [port, queue_id]]
        action = dp.ofproto_parser.OFPActionEnqueue(port, queue_id)
        self.add_action(dp, [action, ])

    # Test of Rules
    def test_rule_set_in_port(self, dp):
        in_port = 32
        self._verify = ['in_port', in_port]

        rule = nx_match.ClsRule()
        rule.set_in_port(in_port)
        self.add_rule(dp, rule)

    def test_rule_set_dl_src(self, dp):
        dl_src = 'b8:a1:94:51:78:83'
        dl_src_bin = self.haddr_to_bin(dl_src)
        self._verify = ['dl_src', dl_src_bin]

        rule = nx_match.ClsRule()
        rule.set_dl_src(dl_src_bin)
        self.add_rule(dp, rule)

    def test_rule_set_dl_type_ip(self, dp):
        dl_type = ether.ETH_TYPE_IP
        self._verify = ['dl_type', dl_type]

        rule = nx_match.ClsRule()
        rule.set_dl_type(dl_type)
        self.add_rule(dp, rule)

    def test_rule_set_dl_type_arp(self, dp):
        dl_type = ether.ETH_TYPE_ARP
        self._verify = ['dl_type', dl_type]

        rule = nx_match.ClsRule()
        rule.set_dl_type(dl_type)
        self.add_rule(dp, rule)

    def test_rule_set_dl_type_vlan(self, dp):
        dl_type = ether.ETH_TYPE_8021Q
        self._verify = ['dl_type', dl_type]

        rule = nx_match.ClsRule()
        rule.set_dl_type(dl_type)
        self.add_rule(dp, rule)

    def test_rule_set_dl_type_ipv6(self, dp):
        dl_type = ether.ETH_TYPE_IPV6
        self._verify = ['dl_type', dl_type]

        rule = nx_match.ClsRule()
        rule.set_dl_type(dl_type)
        self.add_rule(dp, rule)

    def test_rule_set_dl_type_lacp(self, dp):
        dl_type = ether.ETH_TYPE_SLOW
        self._verify = ['dl_type', dl_type]

        rule = nx_match.ClsRule()
        rule.set_dl_type(dl_type)
        self.add_rule(dp, rule)
diff --git a/tests/integrated/test_add_flow_v12_actions.py b/tests/integrated/test_add_flow_v12_actions.py
new file mode 100644
index 00000000..3c1625ad
--- /dev/null
+++ b/tests/integrated/test_add_flow_v12_actions.py
@@ -0,0 +1,496 @@
+# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+import logging
+
+from ryu.ofproto import ofproto_v1_2
+from ryu.ofproto import ether
+from ryu.ofproto import inet
+
+from tests.integrated import tester
+
+LOG = logging.getLogger(__name__)
+
+
+class RunTest(tester.TestFlowBase):
+    """ Test case for add flows of Actions
+    """
+    OFP_VERSIONS = [ofproto_v1_2.OFP_VERSION]
+
+    def __init__(self, *args, **kwargs):
+        super(RunTest, self).__init__(*args, **kwargs)
+
+        # Verification spec consumed by verify_default():
+        # [action type], [type, attr, value] or [type, 'field', header, value].
+        self._verify = []
+
+    def add_apply_actions(self, dp, actions, match=None):
+        # Install a flow whose APPLY_ACTIONS instruction carries the given
+        # actions; a wildcard OFPMatch is used when none is supplied.
+        inst = [dp.ofproto_parser.OFPInstructionActions(
+            dp.ofproto.OFPIT_APPLY_ACTIONS, actions)]
+        if match is None:
+            match = dp.ofproto_parser.OFPMatch()
+        m = dp.ofproto_parser.OFPFlowMod(dp, 0, 0, 0,
+                                         dp.ofproto.OFPFC_ADD,
+                                         0, 0, 0xff, 0xffffffff,
+                                         dp.ofproto.OFPP_ANY,
+                                         dp.ofproto.OFPG_ANY,
+                                         0, match, inst)
+        dp.send_msg(m)
+
+    def add_set_field_action(self, dp, field, value, match=None):
+        # Helper for all OFPAT_SET_FIELD cases: builds the OXM match field,
+        # records the expected (header, value) pair and installs the flow.
+        self._verify = [dp.ofproto.OFPAT_SET_FIELD,
+                        'field', field, value]
+        f = dp.ofproto_parser.OFPMatchField.make(field, value)
+        actions = [dp.ofproto_parser.OFPActionSetField(f), ]
+        self.add_apply_actions(dp, actions, match=match)
+
+    def verify_default(self, dp, stats):
+        # Compare the first action of the first returned flow entry against
+        # the spec stored in self._verify; returns True or an error string.
+        verify = self._verify
+        self._verify = []
+
+        type_ = name = field = value = None
+        if len(verify) == 1:
+            (type_, ) = verify
+        elif len(verify) == 3:
+            (type_, name, value) = verify
+        elif len(verify) == 4:
+            (type_, name, field, value) = verify
+        else:
+            return "self._verify is invalid."
+
+        try:
+            # NOTE(review): relies on the parser exposing cls_action_type on
+            # decoded action instances — confirm against ofproto_v1_2_parser.
+            action = stats[0].instructions[0].actions[0]
+            if action.cls_action_type != type_:
+                return "Action type error. send:%s, val:%s" \
+                    % (type_, action.cls_action_type)
+        except IndexError:
+            return "Action is not setting."
+
+        s_val = None
+        if name:
+            try:
+                s_val = getattr(action, name)
+            except AttributeError:
+                pass
+
+        if name == 'field':
+            # Set-field actions wrap value in an OFPMatchField; unwrap it.
+            if s_val.header != field:
+                return "Field error. send:%s val:%s" \
+                    % (field, s_val.header)
+            s_val = s_val.value
+
+        if name and s_val != value:
+            return "Value error. send:%s=%s val:%s" \
+                % (name, value, s_val)
+
+        return True
+
+    def verify_action_drop(self, dp, stats):
+        # A "drop" flow must have no actions in any instruction.
+        for s in stats:
+            for i in s.instructions:
+                if len(i.actions):
+                    return "has actions. %s" % (i.actions)
+        return True
+
+    # Test of General Actions
+    def test_action_output(self, dp):
+        out_port = 255
+        self._verify = [dp.ofproto.OFPAT_OUTPUT,
+                        'port', out_port]
+
+        actions = [dp.ofproto_parser.OFPActionOutput(out_port, 0), ]
+        self.add_apply_actions(dp, actions)
+
+    def test_action_drop(self, dp):
+        # Empty action list == drop; checked by verify_action_drop().
+        self.add_apply_actions(dp, [])
+
+    # Test of Push-Tag/Pop-Tag Actions
+    def test_action_push_vlan(self, dp):
+        ethertype = ether.ETH_TYPE_8021Q
+        self._verify = [dp.ofproto.OFPAT_PUSH_VLAN,
+                        'ethertype', ethertype]
+
+        actions = [dp.ofproto_parser.OFPActionPushVlan(ethertype)]
+        self.add_apply_actions(dp, actions)
+
+    def test_action_pop_vlan(self, dp):
+        self._verify = [dp.ofproto.OFPAT_POP_VLAN, ]
+
+        actions = [dp.ofproto_parser.OFPActionPopVlan(), ]
+        # Pop only makes sense on tagged traffic, so match a VLAN first.
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_vlan_vid(1)
+        self.add_apply_actions(dp, actions, match)
+
+    def test_action_push_mpls(self, dp):
+        ethertype = ether.ETH_TYPE_MPLS
+        self._verify = [dp.ofproto.OFPAT_PUSH_MPLS,
+                        'ethertype', ethertype]
+
+        actions = [dp.ofproto_parser.OFPActionPushMpls(ethertype), ]
+        self.add_apply_actions(dp, actions)
+
+    def test_action_pop_mpls(self, dp):
+        # ethertype here is the EtherType exposed after the MPLS pop.
+        ethertype = ether.ETH_TYPE_8021Q
+        self._verify = [dp.ofproto.OFPAT_POP_MPLS,
+                        'ethertype', ethertype]
+        actions = [dp.ofproto_parser.OFPActionPopMpls(ethertype), ]
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_MPLS)
+        self.add_apply_actions(dp, actions, match)
+
+    # Test of Set-Filed Actions
+    def test_action_set_field_dl_dst(self, dp):
+        field = dp.ofproto.OXM_OF_ETH_DST
+        dl_dst = 'e2:7a:09:79:0b:0f'
+        value = self.haddr_to_bin(dl_dst)
+
+        self.add_set_field_action(dp, field, value)
+
+    def test_action_set_field_dl_src(self, dp):
+        field = dp.ofproto.OXM_OF_ETH_SRC
+        dl_src = '08:82:63:b6:62:05'
+        value = self.haddr_to_bin(dl_src)
+
+        self.add_set_field_action(dp, field, value)
+
+    def test_action_set_field_dl_type(self, dp):
+        field = dp.ofproto.OXM_OF_ETH_TYPE
+        value = ether.ETH_TYPE_IPV6
+
+        self.add_set_field_action(dp, field, value)
+
+    def test_action_set_field_vlan_vid(self, dp):
+        field = dp.ofproto.OXM_OF_VLAN_VID
+        value = 0x1e4
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_vlan_vid(1)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_vlan_pcp(self, dp):
+        field = dp.ofproto.OXM_OF_VLAN_PCP
+        value = 3
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_vlan_vid(1)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_nw_dscp(self, dp):
+        field = dp.ofproto.OXM_OF_IP_DSCP
+        value = 32
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_IP)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_nw_ecn(self, dp):
+        field = dp.ofproto.OXM_OF_IP_ECN
+        value = 1
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_IP)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_ip_proto(self, dp):
+        field = dp.ofproto.OXM_OF_IP_PROTO
+        value = inet.IPPROTO_TCP
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_IP)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_ipv4_src(self, dp):
+        field = dp.ofproto.OXM_OF_IPV4_SRC
+        ipv4_src = '192.168.3.92'
+        value = self.ipv4_to_int(ipv4_src)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_IP)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_ipv4_dst(self, dp):
+        field = dp.ofproto.OXM_OF_IPV4_DST
+        ipv4_dst = '192.168.74.122'
+        value = self.ipv4_to_int(ipv4_dst)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_IP)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_tcp_src(self, dp):
+        field = dp.ofproto.OXM_OF_TCP_SRC
+        value = 105
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_IP)
+        match.set_ip_proto(inet.IPPROTO_TCP)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_tcp_dst(self, dp):
+        field = dp.ofproto.OXM_OF_TCP_DST
+        value = 75
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_IP)
+        match.set_ip_proto(inet.IPPROTO_TCP)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_udp_src(self, dp):
+        field = dp.ofproto.OXM_OF_UDP_SRC
+        value = 197
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_IP)
+        match.set_ip_proto(inet.IPPROTO_UDP)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_udp_dst(self, dp):
+        field = dp.ofproto.OXM_OF_UDP_DST
+        value = 17
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_IP)
+        match.set_ip_proto(inet.IPPROTO_UDP)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_icmpv4_type(self, dp):
+        field = dp.ofproto.OXM_OF_ICMPV4_TYPE
+        value = 8
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_ip_proto(inet.IPPROTO_ICMP)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_icmpv4_code(self, dp):
+        field = dp.ofproto.OXM_OF_ICMPV4_CODE
+        value = 2
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_ip_proto(inet.IPPROTO_ICMP)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_arp_op(self, dp):
+        field = dp.ofproto.OXM_OF_ARP_OP
+        value = 2
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_ARP)
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_arp_spa(self, dp):
+        field = dp.ofproto.OXM_OF_ARP_SPA
+        nw_src = '192.168.132.179'
+        value = self.ipv4_to_int(nw_src)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_ARP)
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_arp_tpa(self, dp):
+        field = dp.ofproto.OXM_OF_ARP_TPA
+        nw_dst = '192.168.118.85'
+        value = self.ipv4_to_int(nw_dst)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_ARP)
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_arp_sha(self, dp):
+        field = dp.ofproto.OXM_OF_ARP_SHA
+        arp_sha = '50:29:e7:7f:6c:7f'
+        value = self.haddr_to_bin(arp_sha)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_ARP)
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_arp_tha(self, dp):
+        field = dp.ofproto.OXM_OF_ARP_THA
+        arp_tha = '71:c8:72:2f:47:fd'
+        value = self.haddr_to_bin(arp_tha)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_ARP)
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_ipv6_src(self, dp):
+        field = dp.ofproto.OXM_OF_IPV6_SRC
+        ipv6_src = '7527:c798:c772:4a18:117a:14ff:c1b6:e4ef'
+        value = self.ipv6_to_int(ipv6_src)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(0x86dd)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_ipv6_dst(self, dp):
+        field = dp.ofproto.OXM_OF_IPV6_DST
+        ipv6_dst = '8893:65b3:6b49:3bdb:3d2:9401:866c:c96'
+        value = self.ipv6_to_int(ipv6_dst)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(0x86dd)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_ipv6_flabel(self, dp):
+        field = dp.ofproto.OXM_OF_IPV6_FLABEL
+        value = 0x2c12
+
+        self.add_set_field_action(dp, field, value)
+
+    def test_action_set_field_icmpv6_type(self, dp):
+        field = dp.ofproto.OXM_OF_ICMPV6_TYPE
+        value = 129
+
+        self.add_set_field_action(dp, field, value)
+
+    def test_action_set_field_icmpv6_code(self, dp):
+        field = dp.ofproto.OXM_OF_ICMPV6_CODE
+        value = 2
+
+        self.add_set_field_action(dp, field, value)
+
+    def test_action_set_field_ipv6_nd_target(self, dp):
+        field = dp.ofproto.OXM_OF_IPV6_ND_TARGET
+        target = "5420:db3f:921b:3e33:2791:98f:dd7f:2e19"
+        value = self.ipv6_to_int(target)
+
+        self.add_set_field_action(dp, field, value)
+
+    def test_action_set_field_ipv6_nd_sll(self, dp):
+        field = dp.ofproto.OXM_OF_IPV6_ND_SLL
+        sll = "54:db:3f:3e:27:19"
+        value = self.haddr_to_bin(sll)
+
+        self.add_set_field_action(dp, field, value)
+
+    def test_action_set_field_ipv6_nd_tll(self, dp):
+        field = dp.ofproto.OXM_OF_IPV6_ND_TLL
+        tll = "83:13:48:1e:d0:b0"
+        value = self.haddr_to_bin(tll)
+
+        self.add_set_field_action(dp, field, value)
+
+    def test_action_set_field_mpls_label(self, dp):
+        field = dp.ofproto.OXM_OF_MPLS_LABEL
+        value = 0x4c
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_MPLS)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    def test_action_set_field_mpls_tc(self, dp):
+        field = dp.ofproto.OXM_OF_MPLS_TC
+        value = 0b101
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_MPLS)
+
+        self.add_set_field_action(dp, field, value, match)
+
+    # Test of Change-TTL Actions
+    def test_action_set_mpls_ttl(self, dp):
+        mpls_ttl = 8
+        self._verify = [dp.ofproto.OFPAT_SET_MPLS_TTL,
+                        'mpls_ttl', mpls_ttl]
+        actions = [dp.ofproto_parser.OFPActionSetMplsTtl(mpls_ttl), ]
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_MPLS)
+        self.add_apply_actions(dp, actions, match)
+
+    def test_action_dec_mpls_ttl(self, dp):
+        self._verify = [dp.ofproto.OFPAT_DEC_MPLS_TTL]
+        actions = [dp.ofproto_parser.OFPActionDecMplsTtl(), ]
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(ether.ETH_TYPE_MPLS)
+        self.add_apply_actions(dp, actions, match)
+
+    def test_action_set_nw_ttl_ipv4(self, dp):
+        nw_ttl = 64
+        self._verify = [dp.ofproto.OFPAT_SET_NW_TTL,
+                        'nw_ttl', nw_ttl]
+        actions = [dp.ofproto_parser.OFPActionSetNwTtl(nw_ttl), ]
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(0x0800)
+        self.add_apply_actions(dp, actions, match)
+
+    def test_action_set_nw_ttl_ipv6(self, dp):
+        nw_ttl = 64
+        self._verify = [dp.ofproto.OFPAT_SET_NW_TTL,
+                        'nw_ttl', nw_ttl]
+        actions = [dp.ofproto_parser.OFPActionSetNwTtl(nw_ttl), ]
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(0x86dd)
+        self.add_apply_actions(dp, actions, match)
+
+    def test_action_dec_nw_ttl_ipv4(self, dp):
+        self._verify = [dp.ofproto.OFPAT_DEC_NW_TTL]
+        actions = [dp.ofproto_parser.OFPActionDecNwTtl(), ]
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(0x0800)
+        self.add_apply_actions(dp, actions, match)
+
+    def test_action_dec_nw_ttl_ipv6(self, dp):
+        self._verify = [dp.ofproto.OFPAT_DEC_NW_TTL]
+        actions = [dp.ofproto_parser.OFPActionDecNwTtl(), ]
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(0x86dd)
+        self.add_apply_actions(dp, actions, match)
+
+    def test_action_copy_ttl_out(self, dp):
+        self._verify = [dp.ofproto.OFPAT_COPY_TTL_OUT]
+        actions = [dp.ofproto_parser.OFPActionCopyTtlOut(), ]
+        self.add_apply_actions(dp, actions)
+
+    def test_action_copy_ttl_in(self, dp):
+        self._verify = [dp.ofproto.OFPAT_COPY_TTL_IN]
+        actions = [dp.ofproto_parser.OFPActionCopyTtlIn(), ]
+        self.add_apply_actions(dp, actions)
+
+    def is_supported(self, t):
+        # Open vSwitch 1.10 does not support MPLS yet.
+        # NOTE(review): matching uses str.find(), i.e. substring semantics,
+        # so 'test_action_set_field_icmp' already covers the icmpv6 entries
+        # listed below; they are redundant but harmless.
+        unsupported = [
+            'test_action_set_field_ip_proto',
+            'test_action_set_field_dl_type',
+            'test_action_set_field_icmp',
+            'test_action_set_field_icmpv6_code',
+            'test_action_set_field_icmpv6_type',
+            'test_action_set_field_ipv6_flabel',
+            'test_action_set_field_ipv6_nd_sll',
+            'test_action_set_field_ipv6_nd_target',
+            'test_action_set_field_ipv6_nd_tll',
+            'test_action_copy_ttl_in',
+            'test_action_copy_ttl_out'
+        ]
+        for u in unsupported:
+            if t.find(u) != -1:
+                return False
+
+        return True
diff --git a/tests/integrated/test_add_flow_v12_matches.py b/tests/integrated/test_add_flow_v12_matches.py
new file mode 100644
index 00000000..404e45ef
--- /dev/null
+++ b/tests/integrated/test_add_flow_v12_matches.py
@@ -0,0 +1,1200 @@
+# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+import logging
+
+from ryu.ofproto import ofproto_v1_2
+from ryu.ofproto import ether
+from ryu.ofproto import inet
+
+from tests.integrated import tester
+
+LOG = logging.getLogger(__name__)
+
+
+class RunTest(tester.TestFlowBase):
+ """ Test case for add flows of Matches
+ """
+ OFP_VERSIONS = [ofproto_v1_2.OFP_VERSION]
+
+    def __init__(self, *args, **kwargs):
+        super(RunTest, self).__init__(*args, **kwargs)
+
+        # Verification spec filled by _set_verify(), read by verify_default().
+        self._verify = {}
+
+    def add_matches(self, dp, match):
+        # Install a flow with the given match and an empty instruction list;
+        # the match is read back via flow-stats and checked in verify_default().
+        m = dp.ofproto_parser.OFPFlowMod(dp, 0, 0, 0,
+                                         dp.ofproto.OFPFC_ADD,
+                                         0, 0, 0, 0xffffffff,
+                                         dp.ofproto.OFPP_ANY,
+                                         0xffffffff, 0, match, [])
+        dp.send_msg(m)
+
+    def _set_verify(self, headers, value, mask=None,
+                    all_bits_masked=False, type_='int'):
+        # Record what the flow-stats reply must contain:
+        #   headers         - acceptable OXM header(s) (plain and _W variants)
+        #   value/mask      - expected field value and optional mask
+        #   all_bits_masked - switch may drop an all-ones mask in its reply
+        #   type_           - 'int' | 'mac' | 'ipv4' | 'ipv6' (value encoding)
+        self._verify = {}
+        self._verify['headers'] = headers
+        self._verify['value'] = value
+        self._verify['mask'] = mask
+        self._verify['all_bits_masked'] = all_bits_masked
+        self._verify['type'] = type_
+
+    def verify_default(self, dp, stats):
+        # Check the flow-stats reply against the spec from _set_verify();
+        # returns True on success, otherwise a diagnostic string.
+        type_ = self._verify['type']
+        headers = self._verify['headers']
+        value = self._verify['value']
+        mask = self._verify['mask']
+        value_masked = self._masked(type_, value, mask)
+        all_bits_masked = self._verify['all_bits_masked']
+
+        field = None
+        for s in stats:
+            for f in s.match.fields:
+                if f.header in headers:
+                    field = f
+                    # NOTE(review): this break only leaves the inner loop;
+                    # later stats entries may overwrite 'field'.
+                    break
+
+        if field is None:
+            # An all-zero mask wildcards the field, so its absence is OK.
+            if self._is_all_zero_bit(type_, mask):
+                return True
+            return 'Field not found.'
+
+        f_value = field.value
+        if hasattr(field, 'mask'):
+            f_mask = field.mask
+        else:
+            f_mask = None
+
+        # Accept either the raw value or the masked value; accept a missing
+        # reply mask when the sent mask was all-ones (all_bits_masked).
+        if (f_value == value) or (f_value == value_masked):
+            if (f_mask == mask) or (all_bits_masked and f_mask is None):
+                return True
+
+        return "send: %s/%s, reply: %s/%s" \
+            % (self._cnv_to_str(type_, value, mask, f_value, f_mask))
+
+    def _masked(self, type_, value, mask):
+        # Apply the mask to the value using the encoding named by type_;
+        # a None mask means "no masking".
+        if mask is None:
+            v = value
+        elif type_ == 'int':
+            v = value & mask
+        elif type_ == 'mac':
+            v = self.haddr_masked(value, mask)
+        elif type_ == 'ipv4':
+            v = self.ipv4_masked(value, mask)
+        elif type_ == 'ipv6':
+            v = self.ipv6_masked(value, mask)
+        else:
+            raise Exception('Unknown type')
+        return v
+
+    def _is_all_zero_bit(self, type_, val):
+        # True when the mask wildcards every bit (all-zero).
+        if type_ == 'int' or type_ == 'ipv4':
+            return val == 0
+        elif type_ == 'mac':
+            # NOTE(review): py2-era code — iterating a bytes object on
+            # Python 3 yields ints, so v != b'\x00' would always be true;
+            # confirm the intended Python version before reuse.
+            for v in val:
+                if v != b'\x00':
+                    return False
+            return True
+        elif type_ == 'ipv6':
+            # IPv6 values are tuples of ints here.
+            for v in val:
+                if v != 0:
+                    return False
+            return True
+        else:
+            raise Exception('Unknown type')
+
+    def _cnv_to_str(self, type_, value, mask, f_value, f_mask):
+        # Render sent/received value and mask as human-readable strings for
+        # error messages; returns the 4-tuple (value, mask, f_value, f_mask).
+        func = None
+        if type_ == 'int':
+            pass
+        elif type_ == 'mac':
+            func = self.haddr_to_str
+        elif type_ == 'ipv4':
+            func = self.ipv4_to_str
+        elif type_ == 'ipv6':
+            func = self.ipv6_to_str
+        else:
+            raise Exception('Unknown type')
+
+        if func:
+            value = func(value)
+            f_value = func(f_value)
+            if mask:
+                mask = func(mask)
+            if f_mask:
+                f_mask = func(f_mask)
+
+        return value, mask, f_value, f_mask
+
+    def test_rule_set_dl_dst(self, dp):
+        # Exact match on Ethernet destination (no mask).
+        dl_dst = 'e2:7a:09:79:0b:0f'
+        dl_dst_bin = self.haddr_to_bin(dl_dst)
+
+        headers = [dp.ofproto.OXM_OF_ETH_DST, dp.ofproto.OXM_OF_ETH_DST_W]
+        self._set_verify(headers, dl_dst_bin, type_='mac')
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_dst(dl_dst_bin)
+        self.add_matches(dp, match)
+
+    def test_rule_set_dl_dst_masked_ff(self, dp):
+        # All-ones mask: switch may reply without a mask (all_bits_masked).
+        dl_dst = 'd0:98:79:b4:75:b5'
+        dl_dst_bin = self.haddr_to_bin(dl_dst)
+        mask = 'ff:ff:ff:ff:ff:ff'
+        mask_bin = self.haddr_to_bin(mask)
+
+        headers = [dp.ofproto.OXM_OF_ETH_DST, dp.ofproto.OXM_OF_ETH_DST_W]
+        self._set_verify(headers, dl_dst_bin, mask_bin, True, type_='mac')
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_dst_masked(dl_dst_bin, mask_bin)
+        self.add_matches(dp, match)
+
+    def test_rule_set_dl_dst_masked_f0(self, dp):
+        # Partial mask: last octet wildcarded.
+        dl_dst = 'e2:7a:09:79:0b:0f'
+        dl_dst_bin = self.haddr_to_bin(dl_dst)
+        mask = 'ff:ff:ff:ff:ff:00'
+        mask_bin = self.haddr_to_bin(mask)
+
+        headers = [dp.ofproto.OXM_OF_ETH_DST, dp.ofproto.OXM_OF_ETH_DST_W]
+        self._set_verify(headers, dl_dst_bin, mask_bin, type_='mac')
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_dst_masked(dl_dst_bin, mask_bin)
+        self.add_matches(dp, match)
+
+    def test_rule_set_dl_dst_masked_00(self, dp):
+        # All-zero mask wildcards the field; switch may omit it entirely.
+        dl_dst = 'e2:7a:09:79:0b:0f'
+        dl_dst_bin = self.haddr_to_bin(dl_dst)
+        mask = '00:00:00:00:00:00'
+        mask_bin = self.haddr_to_bin(mask)
+
+        headers = [dp.ofproto.OXM_OF_ETH_DST, dp.ofproto.OXM_OF_ETH_DST_W]
+        self._set_verify(headers, dl_dst_bin, mask_bin, type_='mac')
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_dst_masked(dl_dst_bin, mask_bin)
+        self.add_matches(dp, match)
+
+    def test_rule_set_dl_src(self, dp):
+        # Exact match on Ethernet source (no mask).
+        dl_src = 'e2:7a:09:79:0b:0f'
+        dl_src_bin = self.haddr_to_bin(dl_src)
+
+        headers = [dp.ofproto.OXM_OF_ETH_SRC, dp.ofproto.OXM_OF_ETH_SRC_W]
+        self._set_verify(headers, dl_src_bin, type_='mac')
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_src(dl_src_bin)
+        self.add_matches(dp, match)
+
+    def test_rule_set_dl_src_masked_ff(self, dp):
+        # All-ones mask on the Ethernet source.
+        dl_src = 'e2:7a:09:79:0b:0f'
+        dl_src_bin = self.haddr_to_bin(dl_src)
+        mask = 'ff:ff:ff:ff:ff:ff'
+        mask_bin = self.haddr_to_bin(mask)
+
+        headers = [dp.ofproto.OXM_OF_ETH_SRC, dp.ofproto.OXM_OF_ETH_SRC_W]
+        self._set_verify(headers, dl_src_bin, mask_bin, True, type_='mac')
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_src_masked(dl_src_bin, mask_bin)
+        self.add_matches(dp, match)
+
+    def test_rule_set_dl_src_masked_f0(self, dp):
+        # Partial mask: last octet wildcarded.
+        dl_src = 'e2:7a:09:79:0b:0f'
+        dl_src_bin = self.haddr_to_bin(dl_src)
+        mask = 'ff:ff:ff:ff:ff:00'
+        mask_bin = self.haddr_to_bin(mask)
+
+        headers = [dp.ofproto.OXM_OF_ETH_SRC, dp.ofproto.OXM_OF_ETH_SRC_W]
+        self._set_verify(headers, dl_src_bin, mask_bin, type_='mac')
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_src_masked(dl_src_bin, mask_bin)
+        self.add_matches(dp, match)
+
+    def test_rule_set_dl_src_masked_00(self, dp):
+        # All-zero mask wildcards the Ethernet source field.
+        dl_src = 'e2:7a:09:79:0b:0f'
+        dl_src_bin = self.haddr_to_bin(dl_src)
+        mask = '00:00:00:00:00:00'
+        mask_bin = self.haddr_to_bin(mask)
+
+        headers = [dp.ofproto.OXM_OF_ETH_SRC, dp.ofproto.OXM_OF_ETH_SRC_W]
+        self._set_verify(headers, dl_src_bin, mask_bin, type_='mac')
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_src_masked(dl_src_bin, mask_bin)
+        self.add_matches(dp, match)
+
+    def test_rule_set_dl_type_ip(self, dp):
+        # OXM exact match on EtherType == IPv4.
+        dl_type = ether.ETH_TYPE_IP
+
+        headers = [dp.ofproto.OXM_OF_ETH_TYPE]
+        self._set_verify(headers, dl_type)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        self.add_matches(dp, match)
+
+    def test_rule_set_dl_type_arp(self, dp):
+        # OXM exact match on EtherType == ARP.
+        dl_type = ether.ETH_TYPE_ARP
+
+        headers = [dp.ofproto.OXM_OF_ETH_TYPE]
+        self._set_verify(headers, dl_type)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        self.add_matches(dp, match)
+
+    def test_rule_set_dl_type_vlan(self, dp):
+        # OXM exact match on EtherType == 802.1Q.
+        dl_type = ether.ETH_TYPE_8021Q
+
+        headers = [dp.ofproto.OXM_OF_ETH_TYPE]
+        self._set_verify(headers, dl_type)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        self.add_matches(dp, match)
+
+    def test_rule_set_dl_type_ipv6(self, dp):
+        # OXM exact match on EtherType == IPv6.
+        dl_type = ether.ETH_TYPE_IPV6
+
+        headers = [dp.ofproto.OXM_OF_ETH_TYPE]
+        self._set_verify(headers, dl_type)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        self.add_matches(dp, match)
+
+    def test_rule_set_dl_type_lacp(self, dp):
+        # OXM exact match on EtherType == slow protocols (LACP).
+        dl_type = ether.ETH_TYPE_SLOW
+
+        headers = [dp.ofproto.OXM_OF_ETH_TYPE]
+        self._set_verify(headers, dl_type)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ip_dscp(self, dp):
+        # IP_DSCP requires an IPv4/IPv6 EtherType prerequisite.
+        ip_dscp = 36
+        dl_type = ether.ETH_TYPE_IP
+
+        headers = [dp.ofproto.OXM_OF_IP_DSCP]
+        self._set_verify(headers, ip_dscp)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ip_dscp(ip_dscp)
+        self.add_matches(dp, match)
+
+    def test_rule_set_vlan_vid(self, dp):
+        # Exact match on the VLAN ID.
+        vlan_vid = 0x4ef
+
+        headers = [dp.ofproto.OXM_OF_VLAN_VID, dp.ofproto.OXM_OF_VLAN_VID_W]
+        self._set_verify(headers, vlan_vid)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_vlan_vid(vlan_vid)
+        self.add_matches(dp, match)
+
+    def test_rule_set_vlan_vid_masked_ff(self, dp):
+        # Full 12-bit mask on the VLAN ID.
+        vlan_vid = 0x4ef
+        mask = 0xfff
+
+        headers = [dp.ofproto.OXM_OF_VLAN_VID, dp.ofproto.OXM_OF_VLAN_VID_W]
+        self._set_verify(headers, vlan_vid, mask, True)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_vlan_vid_masked(vlan_vid, mask)
+        self.add_matches(dp, match)
+
+    def test_rule_set_vlan_vid_masked_f0(self, dp):
+        # Partial mask: low nibble of the VLAN ID wildcarded.
+        vlan_vid = 0x4ef
+        mask = 0xff0
+
+        headers = [dp.ofproto.OXM_OF_VLAN_VID, dp.ofproto.OXM_OF_VLAN_VID_W]
+        self._set_verify(headers, vlan_vid, mask)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_vlan_vid_masked(vlan_vid, mask)
+        self.add_matches(dp, match)
+
+    def test_rule_set_vlan_vid_masked_00(self, dp):
+        # All-zero mask wildcards the VLAN ID.
+        vlan_vid = 0x4ef
+        mask = 0x000
+
+        headers = [dp.ofproto.OXM_OF_VLAN_VID, dp.ofproto.OXM_OF_VLAN_VID_W]
+        self._set_verify(headers, vlan_vid, mask)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_vlan_vid_masked(vlan_vid, mask)
+        self.add_matches(dp, match)
+
+    def test_rule_set_vlan_pcp(self, dp):
+        # VLAN_PCP requires VLAN_VID to be set as a prerequisite.
+        vlan_vid = 0x4ef
+        vlan_pcp = 5
+
+        headers = [dp.ofproto.OXM_OF_VLAN_PCP]
+        self._set_verify(headers, vlan_pcp)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_vlan_vid(vlan_vid)
+        match.set_vlan_pcp(vlan_pcp)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ip_ecn(self, dp):
+        # IP_ECN requires an IP EtherType prerequisite.
+        dl_type = ether.ETH_TYPE_IP
+        ip_ecn = 3
+
+        headers = [dp.ofproto.OXM_OF_IP_ECN]
+        self._set_verify(headers, ip_ecn)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ip_ecn(ip_ecn)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ip_proto_icmp(self, dp):
+        # IP protocol == ICMP over IPv4.
+        dl_type = ether.ETH_TYPE_IP
+        ip_proto = inet.IPPROTO_ICMP
+
+        headers = [dp.ofproto.OXM_OF_IP_PROTO]
+        self._set_verify(headers, ip_proto)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ip_proto(ip_proto)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ip_proto_tcp(self, dp):
+        # IP protocol == TCP over IPv4.
+        dl_type = ether.ETH_TYPE_IP
+        ip_proto = inet.IPPROTO_TCP
+
+        headers = [dp.ofproto.OXM_OF_IP_PROTO]
+        self._set_verify(headers, ip_proto)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ip_proto(ip_proto)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ip_proto_udp(self, dp):
+        # IP protocol == UDP over IPv4.
+        dl_type = ether.ETH_TYPE_IP
+        ip_proto = inet.IPPROTO_UDP
+
+        headers = [dp.ofproto.OXM_OF_IP_PROTO]
+        self._set_verify(headers, ip_proto)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ip_proto(ip_proto)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ip_proto_ipv6_route(self, dp):
+        # Next header == IPv6 routing extension header.
+        dl_type = ether.ETH_TYPE_IPV6
+        ip_proto = inet.IPPROTO_ROUTING
+
+        headers = [dp.ofproto.OXM_OF_IP_PROTO]
+        self._set_verify(headers, ip_proto)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ip_proto(ip_proto)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ip_proto_ipv6_frag(self, dp):
+        # Next header == IPv6 fragment extension header.
+        dl_type = ether.ETH_TYPE_IPV6
+        ip_proto = inet.IPPROTO_FRAGMENT
+
+        headers = [dp.ofproto.OXM_OF_IP_PROTO]
+        self._set_verify(headers, ip_proto)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ip_proto(ip_proto)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ip_proto_ipv6_icmp(self, dp):
+        # Next header == ICMPv6.
+        dl_type = ether.ETH_TYPE_IPV6
+        ip_proto = inet.IPPROTO_ICMPV6
+
+        headers = [dp.ofproto.OXM_OF_IP_PROTO]
+        self._set_verify(headers, ip_proto)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ip_proto(ip_proto)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ip_proto_ipv6_none(self, dp):
+        # Next header == no next header (IPPROTO_NONE).
+        dl_type = ether.ETH_TYPE_IPV6
+        ip_proto = inet.IPPROTO_NONE
+
+        headers = [dp.ofproto.OXM_OF_IP_PROTO]
+        self._set_verify(headers, ip_proto)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ip_proto(ip_proto)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ip_proto_ipv6_dstopts(self, dp):
+        # Next header == IPv6 destination-options extension header.
+        dl_type = ether.ETH_TYPE_IPV6
+        ip_proto = inet.IPPROTO_DSTOPTS
+
+        headers = [dp.ofproto.OXM_OF_IP_PROTO]
+        self._set_verify(headers, ip_proto)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ip_proto(ip_proto)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ipv4_src(self, dp):
+        # Exact match on the IPv4 source address.
+        dl_type = ether.ETH_TYPE_IP
+        src = '192.168.196.250'
+        src_int = self.ipv4_to_int(src)
+
+        headers = [dp.ofproto.OXM_OF_IPV4_SRC, dp.ofproto.OXM_OF_IPV4_SRC_W]
+        self._set_verify(headers, src_int, type_='ipv4')
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ipv4_src(src_int)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ipv4_src_masked_32(self, dp):
+        # /32 mask: switch may reply without a mask (all_bits_masked).
+        dl_type = ether.ETH_TYPE_IP
+        src = '192.168.196.250'
+        src_int = self.ipv4_to_int(src)
+        mask = '255.255.255.255'
+        mask_int = self.ipv4_to_int(mask)
+
+        headers = [dp.ofproto.OXM_OF_IPV4_SRC, dp.ofproto.OXM_OF_IPV4_SRC_W]
+        self._set_verify(headers, src_int, mask_int, True, type_='ipv4')
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ipv4_src_masked(src_int, mask_int)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ipv4_src_masked_24(self, dp):
+        # /24 mask on the IPv4 source address.
+        dl_type = ether.ETH_TYPE_IP
+        src = '192.168.196.250'
+        src_int = self.ipv4_to_int(src)
+        mask = '255.255.255.0'
+        mask_int = self.ipv4_to_int(mask)
+
+        headers = [dp.ofproto.OXM_OF_IPV4_SRC, dp.ofproto.OXM_OF_IPV4_SRC_W]
+        self._set_verify(headers, src_int, mask_int, type_='ipv4')
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ipv4_src_masked(src_int, mask_int)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ipv4_src_masked_0(self, dp):
+        # /0 mask wildcards the IPv4 source entirely.
+        dl_type = ether.ETH_TYPE_IP
+        src = '192.168.196.250'
+        src_int = self.ipv4_to_int(src)
+        mask = '0.0.0.0'
+        mask_int = self.ipv4_to_int(mask)
+
+        headers = [dp.ofproto.OXM_OF_IPV4_SRC, dp.ofproto.OXM_OF_IPV4_SRC_W]
+        self._set_verify(headers, src_int, mask_int, type_='ipv4')
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ipv4_src_masked(src_int, mask_int)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ipv4_dst(self, dp):
+        # Exact match on the IPv4 destination address.
+        dl_type = ether.ETH_TYPE_IP
+        dst = '192.168.54.155'
+        dst_int = self.ipv4_to_int(dst)
+
+        headers = [dp.ofproto.OXM_OF_IPV4_DST, dp.ofproto.OXM_OF_IPV4_DST_W]
+        self._set_verify(headers, dst_int, type_='ipv4')
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ipv4_dst(dst_int)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ipv4_dst_masked_32(self, dp):
+        # /32 mask: switch may reply without a mask (all_bits_masked).
+        dl_type = ether.ETH_TYPE_IP
+        dst = '192.168.54.155'
+        dst_int = self.ipv4_to_int(dst)
+        mask = '255.255.255.255'
+        mask_int = self.ipv4_to_int(mask)
+
+        headers = [dp.ofproto.OXM_OF_IPV4_DST, dp.ofproto.OXM_OF_IPV4_DST_W]
+        self._set_verify(headers, dst_int, mask_int, True, type_='ipv4')
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ipv4_dst_masked(dst_int, mask_int)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ipv4_dst_masked_24(self, dp):
+        # /24 mask on the IPv4 destination address.
+        dl_type = ether.ETH_TYPE_IP
+        dst = '192.168.54.155'
+        dst_int = self.ipv4_to_int(dst)
+        mask = '255.255.255.0'
+        mask_int = self.ipv4_to_int(mask)
+
+        headers = [dp.ofproto.OXM_OF_IPV4_DST, dp.ofproto.OXM_OF_IPV4_DST_W]
+        self._set_verify(headers, dst_int, mask_int, type_='ipv4')
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ipv4_dst_masked(dst_int, mask_int)
+        self.add_matches(dp, match)
+
+    def test_rule_set_ipv4_dst_masked_0(self, dp):
+        # /0 mask wildcards the IPv4 destination entirely.
+        dl_type = ether.ETH_TYPE_IP
+        dst = '192.168.54.155'
+        dst_int = self.ipv4_to_int(dst)
+        mask = '0.0.0.0'
+        mask_int = self.ipv4_to_int(mask)
+
+        headers = [dp.ofproto.OXM_OF_IPV4_DST, dp.ofproto.OXM_OF_IPV4_DST_W]
+        self._set_verify(headers, dst_int, mask_int, type_='ipv4')
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ipv4_dst_masked(dst_int, mask_int)
+        self.add_matches(dp, match)
+
+    def test_rule_set_tcp_src(self, dp):
+        # TCP source port; requires EtherType + IP-proto prerequisites.
+        dl_type = ether.ETH_TYPE_IP
+        ip_proto = inet.IPPROTO_TCP
+        tp_src = 1103
+
+        headers = [dp.ofproto.OXM_OF_TCP_SRC]
+        self._set_verify(headers, tp_src)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ip_proto(ip_proto)
+        match.set_tcp_src(tp_src)
+        self.add_matches(dp, match)
+
+    def test_rule_set_tcp_dst(self, dp):
+        # TCP destination port; requires EtherType + IP-proto prerequisites.
+        dl_type = ether.ETH_TYPE_IP
+        ip_proto = inet.IPPROTO_TCP
+        tp_dst = 236
+
+        headers = [dp.ofproto.OXM_OF_TCP_DST]
+        self._set_verify(headers, tp_dst)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ip_proto(ip_proto)
+        match.set_tcp_dst(tp_dst)
+        self.add_matches(dp, match)
+
+    def test_rule_set_udp_src(self, dp):
+        # UDP source port; requires EtherType + IP-proto prerequisites.
+        dl_type = ether.ETH_TYPE_IP
+        ip_proto = inet.IPPROTO_UDP
+        tp_src = 56617
+
+        headers = [dp.ofproto.OXM_OF_UDP_SRC]
+        self._set_verify(headers, tp_src)
+
+        match = dp.ofproto_parser.OFPMatch()
+        match.set_dl_type(dl_type)
+        match.set_ip_proto(ip_proto)
+        match.set_udp_src(tp_src)
+        self.add_matches(dp, match)
+
+ def test_rule_set_udp_dst(self, dp):
+ dl_type = ether.ETH_TYPE_IP
+ ip_proto = inet.IPPROTO_UDP
+ tp_dst = 61278
+
+ headers = [dp.ofproto.OXM_OF_UDP_DST]
+ self._set_verify(headers, tp_dst)
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ip_proto(ip_proto)
+ match.set_udp_dst(tp_dst)
+ self.add_matches(dp, match)
+
+ def test_rule_set_icmpv4_type(self, dp):  # ICMPv4 type match (8 = echo request)
+ dl_type = ether.ETH_TYPE_IP
+ ip_proto = inet.IPPROTO_ICMP
+ icmp_type = 8
+
+ headers = [dp.ofproto.OXM_OF_ICMPV4_TYPE]
+ self._set_verify(headers, icmp_type)
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ip_proto(ip_proto)
+ match.set_icmpv4_type(icmp_type)
+ self.add_matches(dp, match)
+
+ def test_rule_set_icmpv4_code(self, dp):  # code match; type is a prerequisite so set both
+ dl_type = ether.ETH_TYPE_IP
+ ip_proto = inet.IPPROTO_ICMP
+ icmp_type = 9
+ icmp_code = 16
+
+ headers = [dp.ofproto.OXM_OF_ICMPV4_CODE]
+ self._set_verify(headers, icmp_code)  # only the code field is verified
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ip_proto(ip_proto)
+ match.set_icmpv4_type(icmp_type)
+ match.set_icmpv4_code(icmp_code)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_opcode(self, dp):  # ARP opcode match (1 = request)
+ dl_type = ether.ETH_TYPE_ARP
+ arp_op = 1
+
+ headers = [dp.ofproto.OXM_OF_ARP_OP]
+ self._set_verify(headers, arp_op)
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)  # ETH_TYPE_ARP is the prerequisite for ARP fields
+ match.set_arp_opcode(arp_op)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_spa(self, dp):  # ARP sender protocol address, exact match
+ dl_type = ether.ETH_TYPE_ARP
+ nw_src = '192.168.222.57'
+ nw_src_int = self.ipv4_to_int(nw_src)
+
+ headers = [dp.ofproto.OXM_OF_ARP_SPA, dp.ofproto.OXM_OF_ARP_SPA_W]
+ self._set_verify(headers, nw_src_int, type_='ipv4')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_arp_spa(nw_src_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_spa_masked_32(self, dp):  # /32 mask; all-bits flag set in verify
+ dl_type = ether.ETH_TYPE_ARP
+ nw_src = '192.168.222.57'
+ nw_src_int = self.ipv4_to_int(nw_src)
+ mask = '255.255.255.255'
+ mask_int = self.ipv4_to_int(mask)
+
+ headers = [dp.ofproto.OXM_OF_ARP_SPA, dp.ofproto.OXM_OF_ARP_SPA_W]
+ self._set_verify(headers, nw_src_int, mask_int, True, type_='ipv4')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_arp_spa_masked(nw_src_int, mask_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_spa_masked_24(self, dp):  # /24 mask
+ dl_type = ether.ETH_TYPE_ARP
+ nw_src = '192.168.222.57'
+ nw_src_int = self.ipv4_to_int(nw_src)
+ mask = '255.255.255.0'
+ mask_int = self.ipv4_to_int(mask)
+
+ headers = [dp.ofproto.OXM_OF_ARP_SPA, dp.ofproto.OXM_OF_ARP_SPA_W]
+ self._set_verify(headers, nw_src_int, mask_int, type_='ipv4')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_arp_spa_masked(nw_src_int, mask_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_spa_masked_00(self, dp):  # all-zero mask (wildcard)
+ dl_type = ether.ETH_TYPE_ARP
+ nw_src = '192.168.222.57'
+ nw_src_int = self.ipv4_to_int(nw_src)
+ mask = '0.0.0.0'
+ mask_int = self.ipv4_to_int(mask)
+
+ headers = [dp.ofproto.OXM_OF_ARP_SPA, dp.ofproto.OXM_OF_ARP_SPA_W]
+ self._set_verify(headers, nw_src_int, mask_int, type_='ipv4')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_arp_spa_masked(nw_src_int, mask_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_tpa(self, dp):  # ARP target protocol address, exact match
+ dl_type = ether.ETH_TYPE_ARP
+ nw_dst = '192.168.198.233'
+ nw_dst_int = self.ipv4_to_int(nw_dst)
+
+ headers = [dp.ofproto.OXM_OF_ARP_TPA, dp.ofproto.OXM_OF_ARP_TPA_W]
+ self._set_verify(headers, nw_dst_int, type_='ipv4')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_arp_tpa(nw_dst_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_tpa_masked_32(self, dp):  # /32 mask; all-bits flag set in verify
+ dl_type = ether.ETH_TYPE_ARP
+ nw_dst = '192.168.198.233'
+ nw_dst_int = self.ipv4_to_int(nw_dst)
+ mask = '255.255.255.255'
+ mask_int = self.ipv4_to_int(mask)
+
+ headers = [dp.ofproto.OXM_OF_ARP_TPA, dp.ofproto.OXM_OF_ARP_TPA_W]
+ self._set_verify(headers, nw_dst_int, mask_int, True, type_='ipv4')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_arp_tpa_masked(nw_dst_int, mask_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_tpa_masked_24(self, dp):  # /24 mask
+ dl_type = ether.ETH_TYPE_ARP
+ nw_dst = '192.168.198.233'
+ nw_dst_int = self.ipv4_to_int(nw_dst)
+ mask = '255.255.255.0'
+ mask_int = self.ipv4_to_int(mask)
+
+ headers = [dp.ofproto.OXM_OF_ARP_TPA, dp.ofproto.OXM_OF_ARP_TPA_W]
+ self._set_verify(headers, nw_dst_int, mask_int, type_='ipv4')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_arp_tpa_masked(nw_dst_int, mask_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_tpa_masked_00(self, dp):  # all-zero mask (wildcard)
+ dl_type = ether.ETH_TYPE_ARP
+ nw_dst = '192.168.198.233'
+ nw_dst_int = self.ipv4_to_int(nw_dst)
+ mask = '0.0.0.0'
+ mask_int = self.ipv4_to_int(mask)
+
+ headers = [dp.ofproto.OXM_OF_ARP_TPA, dp.ofproto.OXM_OF_ARP_TPA_W]
+ self._set_verify(headers, nw_dst_int, mask_int, type_='ipv4')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_arp_tpa_masked(nw_dst_int, mask_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_sha(self, dp):  # ARP sender hardware (MAC) address, exact match
+ dl_type = ether.ETH_TYPE_ARP
+ arp_sha = '3e:ec:13:9b:f3:0b'
+ arp_sha_bin = self.haddr_to_bin(arp_sha)
+
+ headers = [dp.ofproto.OXM_OF_ARP_SHA, dp.ofproto.OXM_OF_ARP_SHA_W]
+ self._set_verify(headers, arp_sha_bin, type_='mac')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_arp_sha(arp_sha_bin)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_sha_masked_ff(self, dp):  # all-ones mask; all-bits flag set in verify
+ dl_type = ether.ETH_TYPE_ARP
+ arp_sha = '3e:ec:13:9b:f3:0b'
+ arp_sha_bin = self.haddr_to_bin(arp_sha)
+ mask = 'ff:ff:ff:ff:ff:ff'
+ mask_bin = self.haddr_to_bin(mask)
+
+ headers = [dp.ofproto.OXM_OF_ARP_SHA, dp.ofproto.OXM_OF_ARP_SHA_W]
+ self._set_verify(headers, arp_sha_bin, mask_bin, True, type_='mac')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_arp_sha_masked(arp_sha_bin, mask_bin)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_sha_masked_f0(self, dp):  # last octet wildcarded
+ dl_type = ether.ETH_TYPE_ARP
+ arp_sha = '3e:ec:13:9b:f3:0b'
+ arp_sha_bin = self.haddr_to_bin(arp_sha)
+ mask = 'ff:ff:ff:ff:ff:00'
+ mask_bin = self.haddr_to_bin(mask)
+
+ headers = [dp.ofproto.OXM_OF_ARP_SHA, dp.ofproto.OXM_OF_ARP_SHA_W]
+ self._set_verify(headers, arp_sha_bin, mask_bin, type_='mac')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_arp_sha_masked(arp_sha_bin, mask_bin)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_sha_masked_00(self, dp):  # all-zero mask (wildcard)
+ dl_type = ether.ETH_TYPE_ARP
+ arp_sha = '3e:ec:13:9b:f3:0b'
+ arp_sha_bin = self.haddr_to_bin(arp_sha)
+ mask = '00:00:00:00:00:00'
+ mask_bin = self.haddr_to_bin(mask)
+
+ headers = [dp.ofproto.OXM_OF_ARP_SHA, dp.ofproto.OXM_OF_ARP_SHA_W]
+ self._set_verify(headers, arp_sha_bin, mask_bin, type_='mac')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_arp_sha_masked(arp_sha_bin, mask_bin)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_tha(self, dp):  # ARP target hardware (MAC) address, exact match
+ dl_type = ether.ETH_TYPE_ARP
+ arp_tha = '83:6c:21:52:49:68'
+ arp_tha_bin = self.haddr_to_bin(arp_tha)
+
+ headers = [dp.ofproto.OXM_OF_ARP_THA, dp.ofproto.OXM_OF_ARP_THA_W]
+ self._set_verify(headers, arp_tha_bin, type_='mac')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_arp_tha(arp_tha_bin)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_tha_masked_ff(self, dp):  # all-ones mask; all-bits flag set in verify
+ dl_type = ether.ETH_TYPE_ARP
+ arp_tha = '83:6c:21:52:49:68'
+ arp_tha_bin = self.haddr_to_bin(arp_tha)
+ mask = 'ff:ff:ff:ff:ff:ff'
+ mask_bin = self.haddr_to_bin(mask)
+
+ headers = [dp.ofproto.OXM_OF_ARP_THA, dp.ofproto.OXM_OF_ARP_THA_W]
+ self._set_verify(headers, arp_tha_bin, mask_bin, True, type_='mac')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_arp_tha_masked(arp_tha_bin, mask_bin)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_tha_masked_f0(self, dp):  # last octet wildcarded
+ dl_type = ether.ETH_TYPE_ARP
+ arp_tha = '83:6c:21:52:49:68'
+ arp_tha_bin = self.haddr_to_bin(arp_tha)
+ mask = 'ff:ff:ff:ff:ff:00'
+ mask_bin = self.haddr_to_bin(mask)
+
+ headers = [dp.ofproto.OXM_OF_ARP_THA, dp.ofproto.OXM_OF_ARP_THA_W]
+ self._set_verify(headers, arp_tha_bin, mask_bin, type_='mac')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_arp_tha_masked(arp_tha_bin, mask_bin)
+ self.add_matches(dp, match)
+
+ def test_rule_set_arp_tha_masked_00(self, dp):  # all-zero mask (wildcard)
+ dl_type = ether.ETH_TYPE_ARP
+ arp_tha = '83:6c:21:52:49:68'
+ arp_tha_bin = self.haddr_to_bin(arp_tha)
+ mask = '00:00:00:00:00:00'
+ mask_bin = self.haddr_to_bin(mask)
+
+ headers = [dp.ofproto.OXM_OF_ARP_THA, dp.ofproto.OXM_OF_ARP_THA_W]
+ self._set_verify(headers, arp_tha_bin, mask_bin, type_='mac')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_arp_tha_masked(arp_tha_bin, mask_bin)
+ self.add_matches(dp, match)
+
+ def test_rule_set_ipv6_src(self, dp):  # IPv6 src, exact match
+ dl_type = ether.ETH_TYPE_IPV6
+ ipv6_src = '2001:db8:bd05:1d2:288a:1fc0:1:10ee'
+ ipv6_src_int = self.ipv6_to_int(ipv6_src)
+
+ headers = [dp.ofproto.OXM_OF_IPV6_SRC, dp.ofproto.OXM_OF_IPV6_SRC_W]
+ self._set_verify(headers, ipv6_src_int, type_='ipv6')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)  # ETH_TYPE_IPV6 is the prerequisite for IPv6 fields
+ match.set_ipv6_src(ipv6_src_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_ipv6_src_masked_ff(self, dp):  # /128 mask; all-bits flag set in verify
+ dl_type = ether.ETH_TYPE_IPV6
+ ipv6_src = '2001:db8:bd05:1d2:288a:1fc0:1:10ee'
+ ipv6_src_int = self.ipv6_to_int(ipv6_src)
+ mask = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
+ mask_int = self.ipv6_to_int(mask)
+
+ headers = [dp.ofproto.OXM_OF_IPV6_SRC, dp.ofproto.OXM_OF_IPV6_SRC_W]
+ self._set_verify(headers, ipv6_src_int, mask_int, True, type_='ipv6')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ipv6_src_masked(ipv6_src_int, mask_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_ipv6_src_masked_f0(self, dp):  # last 16 bits wildcarded (/112)
+ dl_type = ether.ETH_TYPE_IPV6
+ ipv6_src = '2001:db8:bd05:1d2:288a:1fc0:1:10ee'
+ ipv6_src_int = self.ipv6_to_int(ipv6_src)
+ mask = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:0'
+ mask_int = self.ipv6_to_int(mask)
+
+ headers = [dp.ofproto.OXM_OF_IPV6_SRC, dp.ofproto.OXM_OF_IPV6_SRC_W]
+ self._set_verify(headers, ipv6_src_int, mask_int, type_='ipv6')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ipv6_src_masked(ipv6_src_int, mask_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_ipv6_src_masked_00(self, dp):  # all-zero mask (wildcard)
+ dl_type = ether.ETH_TYPE_IPV6
+ ipv6_src = '2001:db8:bd05:1d2:288a:1fc0:1:10ee'
+ ipv6_src_int = self.ipv6_to_int(ipv6_src)
+ mask = '0:0:0:0:0:0:0:0'
+ mask_int = self.ipv6_to_int(mask)
+
+ headers = [dp.ofproto.OXM_OF_IPV6_SRC, dp.ofproto.OXM_OF_IPV6_SRC_W]
+ self._set_verify(headers, ipv6_src_int, mask_int, type_='ipv6')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ipv6_src_masked(ipv6_src_int, mask_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_ipv6_dst(self, dp):  # IPv6 dst, exact match
+ dl_type = ether.ETH_TYPE_IPV6
+ ipv6_dst = 'e9e8:9ea5:7d67:82cc:ca54:1fc0:2d24:f038'
+ ipv6_dst_int = self.ipv6_to_int(ipv6_dst)
+
+ headers = [dp.ofproto.OXM_OF_IPV6_DST, dp.ofproto.OXM_OF_IPV6_DST_W]
+ self._set_verify(headers, ipv6_dst_int, type_='ipv6')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ipv6_dst(ipv6_dst_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_ipv6_dst_masked_ff(self, dp):  # /128 mask; all-bits flag set in verify
+ dl_type = ether.ETH_TYPE_IPV6
+ ipv6_dst = 'e9e8:9ea5:7d67:82cc:ca54:1fc0:2d24:f038'
+ ipv6_dst_int = self.ipv6_to_int(ipv6_dst)
+ mask = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
+ mask_int = self.ipv6_to_int(mask)
+
+ headers = [dp.ofproto.OXM_OF_IPV6_DST, dp.ofproto.OXM_OF_IPV6_DST_W]
+ self._set_verify(headers, ipv6_dst_int, mask_int, True, type_='ipv6')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ipv6_dst_masked(ipv6_dst_int, mask_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_ipv6_dst_masked_f0(self, dp):  # last 16 bits wildcarded (/112)
+ dl_type = ether.ETH_TYPE_IPV6
+ ipv6_dst = 'e9e8:9ea5:7d67:82cc:ca54:1fc0:2d24:f038'
+ ipv6_dst_int = self.ipv6_to_int(ipv6_dst)
+ mask = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:0'
+ mask_int = self.ipv6_to_int(mask)
+
+ headers = [dp.ofproto.OXM_OF_IPV6_DST, dp.ofproto.OXM_OF_IPV6_DST_W]
+ self._set_verify(headers, ipv6_dst_int, mask_int, type_='ipv6')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ipv6_dst_masked(ipv6_dst_int, mask_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_ipv6_dst_masked_00(self, dp):  # all-zero mask (wildcard)
+ dl_type = ether.ETH_TYPE_IPV6
+ ipv6_dst = 'e9e8:9ea5:7d67:82cc:ca54:1fc0:2d24:f038'
+ ipv6_dst_int = self.ipv6_to_int(ipv6_dst)
+ mask = '0:0:0:0:0:0:0:0'
+ mask_int = self.ipv6_to_int(mask)
+
+ headers = [dp.ofproto.OXM_OF_IPV6_DST, dp.ofproto.OXM_OF_IPV6_DST_W]
+ self._set_verify(headers, ipv6_dst_int, mask_int, type_='ipv6')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ipv6_dst_masked(ipv6_dst_int, mask_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_ipv6_flabel(self, dp):  # IPv6 flow label (20-bit value), exact match
+ dl_type = ether.ETH_TYPE_IPV6
+ ipv6_label = 0xc5384
+
+ headers = [dp.ofproto.OXM_OF_IPV6_FLABEL,
+ dp.ofproto.OXM_OF_IPV6_FLABEL_W]
+ self._set_verify(headers, ipv6_label)
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ipv6_flabel(ipv6_label)
+ self.add_matches(dp, match)
+
+ def test_rule_set_ipv6_flabel_masked_ff(self, dp):  # full 20-bit mask; all-bits flag set
+ dl_type = ether.ETH_TYPE_IPV6
+ ipv6_label = 0xc5384
+ mask = 0xfffff
+
+ headers = [dp.ofproto.OXM_OF_IPV6_FLABEL,
+ dp.ofproto.OXM_OF_IPV6_FLABEL_W]
+ self._set_verify(headers, ipv6_label, mask, True)
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ipv6_flabel_masked(ipv6_label, mask)
+ self.add_matches(dp, match)
+
+ def test_rule_set_ipv6_flabel_masked_f0(self, dp):  # low nibble wildcarded
+ dl_type = ether.ETH_TYPE_IPV6
+ ipv6_label = 0xc5384
+ mask = 0xffff0
+
+ headers = [dp.ofproto.OXM_OF_IPV6_FLABEL,
+ dp.ofproto.OXM_OF_IPV6_FLABEL_W]
+ self._set_verify(headers, ipv6_label, mask)
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ipv6_flabel_masked(ipv6_label, mask)
+ self.add_matches(dp, match)
+
+ def test_rule_set_ipv6_flabel_masked_00(self, dp):  # zero mask (wildcard)
+ dl_type = ether.ETH_TYPE_IPV6
+ ipv6_label = 0xc5384
+ mask = 0x0
+
+ headers = [dp.ofproto.OXM_OF_IPV6_FLABEL,
+ dp.ofproto.OXM_OF_IPV6_FLABEL_W]
+ self._set_verify(headers, ipv6_label, mask)
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ipv6_flabel_masked(ipv6_label, mask)
+ self.add_matches(dp, match)
+
+ def test_rule_set_icmpv6_type(self, dp):  # ICMPv6 type match (129 = echo reply)
+ dl_type = ether.ETH_TYPE_IPV6
+ ip_proto = inet.IPPROTO_ICMPV6
+ icmp_type = 129
+
+ headers = [dp.ofproto.OXM_OF_ICMPV6_TYPE]
+ self._set_verify(headers, icmp_type)
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ip_proto(ip_proto)
+ match.set_icmpv6_type(icmp_type)
+ self.add_matches(dp, match)
+
+ def test_rule_set_icmpv6_code(self, dp):  # code match; type is a prerequisite so set both
+ dl_type = ether.ETH_TYPE_IPV6
+ ip_proto = inet.IPPROTO_ICMPV6
+ icmp_type = 138
+ icmp_code = 1
+
+ headers = [dp.ofproto.OXM_OF_ICMPV6_CODE]
+ self._set_verify(headers, icmp_code)  # only the code field is verified
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ip_proto(ip_proto)
+ match.set_icmpv6_type(icmp_type)
+ match.set_icmpv6_code(icmp_code)
+ self.add_matches(dp, match)
+
+ def test_rule_set_ipv6_nd_target(self, dp):  # ND target; icmpv6_type 135 (neighbor solicit) is a prerequisite
+ dl_type = ether.ETH_TYPE_IPV6
+ ip_proto = inet.IPPROTO_ICMPV6
+ icmp_type = 135
+ target = "5420:db3f:921b:3e33:2791:98f:dd7f:2e19"
+ target_int = self.ipv6_to_int(target)
+
+ headers = [dp.ofproto.OXM_OF_IPV6_ND_TARGET]
+ self._set_verify(headers, target_int, type_='ipv6')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ip_proto(ip_proto)
+ match.set_icmpv6_type(icmp_type)
+ match.set_ipv6_nd_target(target_int)
+ self.add_matches(dp, match)
+
+ def test_rule_set_ipv6_nd_sll(self, dp):  # ND source link-layer address (solicit, type 135)
+ dl_type = ether.ETH_TYPE_IPV6
+ ip_proto = inet.IPPROTO_ICMPV6
+ icmp_type = 135
+ nd_sll = "93:6d:d0:d4:e8:36"
+ nd_sll_bin = self.haddr_to_bin(nd_sll)
+
+ headers = [dp.ofproto.OXM_OF_IPV6_ND_SLL]
+ self._set_verify(headers, nd_sll_bin, type_='mac')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ip_proto(ip_proto)
+ match.set_icmpv6_type(icmp_type)
+ match.set_ipv6_nd_sll(nd_sll_bin)
+ self.add_matches(dp, match)
+
+ def test_rule_set_ipv6_nd_tll(self, dp):  # ND target link-layer address (advert, type 136)
+ dl_type = ether.ETH_TYPE_IPV6
+ ip_proto = inet.IPPROTO_ICMPV6
+ icmp_type = 136
+ nd_tll = "18:f6:66:b6:f1:b3"
+ nd_tll_bin = self.haddr_to_bin(nd_tll)
+
+ headers = [dp.ofproto.OXM_OF_IPV6_ND_TLL]
+ self._set_verify(headers, nd_tll_bin, type_='mac')
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_ip_proto(ip_proto)
+ match.set_icmpv6_type(icmp_type)
+ match.set_ipv6_nd_tll(nd_tll_bin)
+ self.add_matches(dp, match)
+
+ def test_rule_set_mpls_label(self, dp):  # 0x8847 = MPLS unicast ethertype
+ dl_type = 0x8847
+ label = 2144
+
+ headers = [dp.ofproto.OXM_OF_MPLS_LABEL]
+ self._set_verify(headers, label)
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_mpls_label(label)
+ self.add_matches(dp, match)
+
+ def test_rule_set_mpls_tc(self, dp):  # MPLS traffic class (3-bit field)
+ dl_type = 0x8847
+ tc = 3
+
+ headers = [dp.ofproto.OXM_OF_MPLS_TC]
+ self._set_verify(headers, tc)
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_type(dl_type)
+ match.set_mpls_tc(tc)
+ self.add_matches(dp, match)
+
+ def is_supported(self, t):  # return False for test names the switch under test cannot run
+ # Open vSwitch 1.10 does not support MPLS yet.
+ unsupported = [
+ 'test_rule_set_mpls_label',
+ 'test_rule_set_mpls_tc',
+ ]
+ for u in unsupported:
+ if t.find(u) != -1:  # NOTE(review): substring check, so any name containing u is skipped
+ return False
+
+ return True
diff --git a/tests/integrated/test_of_config.py b/tests/integrated/test_of_config.py
new file mode 100644
index 00000000..654b1489
--- /dev/null
+++ b/tests/integrated/test_of_config.py
@@ -0,0 +1,328 @@
+# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
+# Copyright (C) 2013 Isaku Yamahata <yamahata at private email ne jp>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+How to run this test
+
+edit linc config file. LINC-Switch/rel/linc/releases/1.0/sys.config
+You can find the sample config I used for the test below
+
+For the following config to work, the network interfaces
+linc-port and linc-port2 must be created beforehand.
+(Or edit the port name depending on your environment)
+An easy way is to create them as follows
+# ip link add linc-port type veth peer name linc-port-peer
+# ip link set linc-port up
+# ip link add linc-port2 type veth peer name linc-port-peer2
+# ip link set linc-port2 up
+
+Then run linc
+# rel/linc/bin/linc console
+
+Then run ryu
+# PYTHONPATH=. ./bin/ryu-manager --verbose \
+ tests/integrated/test_of_config.py
+
+
+Here is my sys.config used for this test.
+-->8-->8-->8-->8-->8-->8-->8-->8-->8-->8-->8-->8-->8---
+[
+ {linc,
+ [
+ {of_config, enabled},
+
+ {logical_switches,
+ [
+ {switch, 0,
+ [
+ {backend, linc_us4},
+
+ {controllers,
+ [
+ {"Switch0-DefaultController", "localhost", 6633, tcp}
+ ]},
+
+ {ports,
+ [
+ {port, 1, [{interface, "linc-port"}]},
+ {port, 2, [{interface, "linc-port2"}]}
+ ]},
+
+ {queues_status, disabled},
+
+ {queues,
+ [
+ ]}
+ ]}
+ ]}
+ ]},
+
+ {enetconf,
+ [
+ {capabilities, [{base, {1, 1}},
+ {startup, {1, 0}},
+ {'writable-running', {1, 0}}]},
+ {callback_module, linc_ofconfig},
+ {sshd_ip, any},
+ {sshd_port, 1830},
+ {sshd_user_passwords,
+ [
+ {"linc", "linc"}
+ ]}
+ ]},
+
+ {lager,
+ [
+ {handlers,
+ [
+ {lager_console_backend, info},
+ {lager_file_backend,
+ [
+ {"log/error.log", error, 10485760, "$D0", 5},
+ {"log/console.log", info, 10485760, "$D0", 5}
+ ]}
+ ]}
+ ]},
+
+ {sasl,
+ [
+ {sasl_error_logger, {file, "log/sasl-error.log"}},
+ {errlog_type, error},
+ {error_logger_mf_dir, "log/sasl"}, % Log directory
+ {error_logger_mf_maxbytes, 10485760}, % 10 MB max file size
+ {error_logger_mf_maxfiles, 5} % 5 files max
+ ]}
+].
+-->8-->8-->8-->8-->8-->8-->8-->8-->8-->8-->8-->8-->8--
+
+"""
+
+from __future__ import print_function
+
+import traceback
+
+import lxml.etree
+import ncclient
+
+from ryu.base import app_manager
+from ryu.lib.netconf import constants as nc_consts
+from ryu.lib import hub
+from ryu.lib import of_config
+from ryu.lib.of_config import capable_switch
+from ryu.lib.of_config import constants as ofc_consts
+
+
+# Change those depending on switch configuration
+HOST = '127.0.0.1'
+PORT = 1830
+USERNAME = 'linc'
+PASSWORD = 'linc'
+
+CAPABLE_SWITCH_ID = 'CapableSwitch0'
+LOGICAL_SWITCH = 'LogicalSwitch0'
+PORT_ID = 'LogicalSwitch0-Port2'
+CONTROLLER_ID = 'Switch0-DefaultController'
+
+PORT_DICT = {  # substitutions interpolated into the raw-XML templates below
+ 'capable_switch': CAPABLE_SWITCH_ID,
+ 'port_id': PORT_ID,
+ 'logical_switch': LOGICAL_SWITCH,
+ 'controller_id': CONTROLLER_ID,
+ 'ip': HOST,
+}
+
+SWITCH_PORT_DOWN = '''
+<nc:config xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <capable-switch xmlns="urn:onf:of111:config:yang">
+ <id>%(capable_switch)s</id>
+ <resources>
+ <port>
+ <resource-id>%(port_id)s</resource-id>
+ <configuration operation="merge">
+ <admin-state>down</admin-state>
+ <no-receive>false</no-receive>
+ <no-forward>false</no-forward>
+ <no-packet-in>false</no-packet-in>
+ </configuration>
+ </port>
+ </resources>
+ </capable-switch>
+</nc:config>
+''' % PORT_DICT  # edit-config payload: set admin-state down on PORT_ID
+
+SWITCH_ADVERTISED = '''
+<nc:config xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <capable-switch xmlns="urn:onf:of111:config:yang">
+ <id>%(capable_switch)s</id>
+ <resources>
+ <port>
+ <resource-id>%(port_id)s</resource-id>
+ <features>
+ <advertised operation="merge">
+ <rate>10Mb-FD</rate>
+ <auto-negotiate>true</auto-negotiate>
+ <medium>copper</medium>
+ <pause>unsupported</pause>
+ </advertised>
+ </features>
+ </port>
+ </resources>
+ </capable-switch>
+</nc:config>
+''' % PORT_DICT  # edit-config payload: merge advertised port features on PORT_ID
+
+SWITCH_CONTROLLER = '''
+<nc:config xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
+ <capable-switch xmlns="urn:onf:of111:config:yang">
+ <id>%(capable_switch)s</id>
+ <logical-switches>
+ <switch>
+ <id>%(logical_switch)s</id>
+ <controllers>
+ <controller operation="merge">
+ <id>%(controller_id)s</id>
+ <role>master</role>
+ <ip-address>%(ip)s</ip-address>
+ <port>6633</port>
+ <protocol>tcp</protocol>
+ </controller>
+ </controllers>
+ </switch>
+ </logical-switches>
+ </capable-switch>
+</nc:config>
+''' % PORT_DICT  # edit-config payload: point LOGICAL_SWITCH at this controller
+
+
+def _get_schema():  # compiled OF-Config XML schema used to validate switch replies
+ # file_name = of_config.OF_CONFIG_1_0_XSD
+ # file_name = of_config.OF_CONFIG_1_1_XSD
+ file_name = of_config.OF_CONFIG_1_1_1_XSD  # 1.1.1 is the version LINC speaks
+ return lxml.etree.XMLSchema(file=file_name)
+
+
+class OFConfigClient(app_manager.RyuApp):  # drives OF-Config get/get-config/edit-config against a capable switch
+ def __init__(self, *args, **kwargs):
+ super(OFConfigClient, self).__init__(*args, **kwargs)
+ self.switch = capable_switch.OFCapableSwitch(
+ host=HOST, port=PORT, username=USERNAME, password=PASSWORD,
+ unknown_host_cb=lambda host, fingeprint: True)  # NOTE(review): 'fingeprint' typo, harmless local name; cb accepts any host key
+ hub.spawn(self._do_of_config)  # run the whole scenario on a green thread
+
+ def _validate(self, tree):  # schema-validate a reply; failures are only logged
+ xmlschema = _get_schema()
+ try:
+ xmlschema.assertValid(tree)
+ except:  # NOTE(review): bare except — swallows all validation errors after printing
+ traceback.print_exc()
+
+ def _do_get(self):  # NETCONF <get>; validates and returns the parsed tree
+ data_xml = self.switch.raw_get()
+
+ tree = lxml.etree.fromstring(data_xml)
+ # print(lxml.etree.tostring(tree, pretty_print=True))
+ self._validate(tree)
+
+ name_spaces = set()
+ for e in tree.getiterator():
+ name_spaces.add(capable_switch.get_ns_tag(e.tag)[0])
+ print(name_spaces)  # show all namespaces seen in the reply
+
+ return tree
+
+ def _do_get_config(self, source):  # NETCONF <get-config> from the named datastore
+ print('source = %s' % source)
+ config_xml = self.switch.raw_get_config(source)
+
+ tree = lxml.etree.fromstring(config_xml)
+ # print(lxml.etree.tostring(tree, pretty_print=True))
+ self._validate(tree)
+
+ def _do_edit_config(self, config):  # validate locally, then push to the 'running' datastore
+ tree = lxml.etree.fromstring(config)
+ self._validate(tree)
+ self.switch.raw_edit_config(target='running', config=config)
+
+ def _print_ports(self, tree, ns):  # dump every <port> under <resources>
+ for port in tree.findall('{%s}%s/{%s}%s' % (ns, ofc_consts.RESOURCES,
+ ns, ofc_consts.PORT)):
+ print(lxml.etree.tostring(port, pretty_print=True))
+
+ def _set_ports_down(self):
+ """try to set all ports down with etree operation"""
+ tree = self._do_get()
+ print(lxml.etree.tostring(tree, pretty_print=True))
+
+ qname = lxml.etree.QName(tree.tag)
+ ns = qname.namespace  # namespace of the root element, reused for all lookups
+ self._print_ports(tree, ns)
+
+ switch_id = tree.find('{%s}%s' % (ns, ofc_consts.ID))
+ resources = tree.find('{%s}%s' % (ns, ofc_consts.RESOURCES))
+ configuration = tree.find(
+ '{%s}%s/{%s}%s/{%s}%s' % (ns, ofc_consts.RESOURCES,
+ ns, ofc_consts.PORT,
+ ns, ofc_consts.CONFIGURATION))
+ admin_state = tree.find(
+ '{%s}%s/{%s}%s/{%s}%s/{%s}%s' % (ns, ofc_consts.RESOURCES,
+ ns, ofc_consts.PORT,
+ ns, ofc_consts.CONFIGURATION,
+ ns, ofc_consts.ADMIN_STATE))
+
+ config_ = lxml.etree.Element(
+ '{%s}%s' % (ncclient.xml_.BASE_NS_1_0, nc_consts.CONFIG))  # build an <nc:config> request from scratch
+ capable_switch_ = lxml.etree.SubElement(config_, tree.tag)
+ switch_id_ = lxml.etree.SubElement(capable_switch_, switch_id.tag)
+ switch_id_.text = switch_id.text
+ resources_ = lxml.etree.SubElement(capable_switch_,
+ resources.tag)
+ for port in tree.findall(
+ '{%s}%s/{%s}%s' % (ns, ofc_consts.RESOURCES,
+ ns, ofc_consts.PORT)):  # one merge entry per port found in the reply
+ resource_id = port.find('{%s}%s' % (ns, ofc_consts.RESOURCE_ID))
+
+ port_ = lxml.etree.SubElement(resources_, port.tag)
+ resource_id_ = lxml.etree.SubElement(port_, resource_id.tag)
+ resource_id_.text = resource_id.text
+ configuration_ = lxml.etree.SubElement(port_, configuration.tag)
+ configuration_.set(ofc_consts.OPERATION, nc_consts.MERGE)
+ admin_state_ = lxml.etree.SubElement(configuration_,
+ admin_state.tag)
+ admin_state_.text = ofc_consts.DOWN
+ self._do_edit_config(lxml.etree.tostring(config_, pretty_print=True))
+
+ tree = self._do_get()  # re-fetch to show the effect of the edit
+ self._print_ports(tree, ns)
+
+ def _do_of_config(self):  # full scenario: reads, raw-XML edits, then etree-built edit
+ self._do_get()
+ self._do_get_config('running')
+ self._do_get_config('startup')
+
+ # LINC doesn't support 'candidate' datastore
+ try:
+ self._do_get_config('candidate')
+ except ncclient.NCClientError:
+ traceback.print_exc()
+
+ # use raw XML format
+ self._do_edit_config(SWITCH_PORT_DOWN)
+ self._do_edit_config(SWITCH_ADVERTISED)
+ self._do_edit_config(SWITCH_CONTROLLER)
+
+ self._set_ports_down()
+
+ self.switch.close_session()
diff --git a/tests/integrated/test_request_reply_v12.py b/tests/integrated/test_request_reply_v12.py
new file mode 100644
index 00000000..ea031182
--- /dev/null
+++ b/tests/integrated/test_request_reply_v12.py
@@ -0,0 +1,1055 @@
+# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+import time
+import logging
+
+from ryu.controller import ofp_event
+from ryu.controller.handler import MAIN_DISPATCHER
+from ryu.controller.handler import set_ev_cls
+from ryu.ofproto import ofproto_v1_2
+
+from tests.integrated import tester
+
+LOG = logging.getLogger(__name__)
+
+
+class RunTest(tester.TestFlowBase):
+ """ Test case for Request-Reply messages.
+
+ Some tests need attached port to switch.
+ If use the OVS, can do it with the following commands.
+ # ip link add <port> type dummy
+ # ovs-vsctl add-port <bridge> <port>
+ """
+
+ OFP_VERSIONS = [ofproto_v1_2.OFP_VERSION]
+
    def __init__(self, *args, **kwargs):
        super(RunTest, self).__init__(*args, **kwargs)

        # Expected reply value(s) for the test currently running; each
        # test_* method sets this before sending its request.
        self._verify = None
        # Number of flow tables exercised by the multi-table tests.
        self.n_tables = ofproto_v1_2.OFPTT_MAX
+
    def start_next_test(self, dp):
        """Reset switch state and kick off the next pending test.

        Clears the expectation and all flows, then pops one test name
        from self.pending and invokes the corresponding method.  Tests
        rejected by is_supported() are recorded as 'SKIP (unsupported)'
        and the next one is tried recursively.  When no tests remain,
        the accumulated results are printed.
        """
        self._verify = None
        self.delete_all_flows(dp)
        dp.send_barrier()
        if len(self.pending):
            t = self.pending.pop()
            if self.is_supported(t):
                LOG.info(tester.LOG_TEST_START, t)
                self.current = t
                getattr(self, t)(dp)
            else:
                self.results[t] = 'SKIP (unsupported)'
                self.unclear -= 1
                self.start_next_test(dp)
        else:
            self.print_results()
+
    def run_verify(self, ev):
        """Verify a reply event for the current test, then advance.

        Dispatches to 'verify_<name>' when the running test
        'test_<name>' defines one, otherwise to verify_default().  The
        result (True on success, or a descriptive string) is stored in
        self.results before the next test is started.
        """
        msg = ev.msg
        dp = msg.datapath

        verify_func = self.verify_default
        # Derive 'verify_foo' from the current test name 'test_foo'.
        v = "verify" + self.current[4:]
        if v in dir(self):
            verify_func = getattr(self, v)

        result = verify_func(dp, msg)
        if result is True:
            self.unclear -= 1

        self.results[self.current] = result
        self.start_next_test(dp)
+
+ def verify_default(self, dp, msg):
+ type_ = self._verify
+
+ if msg.msg_type == dp.ofproto.OFPT_STATS_REPLY:
+ return self.verify_stats(dp, msg.body, type_)
+ elif msg.msg_type == type_:
+ return True
+ else:
+ return 'Reply msg_type %s expected %s' \
+ % (msg.msg_type, type_)
+
+ def verify_stats(self, dp, stats, type_):
+ stats_types = dp.ofproto_parser.OFPStatsReply._STATS_TYPES
+ expect = stats_types.get(type_).__name__
+
+ if isinstance(stats, list):
+ for s in stats:
+ if expect == s.__class__.__name__:
+ return True
+ else:
+ if expect == stats.__class__.__name__:
+ return True
+ return 'Reply msg has not \'%s\' class.\n%s' % (expect, stats)
+
    def mod_flow(self, dp, cookie=0, cookie_mask=0, table_id=0,
                 command=None, idle_timeout=0, hard_timeout=0,
                 priority=0xff, buffer_id=0xffffffff, match=None,
                 actions=None, inst_type=None, out_port=None,
                 out_group=None, flags=0, inst=None):
        """Build and send an OFPFlowMod with test-friendly defaults.

        Defaults: command OFPFC_ADD, an empty (match-all) OFPMatch,
        out_port/out_group wildcarded to ANY, no buffered packet.
        When 'inst' is not given, it is built from 'actions' using
        'inst_type' (default OFPIT_APPLY_ACTIONS); with no actions the
        instruction list is left empty.
        """
        if command is None:
            command = dp.ofproto.OFPFC_ADD

        if inst is None:
            if inst_type is None:
                inst_type = dp.ofproto.OFPIT_APPLY_ACTIONS

            inst = []
            if actions is not None:
                inst = [dp.ofproto_parser.OFPInstructionActions(
                    inst_type, actions)]

        if match is None:
            match = dp.ofproto_parser.OFPMatch()

        if out_port is None:
            out_port = dp.ofproto.OFPP_ANY

        if out_group is None:
            out_group = dp.ofproto.OFPG_ANY

        m = dp.ofproto_parser.OFPFlowMod(dp, cookie, cookie_mask,
                                         table_id, command,
                                         idle_timeout, hard_timeout,
                                         priority, buffer_id,
                                         out_port, out_group,
                                         flags, match, inst)

        dp.send_msg(m)
+
+ def get_port(self, dp):
+ for port_no, port in dp.ports.items():
+ if port_no != dp.ofproto.OFPP_LOCAL:
+ return port
+ return None
+
+ # Test for Reply message type
+ def test_desc_stats_request(self, dp):
+ self._verify = dp.ofproto.OFPST_DESC
+ m = dp.ofproto_parser.OFPDescStatsRequest(dp)
+ dp.send_msg(m)
+
+ def test_flow_stats_request(self, dp):
+ self._verify = dp.ofproto.OFPST_FLOW
+ self.mod_flow(dp)
+ self.send_flow_stats(dp)
+
+ def test_aggregate_stats_request(self, dp):
+ self._verify = dp.ofproto.OFPST_AGGREGATE
+ match = dp.ofproto_parser.OFPMatch()
+ m = dp.ofproto_parser.OFPAggregateStatsRequest(
+ dp, dp.ofproto.OFPTT_ALL, dp.ofproto.OFPP_ANY,
+ dp.ofproto.OFPG_ANY, 0, 0, match)
+ dp.send_msg(m)
+
+ def test_table_stats_request(self, dp):
+ self._verify = dp.ofproto.OFPST_TABLE
+ m = dp.ofproto_parser.OFPTableStatsRequest(dp)
+ dp.send_msg(m)
+
+ def test_port_stats_request(self, dp):
+ self._verify = dp.ofproto.OFPST_PORT
+ m = dp.ofproto_parser.OFPPortStatsRequest(dp, dp.ofproto.OFPP_ANY)
+ dp.send_msg(m)
+
+ def test_echo_request(self, dp):
+ self._verify = dp.ofproto.OFPT_ECHO_REPLY
+ m = dp.ofproto_parser.OFPEchoRequest(dp)
+ dp.send_msg(m)
+
+ def test_features_request(self, dp):
+ self._verify = dp.ofproto.OFPT_FEATURES_REPLY
+ m = dp.ofproto_parser.OFPFeaturesRequest(dp)
+ dp.send_msg(m)
+
+ def test_get_config_request(self, dp):
+ self._verify = dp.ofproto.OFPT_GET_CONFIG_REPLY
+ m = dp.ofproto_parser.OFPGetConfigRequest(dp)
+ dp.send_msg(m)
+
+ def test_barrier_request(self, dp):
+ self._verify = dp.ofproto.OFPT_BARRIER_REPLY
+ dp.send_barrier()
+
+ def test_error_reply(self, dp):
+ ports = [0]
+ for p in dp.ports:
+ if p != dp.ofproto.OFPP_LOCAL:
+ ports.append(p)
+
+ port_no = max(ports) + 1
+ self._verify = dp.ofproto.OFPT_ERROR
+ m = dp.ofproto_parser.OFPPortMod(
+ dp, port_no, 'ff:ff:ff:ff:ff:ff', 0, 0, 0)
+ dp.send_msg(m)
+
+ # Test for reply value
+ def test_flow_stats_none(self, dp):
+ self.send_flow_stats(dp)
+
+ def verify_flow_stats_none(self, dp, msg):
+ stats = msg.body
+ if len(stats):
+ return 'Reply msg has body. %s' % (stats, )
+ return True
+
    def test_flow_stats_reply_value(self, dp):
        """Install one flow per table with distinct parameters, then
        request flow stats so the verifier can compare them."""
        self._verify = []
        c = 0
        while c < self.n_tables:
            # value = (table_id, cookie, idle_timeout, hard_timeout, priority)
            v = (c, c + 1, c + 2, c + 3, c + 4)
            self._verify.append(v)
            self.mod_flow(dp, table_id=v[0], cookie=v[1],
                          idle_timeout=v[2], hard_timeout=v[3], priority=v[4])
            c += 1
        dp.send_barrier()
        self.send_flow_stats(dp)
+
+ def verify_flow_stats_reply_value(self, dp, msg):
+ c = 0
+ for f in msg.body:
+ f_value = (f.table_id, f.cookie, f.idle_timeout,
+ f.hard_timeout, f.priority, )
+ if f_value != self._verify[c]:
+ return 'param is mismatched. verify=%s, reply=%s' \
+ % (self._verify[c], f_value,)
+ c += 1
+ return len(msg.body) == self.n_tables
+
+ def test_echo_request_has_data(self, dp):
+ data = 'test'
+ self._verify = data
+ m = dp.ofproto_parser.OFPEchoRequest(dp)
+ m.data = data
+ dp.send_msg(m)
+
+ def verify_echo_request_has_data(self, dp, msg):
+ data = msg.data
+ return self._verify == data
+
+ def test_aggregate_stats_flow_count(self, dp):
+ c = 0
+ while c < self.n_tables:
+ self.mod_flow(dp, table_id=c)
+ c += 1
+ dp.send_barrier()
+ match = dp.ofproto_parser.OFPMatch()
+ m = dp.ofproto_parser.OFPAggregateStatsRequest(
+ dp, dp.ofproto.OFPTT_ALL, dp.ofproto.OFPP_ANY,
+ dp.ofproto.OFPG_ANY, 0, 0, match)
+ dp.send_msg(m)
+
+ def verify_aggregate_stats_flow_count(self, dp, msg):
+ stats = msg.body
+ return stats.flow_count == self.n_tables
+
+ def test_aggregate_stats_flow_count_out_port(self, dp):
+ actions = [dp.ofproto_parser.OFPActionOutput(1, 1500)]
+ self.mod_flow(dp, table_id=1, actions=actions)
+
+ actions = [dp.ofproto_parser.OFPActionOutput(2, 1500)]
+ self.mod_flow(dp, table_id=2, actions=actions)
+ dp.send_barrier()
+
+ out_port = 2
+ match = dp.ofproto_parser.OFPMatch()
+ m = dp.ofproto_parser.OFPAggregateStatsRequest(
+ dp, dp.ofproto.OFPTT_ALL, out_port,
+ dp.ofproto.OFPG_ANY, 0, 0, match)
+ dp.send_msg(m)
+
+ def verify_aggregate_stats_flow_count_out_port(self, dp, msg):
+ stats = msg.body
+ return stats.flow_count == 1
+
+ def test_aggregate_stats_packet_count(self, dp):
+ in_port = 1
+ data = 'test'
+ self._verify = {'packet_count': 1,
+ 'byte_count': len(data)}
+
+ # add flow
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_in_port(in_port)
+ self.mod_flow(dp, table_id=0, match=match)
+
+ # packet out
+ output = dp.ofproto.OFPP_TABLE
+ actions = [dp.ofproto_parser.OFPActionOutput(output, 0)]
+ m = dp.ofproto_parser.OFPPacketOut(dp, 0xffffffff, in_port,
+ actions, data)
+ dp.send_msg(m)
+ dp.send_barrier()
+
+ match = dp.ofproto_parser.OFPMatch()
+ m = dp.ofproto_parser.OFPAggregateStatsRequest(
+ dp, dp.ofproto.OFPTT_ALL, dp.ofproto.OFPP_ANY,
+ dp.ofproto.OFPG_ANY, 0, 0, match)
+ dp.send_msg(m)
+
+ def verify_aggregate_stats_packet_count(self, dp, msg):
+ for name, val in self._verify.items():
+ r_val = getattr(msg.body, name)
+ if val != r_val:
+ return '%s is mismatched. verify=%s, reply=%s' \
+ % (name, val, r_val)
+ return True
+
+ def test_set_config_nomal(self, dp):
+ flags = dp.ofproto.OFPC_FRAG_NORMAL
+ self._verify = flags
+ m = dp.ofproto_parser.OFPSetConfig(dp, flags, 0)
+ dp.send_msg(m)
+ dp.send_barrier()
+
+ m = dp.ofproto_parser.OFPGetConfigRequest(dp)
+ dp.send_msg(m)
+
+ def verify_set_config_nomal(self, dp, msg):
+ return self._verify == msg.flags
+
+ def test_set_config_drop(self, dp):
+ flags = dp.ofproto.OFPC_FRAG_DROP
+ self._verify = flags
+ m = dp.ofproto_parser.OFPSetConfig(dp, flags, 0)
+ dp.send_msg(m)
+ dp.send_barrier()
+
+ m = dp.ofproto_parser.OFPGetConfigRequest(dp)
+ dp.send_msg(m)
+
+ def verify_set_config_drop(self, dp, msg):
+ return self._verify == msg.flags
+
+ def test_set_config_mask(self, dp):
+ flags = dp.ofproto.OFPC_FRAG_MASK
+ self._verify = flags
+ m = dp.ofproto_parser.OFPSetConfig(dp, flags, 0)
+ dp.send_msg(m)
+ dp.send_barrier()
+
+ m = dp.ofproto_parser.OFPGetConfigRequest(dp)
+ dp.send_msg(m)
+
+ def verify_set_config_mask(self, dp, msg):
+ return self._verify == msg.flags
+
+ def test_set_config_ttl_to_controller(self, dp):
+ flags = dp.ofproto.OFPC_INVALID_TTL_TO_CONTROLLER
+ self._verify = flags
+ m = dp.ofproto_parser.OFPSetConfig(dp, flags, 0)
+ dp.send_msg(m)
+ dp.send_barrier()
+
+ m = dp.ofproto_parser.OFPGetConfigRequest(dp)
+ dp.send_msg(m)
+
+ def verify_set_config_ttl_to_controller(self, dp, msg):
+ return self._verify == msg.flags
+
+ def test_set_config_miss_send_len(self, dp):
+ flags = dp.ofproto.OFPC_FRAG_NORMAL
+ ms_len = 256
+ self._verify = ms_len
+ m = dp.ofproto_parser.OFPSetConfig(dp, flags, ms_len)
+ dp.send_msg(m)
+ dp.send_barrier()
+
+ m = dp.ofproto_parser.OFPGetConfigRequest(dp)
+ dp.send_msg(m)
+
+ def verify_set_config_miss_send_len(self, dp, msg):
+ return self._verify == msg.miss_send_len
+
+ def test_set_config_miss_send_len_max(self, dp):
+ flags = dp.ofproto.OFPC_FRAG_NORMAL
+ ms_len = dp.ofproto.OFPCML_MAX
+ self._verify = ms_len
+ m = dp.ofproto_parser.OFPSetConfig(dp, flags, ms_len)
+ dp.send_msg(m)
+ dp.send_barrier()
+
+ m = dp.ofproto_parser.OFPGetConfigRequest(dp)
+ dp.send_msg(m)
+
+ def verify_set_config_miss_send_len_max(self, dp, msg):
+ return self._verify == msg.miss_send_len
+
+ def test_set_config_no_buffer(self, dp):
+ flags = dp.ofproto.OFPC_FRAG_NORMAL
+ ms_len = dp.ofproto.OFPCML_NO_BUFFER
+ self._verify = ms_len
+ m = dp.ofproto_parser.OFPSetConfig(dp, flags, ms_len)
+ dp.send_msg(m)
+ dp.send_barrier()
+
+ m = dp.ofproto_parser.OFPGetConfigRequest(dp)
+ dp.send_msg(m)
+
+ def verify_set_config_no_buffer(self, dp, msg):
+ return self._verify == msg.miss_send_len
+
+ def _verify_flow_inst_type(self, dp, msg):
+ inst_type = self._verify
+ stats = msg.body
+
+ for s in stats:
+ for i in s.instructions:
+ if i.type == inst_type:
+ return True
+ return 'not found inst_type[%s]' % (inst_type, )
+
+ def test_flow_add_apply_actions(self, dp):
+ inst_type = dp.ofproto.OFPIT_APPLY_ACTIONS
+ self._verify = inst_type
+
+ actions = [dp.ofproto_parser.OFPActionOutput(1, 1500)]
+ self.mod_flow(dp, actions=actions, inst_type=inst_type)
+ self.send_flow_stats(dp)
+
+ def verify_flow_add_apply_actions(self, dp, msg):
+ return self._verify_flow_inst_type(dp, msg)
+
+ def test_flow_add_goto_table(self, dp):
+ self._verify = dp.ofproto.OFPIT_GOTO_TABLE
+
+ inst = [dp.ofproto_parser.OFPInstructionGotoTable(1), ]
+ self.mod_flow(dp, inst=inst)
+ self.send_flow_stats(dp)
+
+ def verify_flow_add_goto_table(self, dp, msg):
+ return self._verify_flow_inst_type(dp, msg)
+
+ def _verify_flow_value(self, dp, msg):
+ stats = msg.body
+ verify = self._verify
+
+ if len(verify) != len(stats):
+ return 'flow_count is mismatched. verify=%s stats=%s' \
+ % (len(verify), len(stats))
+
+ for s in stats:
+ v_port = -1
+ v = verify.get(s.table_id, None)
+ if v:
+ v_port = v[3].port
+
+ s_port = s.instructions[0].actions[0].port
+
+ if v_port != s_port:
+ return 'port is mismatched. table_id=%s verify=%s, stats=%s' \
+ % (s.table_id, v_port, s_port)
+ return True
+
+ def _add_flow_for_flow_mod_tests(self, dp):
+ a1 = dp.ofproto_parser.OFPActionOutput(1, 1500)
+ a2 = dp.ofproto_parser.OFPActionOutput(2, 1500)
+
+ # table_id, cookie, priority, dl_dst, action)
+ tables = {0: [0xffff, 10, b'\xee' * 6, a1],
+ 1: [0xff00, 10, b'\xee' * 6, a2],
+ 2: [0xf000, 100, b'\xee' * 6, a1],
+ 3: [0x0000, 10, b'\xff' * 6, a1]}
+
+ self._verify = tables
+ for table_id, val in tables.items():
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_dst(val[2])
+ self.mod_flow(dp, match=match, actions=[val[3]],
+ table_id=table_id, cookie=val[0], priority=val[1])
+ dp.send_barrier()
+
+ def test_flow_mod_table_id(self, dp):
+ self._add_flow_for_flow_mod_tests(dp)
+
+ # modify flow of table_id=3
+ action = dp.ofproto_parser.OFPActionOutput(3, 1500)
+ self._verify[3][3] = action
+
+ table_id = 3
+ self.mod_flow(dp, command=dp.ofproto.OFPFC_MODIFY,
+ actions=[action], table_id=table_id)
+
+ dp.send_barrier()
+ self.send_flow_stats(dp)
+
+ def verify_flow_mod_table_id(self, dp, msg):
+ return self._verify_flow_value(dp, msg)
+
+ def test_flow_mod_cookie(self, dp):
+ self._add_flow_for_flow_mod_tests(dp)
+
+ # modify flow of table_id=1
+ action = dp.ofproto_parser.OFPActionOutput(3, 1500)
+ self._verify[1][3] = action
+
+ cookie = 0xff00
+ cookie_mask = 0xffff
+ table_id = 1
+ self.mod_flow(dp, command=dp.ofproto.OFPFC_MODIFY,
+ actions=[action], table_id=table_id,
+ cookie=cookie, cookie_mask=cookie_mask)
+
+ dp.send_barrier()
+ self.send_flow_stats(dp)
+
+ def verify_flow_mod_cookie(self, dp, msg):
+ return self._verify_flow_value(dp, msg)
+
+ def test_flow_mod_cookie_mask(self, dp):
+ self._add_flow_for_flow_mod_tests(dp)
+
+ # modify flow of table_id=0,1
+ action = dp.ofproto_parser.OFPActionOutput(3, 1500)
+ self._verify[0][3] = action
+ self._verify[1][3] = action
+
+ cookie = 0xffff
+ cookie_mask = 0xff00
+ for table_id in range(2):
+ self.mod_flow(dp, command=dp.ofproto.OFPFC_MODIFY,
+ actions=[action], table_id=table_id,
+ cookie=cookie, cookie_mask=cookie_mask)
+
+ dp.send_barrier()
+ self.send_flow_stats(dp)
+
+ def verify_flow_mod_cookie_mask(self, dp, msg):
+ return self._verify_flow_value(dp, msg)
+
+ def test_flow_mod_match(self, dp):
+ self._add_flow_for_flow_mod_tests(dp)
+
+ # modify flow of table_id=3
+ action = dp.ofproto_parser.OFPActionOutput(3, 1500)
+ self._verify[3][3] = action
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_dst(b'\xff' * 6)
+ table_id = 3
+ self.mod_flow(dp, command=dp.ofproto.OFPFC_MODIFY,
+ actions=[action], table_id=table_id, match=match)
+
+ dp.send_barrier()
+ self.send_flow_stats(dp)
+
+ def verify_flow_mod_match(self, dp, msg):
+ return self._verify_flow_value(dp, msg)
+
+ def test_flow_mod_strict(self, dp):
+ self._add_flow_for_flow_mod_tests(dp)
+
+ # modify flow of table_id=2
+ action = dp.ofproto_parser.OFPActionOutput(3, 1500)
+ self._verify[2][3] = action
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_dst(b'\xee' * 6)
+ priority = 100
+ table_id = 2
+ self.mod_flow(dp, command=dp.ofproto.OFPFC_MODIFY_STRICT,
+ actions=[action], table_id=table_id,
+ match=match, priority=priority)
+
+ dp.send_barrier()
+ self.send_flow_stats(dp)
+
+ def verify_flow_mod_strict(self, dp, msg):
+ return self._verify_flow_value(dp, msg)
+
+ def test_flow_del_table_id(self, dp):
+ self._add_flow_for_flow_mod_tests(dp)
+
+ # delete flow of table_id=3
+ del self._verify[3]
+
+ table_id = 3
+ self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE,
+ table_id=table_id)
+
+ dp.send_barrier()
+ self.send_flow_stats(dp)
+
+ def verify_flow_del_table_id(self, dp, msg):
+ return self._verify_flow_value(dp, msg)
+
+ def test_flow_del_table_id_all(self, dp):
+ self._add_flow_for_flow_mod_tests(dp)
+
+ # delete all flows
+ self._verify = {}
+
+ self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE,
+ table_id=dp.ofproto.OFPTT_ALL)
+
+ dp.send_barrier()
+ self.send_flow_stats(dp)
+
+ def verify_flow_del_table_id_all(self, dp, msg):
+ return self._verify_flow_value(dp, msg)
+
+ def test_flow_del_cookie(self, dp):
+ self._add_flow_for_flow_mod_tests(dp)
+
+ # delete flow of table_id=1
+ del self._verify[1]
+
+ cookie = 0xff00
+ cookie_mask = 0xffff
+ self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE,
+ table_id=dp.ofproto.OFPTT_ALL,
+ cookie=cookie, cookie_mask=cookie_mask)
+
+ dp.send_barrier()
+ self.send_flow_stats(dp)
+
+ def verify_flow_del_cookie(self, dp, msg):
+ return self._verify_flow_value(dp, msg)
+
+ def test_flow_del_cookie_mask(self, dp):
+ self._add_flow_for_flow_mod_tests(dp)
+
+ # delete flow of table_id=0,1
+ del self._verify[0]
+ del self._verify[1]
+
+ cookie = 0xffff
+ cookie_mask = 0xff00
+ self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE,
+ table_id=dp.ofproto.OFPTT_ALL,
+ cookie=cookie, cookie_mask=cookie_mask)
+
+ dp.send_barrier()
+ self.send_flow_stats(dp)
+
+ def verify_flow_del_cookie_mask(self, dp, msg):
+ return self._verify_flow_value(dp, msg)
+
+ def test_flow_del_match(self, dp):
+ self._add_flow_for_flow_mod_tests(dp)
+
+ # delete flow of table_id=3
+ del self._verify[3]
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_dst(b'\xff' * 6)
+ self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE,
+ table_id=dp.ofproto.OFPTT_ALL, match=match)
+
+ dp.send_barrier()
+ self.send_flow_stats(dp)
+
+ def verify_flow_del_match(self, dp, msg):
+ return self._verify_flow_value(dp, msg)
+
+ def test_flow_del_out_port(self, dp):
+ self._add_flow_for_flow_mod_tests(dp)
+
+ # delete flow of table_id=1
+ del self._verify[1]
+
+ out_port = 2
+ self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE,
+ table_id=dp.ofproto.OFPTT_ALL, out_port=out_port)
+
+ dp.send_barrier()
+ self.send_flow_stats(dp)
+
+ def verify_flow_del_out_port(self, dp, msg):
+ return self._verify_flow_value(dp, msg)
+
+ def test_flow_del_strict(self, dp):
+ self._add_flow_for_flow_mod_tests(dp)
+
+ # delete flow of table_id=2
+ del self._verify[2]
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_dl_dst(b'\xee' * 6)
+ priority = 100
+ self.mod_flow(dp, command=dp.ofproto.OFPFC_DELETE_STRICT,
+ table_id=dp.ofproto.OFPTT_ALL,
+ match=match, priority=priority)
+
+ dp.send_barrier()
+ self.send_flow_stats(dp)
+
+ def verify_flow_del_strict(self, dp, msg):
+ return self._verify_flow_value(dp, msg)
+
    def _send_port_mod(self, dp, config, mask):
        """Apply 'config'/'mask' to the first attached port via
        OFPPortMod, then request switch features so the verifier can
        inspect the resulting port config.

        When the switch has no attached port, the current test is
        recorded as failed and the next test is started instead.
        """
        p = self.get_port(dp)
        if not p:
            err = 'need attached port to switch.'
            self.results[self.current] = err
            self.start_next_test(dp)
            return

        # Expected: [port_no, effective config bits after masking].
        self._verify = [p.port_no, config & mask]
        m = dp.ofproto_parser.OFPPortMod(dp, p.port_no, p.hw_addr,
                                         config, mask, 0)
        dp.send_msg(m)
        dp.send_barrier()

        # TODO: wait for the port status change event instead of sleeping.
        time.sleep(1)
        m = dp.ofproto_parser.OFPFeaturesRequest(dp)
        dp.send_msg(m)
+
+ def _verify_port_mod_config(self, dp, msg):
+ port_no = self._verify[0]
+ config = self._verify[1]
+
+ port = msg.ports[port_no]
+ if config != port.config:
+ return "config is mismatched. verify=%s, stats=%s" \
+ % (bin(config), bin(port.config))
+ return True
+
+ def test_port_mod_config_01_all(self, dp):
+ config = 0b1100101
+ mask = 0b1111111
+ self._send_port_mod(dp, config, mask)
+
+ def verify_port_mod_config_01_all(self, dp, msg):
+ return self._verify_port_mod_config(dp, msg)
+
+ def test_port_mod_config_02_none(self, dp):
+ config = 0
+ mask = 0b1111111
+ self._send_port_mod(dp, config, mask)
+
+ def verify_port_mod_config_02_none(self, dp, msg):
+ return self._verify_port_mod_config(dp, msg)
+
+ def test_port_mod_config_03_mask(self, dp):
+ config = 0b1100101
+ mask = 0b1111000
+ self._send_port_mod(dp, config, mask)
+
+ def verify_port_mod_config_03_mask(self, dp, msg):
+ res = self._verify_port_mod_config(dp, msg)
+ # reset port config
+ port_no = self._verify[0]
+ p = msg.ports[port_no]
+ m = dp.ofproto_parser.OFPPortMod(dp, p.port_no, p.hw_addr,
+ 0, 0b1111111, 0)
+ dp.send_msg(m)
+ dp.send_barrier()
+ return res
+
+ def test_port_stats_port_no(self, dp):
+ p = self.get_port(dp)
+ if not p:
+ err = 'need attached port to switch.'
+ self.results[self.current] = err
+ self.start_next_test(dp)
+ return
+
+ self._verify = p.port_no
+ m = dp.ofproto_parser.OFPPortStatsRequest(dp, p.port_no)
+ dp.send_msg(m)
+
+ def verify_port_stats_port_no(self, dp, msg):
+ ports = msg.body
+ if len(ports) > 1:
+ return 'reply some ports.\n%s' % (ports)
+
+ if ports[0].port_no != self._verify:
+ return 'port_no is mismatched. request=%s reply=%s' \
+ % (self._verify, ports[0].port_no)
+
+ return True
+
+ def _add_flow_flow_removed(self, dp, reason, table_id=0,
+ cookie=0xff, priority=100, in_port=1,
+ idle_timeout=0, hard_timeout=0):
+ self._verify = {}
+ self._verify['params'] = {'reason': reason,
+ 'table_id': table_id,
+ 'cookie': cookie,
+ 'priority': priority}
+ self._verify['in_port'] = in_port
+ self._verify['timeout'] = idle_timeout
+ if hard_timeout:
+ if (idle_timeout == 0 or idle_timeout > hard_timeout):
+ self._verify['timeout'] = hard_timeout
+
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_in_port(in_port)
+ self.mod_flow(dp, match=match, cookie=cookie,
+ priority=priority, table_id=table_id,
+ idle_timeout=idle_timeout, hard_timeout=hard_timeout,
+ flags=dp.ofproto.OFPFF_SEND_FLOW_REM)
+
+ def _verify_flow_removed(self, dp, msg):
+ params = self._verify['params']
+ in_port = self._verify['in_port']
+ timeout = self._verify['timeout']
+
+ if timeout:
+ duration_nsec = (msg.duration_sec * 10 ** 9) + msg.duration_nsec
+ timeout_nsec = timeout * 10 ** 9
+
+ # grace of -0.5 and +1.5 second to timeout.
+ l = (timeout - 0.5) * 10 ** 9
+ h = (timeout + 1.5) * 10 ** 9
+ if not l < duration_nsec < h:
+ return 'bad duration time. set=%s(nsec), duration=%s(nsec)' \
+ % (timeout_nsec, duration_nsec)
+
+ for name, val in params.items():
+ r_val = getattr(msg, name)
+ if val != r_val:
+ return '%s is mismatched. verify=%s, reply=%s' \
+ % (name, val, r_val)
+
+ for f in msg.match.fields:
+ if f.header == ofproto_v1_2.OXM_OF_IN_PORT:
+ if f.value != in_port:
+ return 'in_port is mismatched. verify=%s, reply=%s' \
+ % (in_port, f.value)
+ return True
+
+ def test_flow_removed_idle_timeout(self, dp):
+ reason = dp.ofproto.OFPRR_IDLE_TIMEOUT
+ idle_timeout = 2
+ self._add_flow_flow_removed(dp, reason,
+ idle_timeout=idle_timeout)
+
+ def verify_flow_removed_idle_timeout(self, dp, msg):
+ return self._verify_flow_removed(dp, msg)
+
+ def test_flow_removed_idle_timeout_hit(self, dp):
+ reason = dp.ofproto.OFPRR_IDLE_TIMEOUT
+ idle_timeout = 5
+ in_port = 1
+ sleep = 2
+
+ # add target flow
+ self._add_flow_flow_removed(dp, reason, in_port=in_port,
+ idle_timeout=idle_timeout)
+ self._verify['timeout'] = idle_timeout + sleep
+
+ # sleep
+ time.sleep(sleep)
+
+ # packet out
+ output = dp.ofproto.OFPP_TABLE
+ actions = [dp.ofproto_parser.OFPActionOutput(output, 0)]
+ m = dp.ofproto_parser.OFPPacketOut(dp, 0xffffffff, in_port,
+ actions, None)
+ dp.send_msg(m)
+
+ def verify_flow_removed_idle_timeout_hit(self, dp, msg):
+ return self._verify_flow_removed(dp, msg)
+
+ def test_flow_removed_hard_timeout(self, dp):
+ reason = dp.ofproto.OFPRR_HARD_TIMEOUT
+ hard_timeout = 2
+ self._add_flow_flow_removed(dp, reason,
+ hard_timeout=hard_timeout)
+
+ def verify_flow_removed_hard_timeout(self, dp, msg):
+ return self._verify_flow_removed(dp, msg)
+
+ def test_flow_removed_hard_timeout_hit(self, dp):
+ reason = dp.ofproto.OFPRR_HARD_TIMEOUT
+ hard_timeout = 5
+ in_port = 1
+ sleep = 2
+
+ self._add_flow_flow_removed(dp, reason, in_port=in_port,
+ hard_timeout=hard_timeout)
+ dp.send_barrier()
+
+ # sleep
+ time.sleep(sleep)
+
+ # packet out
+ output = dp.ofproto.OFPP_TABLE
+ actions = [dp.ofproto_parser.OFPActionOutput(output, 0)]
+ m = dp.ofproto_parser.OFPPacketOut(dp, 0xffffffff, in_port,
+ actions, None)
+ dp.send_msg(m)
+
+ def verify_flow_removed_hard_timeout_hit(self, dp, msg):
+ return self._verify_flow_removed(dp, msg)
+
+ def test_flow_removed_delete(self, dp):
+ reason = dp.ofproto.OFPRR_DELETE
+ self._add_flow_flow_removed(dp, reason)
+ dp.send_barrier()
+ self.delete_all_flows(dp)
+
+ def verify_flow_removed_delete(self, dp, msg):
+ return self._verify_flow_removed(dp, msg)
+
+ def test_flow_removed_table_id(self, dp):
+ reason = dp.ofproto.OFPRR_DELETE
+ table_id = 1
+ self._add_flow_flow_removed(dp, reason, table_id=table_id)
+ dp.send_barrier()
+ self.delete_all_flows(dp)
+
+ def verify_flow_removed_table_id(self, dp, msg):
+ return self._verify_flow_removed(dp, msg)
+
+ def _send_packet_out(self, dp, buffer_id=0xffffffff,
+ in_port=None, output=None, data=''):
+ if in_port is None:
+ in_port = dp.ofproto.OFPP_LOCAL
+
+ if output is None:
+ output = dp.ofproto.OFPP_CONTROLLER
+
+ self._verify['in_port'] = in_port
+ self._verify['data'] = data
+
+ actions = [dp.ofproto_parser.OFPActionOutput(output, len(data))]
+ m = dp.ofproto_parser.OFPPacketOut(dp, buffer_id, in_port,
+ actions, data)
+ dp.send_msg(m)
+
+ def _verify_packet_in(self, dp, msg):
+ for name, val in self._verify.items():
+ if name == 'in_port':
+ for f in msg.match.fields:
+ if f.header == ofproto_v1_2.OXM_OF_IN_PORT:
+ r_val = f.value
+ else:
+ r_val = getattr(msg, name)
+
+ if val != r_val:
+ return '%s is mismatched. verify=%s, reply=%s' \
+ % (name, val, r_val)
+ return True
+
+ def test_packet_in_action(self, dp):
+ self._verify = {}
+ self._verify['reason'] = dp.ofproto.OFPR_ACTION
+ self._send_packet_out(dp)
+
+ def verify_packet_in_action(self, dp, msg):
+ return self._verify_packet_in(dp, msg)
+
+ def test_packet_in_data(self, dp):
+ self._verify = {}
+ self._verify['reason'] = dp.ofproto.OFPR_ACTION
+ data = 'test'
+ self._send_packet_out(dp, data=data)
+
+ def verify_packet_in_data(self, dp, msg):
+ return self._verify_packet_in(dp, msg)
+
+ def test_packet_in_table_id(self, dp):
+ in_port = 1
+ table_id = 2
+ output = dp.ofproto.OFPP_TABLE
+
+ self._verify = {}
+ self._verify['reason'] = dp.ofproto.OFPR_ACTION
+ self._verify['table_id'] = table_id
+
+ # add flow (goto_table)
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_in_port(in_port)
+ inst = [dp.ofproto_parser.OFPInstructionGotoTable(table_id)]
+ self.mod_flow(dp, inst=inst, match=match)
+
+ # add flow (output)
+ match = dp.ofproto_parser.OFPMatch()
+ match.set_in_port(in_port)
+ out = dp.ofproto.OFPP_CONTROLLER
+ actions = [dp.ofproto_parser.OFPActionOutput(out, 0)]
+ self.mod_flow(dp, actions=actions, match=match, table_id=table_id)
+ dp.send_barrier()
+
+ # packet out
+ self._send_packet_out(dp, in_port=in_port, output=output)
+
+ def verify_packet_in_table_id(self, dp, msg):
+ return self._verify_packet_in(dp, msg)
+
+ # handler
+ @set_ev_cls(ofp_event.EventOFPEchoReply, MAIN_DISPATCHER)
+ def echo_replay_handler(self, ev):
+ if self.current.find('echo_request') > 0:
+ self.run_verify(ev)
+
+ @set_ev_cls(ofp_event.EventOFPStatsReply, MAIN_DISPATCHER)
+ def stats_reply_handler(self, ev):
+ if self.current is None:
+ msg = ev.msg
+ dp = msg.datapath
+ if self._verify == dp.ofproto.OFPST_TABLE:
+ self.table_stats = msg.body
+ self.start_next_test(dp)
+ else:
+ self.run_verify(ev)
+
+ @set_ev_cls(ofp_event.EventOFPSwitchFeatures, MAIN_DISPATCHER)
+ def features_replay_handler(self, ev):
+ if self.current is None:
+ pass
+ else:
+ self.run_verify(ev)
+
+ @set_ev_cls(ofp_event.EventOFPGetConfigReply, MAIN_DISPATCHER)
+ def get_config_replay_handler(self, ev):
+ self.run_verify(ev)
+
+ @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
+ def barrier_replay_handler(self, ev):
+ if self.current == 'test_barrier_request':
+ self.run_verify(ev)
+
+ @set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
+ def port_status_handler(self, ev):
+ pass
+
+ @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
+ def packet_in_handler(self, ev):
+ if self.current.find('packet_in'):
+ self.run_verify(ev)
+
+ @set_ev_cls(ofp_event.EventOFPFlowRemoved, MAIN_DISPATCHER)
+ def flow_removed_handler(self, ev):
+ if self.current.find('flow_removed') > 0:
+ self.run_verify(ev)
+
+ @set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
+ def error_handler(self, ev):
+ if self.current.find('error') > 0:
+ self.run_verify(ev)
+
+ def is_supported(self, t):
+ unsupported = [
+ ]
+ for u in unsupported:
+ if t.find(u) != -1:
+ return False
+
+ return True
diff --git a/tests/integrated/test_vrrp_linux_multi.py b/tests/integrated/test_vrrp_linux_multi.py
new file mode 100644
index 00000000..ae4d837a
--- /dev/null
+++ b/tests/integrated/test_vrrp_linux_multi.py
@@ -0,0 +1,103 @@
+# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
+# Copyright (C) 2013 Isaku Yamahata <yamahata at valinux co jp>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Usage:
+PYTHONPATH=. ./bin/ryu-manager --verbose \
+ ryu.services.protocols.vrrp.dumper \
+ ryu.services.protocols.vrrp.sample_manager.py \
+ ryu.app.rest \
+ tests.integrated.test_vrrp_linux_multi
+
+ryu.services.protocols.vrrp.dumper is optional.
+ryu.app.rest is merely to prevent ryu-manager from exiting.
+
+ ----------------
+ /--<--veth0-->| |
+ Ryu | linux bridge |<--veth2--> command to generate packets
+ \--<--veth1-->| (vrrpbr) |
+ ----------------
+
+
+# ip link add veth0 type veth peer name veth0-br
+# ip link add veth1 type veth peer name veth1-br
+# ip link add veth2 type veth peer name veth2-br
+
+# brctl addbr vrrpbr
+# brctl addif vrrpbr veth0-br
+# brctl addif vrrpbr veth1-br
+# brctl addif vrrpbr veth2-br
+
+
+# ip link set veth0 up
+# ip link set veth0-br up
+# ip link set veth1 up
+# ip link set veth1-br up
+# ip link set veth2 up
+# ip link set veth2-br up
+# ip link set vrrpbr up
+
+if you like, capture packets on each interface like:
+# tshark -i vrrpbr
+# tshark -i veth0
+# tshark -i veth1
+# tshark -i veth2
+
+virtual router mac address: 00:00:5E:00:01:{VRID} = 00:00:5E:00:01:07
+while the routers are running, send packets destined to the mac address
+00:00:5E:00:01:07 from veth2 with a packet generator such as packeth
+
+NOTE: vrid: 7 and ip address: 10.0.0.1... are hardcoded below
+"""
+
+from ryu.base import app_manager
+from ryu.lib import hub
+from ryu.lib import mac as lib_mac
+from ryu.lib.packet import vrrp
+from ryu.services.protocols.vrrp import api as vrrp_api
+from ryu.services.protocols.vrrp import event as vrrp_event
+from ryu.services.protocols.vrrp import monitor_linux
+
+from . import vrrp_common
+
+
class VRRPConfigApp(vrrp_common.VRRPCommon):
    """Drive the VRRP multi-router scenario over two veth interfaces
    attached to a Linux bridge (see the module docstring for the
    required interface/bridge setup)."""

    # Interface names hardcoded to match the accompanying setup script
    # (test_vrrp_linux_multi.sh).
    _IFNAME0 = 'veth0'
    _IFNAME1 = 'veth1'

    def __init__(self, *args, **kwargs):
        super(VRRPConfigApp, self).__init__(*args, **kwargs)

    def start(self):
        # Run the common test scenario in a green thread.
        hub.spawn(self._main)

    def _configure_vrrp_router(self, vrrp_version, priority,
                               primary_ip_address, ifname, vrid):
        """Create one VRRP router instance on 'ifname'.

        The virtual IP is derived from the VRID ('10.0.<vrid>.1').
        Returns the reply event from the VRRP service.
        """
        interface = vrrp_event.VRRPInterfaceNetworkDevice(
            lib_mac.DONTCARE_STR, primary_ip_address, None, ifname)
        self.logger.debug('%s', interface)

        vip = '10.0.%d.1' % vrid
        ip_addresses = [vip]
        config = vrrp_event.VRRPConfig(
            version=vrrp_version, vrid=vrid, priority=priority,
            ip_addresses=ip_addresses)
        self.logger.debug('%s', config)

        rep = vrrp_api.vrrp_config(self, interface, config)
        self.logger.debug('%s', rep)

        return rep
diff --git a/tests/integrated/test_vrrp_linux_multi.sh b/tests/integrated/test_vrrp_linux_multi.sh
new file mode 100644
index 00000000..b1dba4bc
--- /dev/null
+++ b/tests/integrated/test_vrrp_linux_multi.sh
@@ -0,0 +1,34 @@
+#! /bin/sh
+
+# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
+# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ip link add veth0 type veth peer name veth0-br
+ip link add veth1 type veth peer name veth1-br
+ip link add veth2 type veth peer name veth2-br
+
+brctl addbr vrrpbr
+brctl addif vrrpbr veth0-br
+brctl addif vrrpbr veth1-br
+brctl addif vrrpbr veth2-br
+
+ip link set veth0 up
+ip link set veth0-br up
+ip link set veth1 up
+ip link set veth1-br up
+ip link set veth2 up
+ip link set veth2-br up
+ip link set vrrpbr up
diff --git a/tests/integrated/test_vrrp_multi.py b/tests/integrated/test_vrrp_multi.py
new file mode 100644
index 00000000..9d586119
--- /dev/null
+++ b/tests/integrated/test_vrrp_multi.py
@@ -0,0 +1,143 @@
+# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
+# Copyright (C) 2013 Isaku Yamahata <yamahata at valinux co jp>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Usage:
+PYTHONPATH=. ./bin/ryu-manager --verbose \
+ ryu.topology.switches \
+ ryu.services.protocols.vrrp.dumper \
+ tests.integrated.test_vrrp_multi
+
+ryu.services.protocols.vrrp.dumper is optional.
+
+ +---+ ----------------
+ /--|OVS|<--veth-->| |
+ Ryu +---+ | linux bridge |<--veth--> command to generate packets
+ \--|OVS|<--veth-->| |
+ +---+ ----------------
+
+configure OVSs to connect ryu
+example
+# brctl addbr b0
+# ip link add veth0-ovs type veth peer name veth0-br
+# ip link add veth1-ovs type veth peer name veth1-br
+# brctl addif b0 veth0-br
+# brctl addif b0 veth1-br
+# brctl show
+bridge name bridge id STP enabled interfaces
+b0 8000.6642e5822497 no veth0-br
+ veth1-br
+ovs-system 0000.122038293b55 no
+
+# ovs-vsctl add-br s0
+# ovs-vsctl add-port s0 veth0-ovs
+# ovs-vsctl add-br s1
+# ovs-vsctl add-port s1 veth1-ovs
+# ovs-vsctl set-controller s0 tcp:127.0.0.1:6633
+# ovs-vsctl set bridge s0 protocols='[OpenFlow12]'
+# ovs-vsctl set-controller s1 tcp:127.0.0.1:6633
+# ovs-vsctl set bridge s1 protocols='[OpenFlow12]'
+# ovs-vsctl show
+20c2a046-ae7e-4453-a576-11034db24985
+ Manager "ptcp:6634"
+ Bridge "s0"
+ Controller "tcp:127.0.0.1:6633"
+ is_connected: true
+ Port "veth0-ovs"
+ Interface "veth0-ovs"
+ Port "s0"
+ Interface "s0"
+ type: internal
+ Bridge "s1"
+ Controller "tcp:127.0.0.1:6633"
+ is_connected: true
+ Port "veth1-ovs"
+ Interface "veth1-ovs"
+ Port "s1"
+ Interface "s1"
+ type: internal
+ ovs_version: "1.9.90"
+# ip link set veth0-br up
+# ip link set veth0-ovs up
+# ip link set veth1-br up
+# ip link set veth1-ovs up
+# ip link set b0 up
+"""
+
+from ryu.base import app_manager
+from ryu.controller import handler
+from ryu.lib import dpid as lib_dpid
+from ryu.lib import hub
+from ryu.lib.packet import vrrp
+from ryu.services.protocols.vrrp import api as vrrp_api
+from ryu.services.protocols.vrrp import event as vrrp_event
+from ryu.services.protocols.vrrp import monitor_openflow
+from ryu.topology import event as topo_event
+from ryu.topology import api as topo_api
+
+from . import vrrp_common
+
+
+class VRRPConfigApp(vrrp_common.VRRPCommon):
+    """VRRP test app driving the common scenario over OpenFlow switches.
+
+    Unlike the Linux-device variant, the interface "names" here are
+    indexes into the sorted list of connected datapath ids.
+    """
+    _IFNAME0 = 0
+    _IFNAME1 = 1
+
+    def __init__(self, *args, **kwargs):
+        super(VRRPConfigApp, self).__init__(*args, **kwargs)
+        # guard so the scenario is spawned only once even though
+        # EventSwitchEnter fires per switch
+        self.start_main = False
+
+    @handler.set_ev_cls(topo_event.EventSwitchEnter)
+    def _switch_enter_handler(self, ev):
+        # wait until both OVS bridges have connected before starting
+        if self.start_main:
+            return
+
+        switches = topo_api.get_switch(self)
+        if len(switches) < 2:
+            return
+
+        self.start_main = True
+        app_mgr = app_manager.AppManager.get_instance()
+        self.logger.debug('%s', app_mgr.applications)
+        # keep a handle on the topology app to look up ports later
+        self.switches = app_mgr.applications['switches']
+        hub.spawn(self._main)
+
+    def _configure_vrrp_router(self, vrrp_version, priority,
+                               ip_addr, switch_index, vrid):
+        """Configure one VRRP router instance on an OpenFlow port.
+
+        The switch is picked by switch_index into the sorted dpid list;
+        returns the reply event from vrrp_api.vrrp_config().
+        """
+        switches = self.switches
+        self.logger.debug('%s', switches.dps)
+        dpid = sorted(switches.dps.keys())[switch_index]
+        self.logger.debug('%s', lib_dpid.dpid_to_str(dpid))
+        self.logger.debug('%s', switches.port_state)
+        # hack: use the smallest port no to avoid picking OVS local port
+        port_no = sorted(switches.port_state[dpid].keys())[0]
+        self.logger.debug('%d', port_no)
+        port = switches.port_state[dpid][port_no]
+        self.logger.debug('%s', port)
+        mac = port.hw_addr
+        self.logger.debug('%s', mac)
+
+        interface = vrrp_event.VRRPInterfaceOpenFlow(
+            mac, ip_addr, None, dpid, port_no)
+        self.logger.debug('%s', interface)
+
+        config = vrrp_event.VRRPConfig(
+            version=vrrp_version, vrid=vrid, priority=priority,
+            ip_addresses=[ip_addr])
+        self.logger.debug('%s', config)
+
+        rep = vrrp_api.vrrp_config(self, interface, config)
+        self.logger.debug('%s', rep)
+        return rep
diff --git a/tests/integrated/test_vrrp_multi.sh b/tests/integrated/test_vrrp_multi.sh
new file mode 100644
index 00000000..5118cf7d
--- /dev/null
+++ b/tests/integrated/test_vrrp_multi.sh
@@ -0,0 +1,42 @@
+#! /bin/sh
+
+# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
+# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Allow overriding the ovs-vsctl binary via the VSCTL environment variable.
+if [ -z "$VSCTL" ]; then
+    VSCTL=ovs-vsctl
+fi
+
+# create two bridges.
+# connect them using patch ports.
+
+# create <bridge> <local-patch-idx> <peer-patch-idx> <controller>
+# Creates a userspace (netdev) bridge with one patch port peered to the
+# other bridge's patch port, restricts it to OpenFlow 1.2 and points it
+# at the given controller.
+create() {
+    BR=$1
+    LOCAL_PORT=patch$2
+    PEER_PORT=patch$3
+    CONT=$4
+    ${VSCTL} add-br ${BR} -- set bridge ${BR} datapath_type=netdev
+    ${VSCTL} add-port ${BR} ${LOCAL_PORT}
+    ${VSCTL} set interface ${LOCAL_PORT} type=patch
+    ${VSCTL} set interface ${LOCAL_PORT} options:peer=${PEER_PORT}
+    ${VSCTL} set bridge ${BR} protocols='[OpenFlow12]'
+    ${VSCTL} set-controller ${BR} ${CONT}
+}
+
+CONT=tcp:127.0.0.1:6633
+
+# s0's patch0 peers with s1's patch1 and vice versa.
+create s0 0 1 ${CONT}
+create s1 1 0 ${CONT}
diff --git a/tests/integrated/tester.py b/tests/integrated/tester.py
new file mode 100644
index 00000000..f052c6cc
--- /dev/null
+++ b/tests/integrated/tester.py
@@ -0,0 +1,214 @@
+# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+import sys
+import logging
+import itertools
+
+from ryu import utils
+from ryu.lib import mac
+from ryu.base import app_manager
+from ryu.controller import ofp_event
+from ryu.controller import handler
+from ryu.controller import dpset
+from ryu.controller.handler import MAIN_DISPATCHER
+from ryu.controller.handler import CONFIG_DISPATCHER
+from ryu.controller.handler import set_ev_cls
+from ryu.ofproto import ofproto_v1_0
+from ryu.ofproto import ofproto_v1_2
+
+
+LOG = logging.getLogger(__name__)
+
+
+LOG_TEST_START = 'TEST_START: %s'
+LOG_TEST_RESULTS = 'TEST_RESULTS:'
+LOG_TEST_FINISH = 'TEST_FINISHED: Completed=[%s] (OK=%s NG=%s SKIP=%s)'
+LOG_TEST_UNSUPPORTED = 'SKIP (unsupported)'
+
+
+class TestFlowBase(app_manager.RyuApp):
+    """
+    To run the tests is required for the following pair of functions.
+    1. test_<test name>()
+        To send flows to switch.
+
+    2. verify_<test name>() or _verify_default()
+        To check flows of switch.
+    """
+
+    _CONTEXTS = {'dpset': dpset.DPSet}
+
+    def __init__(self, *args, **kwargs):
+        super(TestFlowBase, self).__init__(*args, **kwargs)
+        self.pending = []    # test method names not yet executed
+        self.results = {}    # test name -> True | error string | skip marker
+        self.current = None  # name of the test currently running
+        self.unclear = 0     # count of tests that did not end with True
+
+        # collect all test_* methods; sorted in reverse so that pop()
+        # yields them in alphabetical order
+        for t in dir(self):
+            if t.startswith("test_"):
+                self.pending.append(t)
+        self.pending.sort(reverse=True)
+        self.unclear = len(self.pending)
+
+    def delete_all_flows(self, dp):
+        """Send a flow-mod deleting every flow on the datapath.
+
+        NOTE(review): only OF1.0 and OF1.2 are handled; any other
+        protocol version would leave ``m`` unbound (NameError).
+        """
+        if dp.ofproto == ofproto_v1_0:
+            match = dp.ofproto_parser.OFPMatch(dp.ofproto.OFPFW_ALL,
+                                               0, 0, 0, 0, 0,
+                                               0, 0, 0, 0, 0, 0, 0)
+            m = dp.ofproto_parser.OFPFlowMod(dp, match, 0,
+                                             dp.ofproto.OFPFC_DELETE,
+                                             0, 0, 0, 0,
+                                             dp.ofproto.OFPP_NONE, 0, None)
+        elif dp.ofproto == ofproto_v1_2:
+            match = dp.ofproto_parser.OFPMatch()
+            m = dp.ofproto_parser.OFPFlowMod(dp, 0, 0, dp.ofproto.OFPTT_ALL,
+                                             dp.ofproto.OFPFC_DELETE,
+                                             0, 0, 0, 0xffffffff,
+                                             dp.ofproto.OFPP_ANY,
+                                             dp.ofproto.OFPG_ANY,
+                                             0, match, [])
+
+        dp.send_msg(m)
+
+    def send_flow_stats(self, dp):
+        """Request all flow stats; the reply triggers run_verify()."""
+        if dp.ofproto == ofproto_v1_0:
+            match = dp.ofproto_parser.OFPMatch(dp.ofproto.OFPFW_ALL,
+                                               0, 0, 0, 0, 0,
+                                               0, 0, 0, 0, 0, 0, 0)
+            m = dp.ofproto_parser.OFPFlowStatsRequest(dp, 0, match,
+                                                      0, dp.ofproto.OFPP_NONE)
+        elif dp.ofproto == ofproto_v1_2:
+            match = dp.ofproto_parser.OFPMatch()
+            m = dp.ofproto_parser.OFPFlowStatsRequest(dp, dp.ofproto.OFPTT_ALL,
+                                                      dp.ofproto.OFPP_ANY,
+                                                      dp.ofproto.OFPG_ANY,
+                                                      0, 0, match)
+
+        dp.send_msg(m)
+
+    def verify_default(self, dp, stats):
+        # fallback when the subclass supplies no verify_<name>() method;
+        # the returned string is recorded as the (failing) test result
+        return 'function %s() is not found.' % ("verify" + self.current[4:], )
+
+    def start_next_test(self, dp):
+        """Clear the switch and kick off the next pending test (if any)."""
+        self.delete_all_flows(dp)
+        dp.send_barrier()
+        if len(self.pending):
+            t = self.pending.pop()
+            if self.is_supported(t):
+                LOG.info(LOG_TEST_START, t)
+                self.current = t
+                # invoke test_<name>(dp), then ask for flow stats so the
+                # stats-reply handler can run the matching verifier
+                getattr(self, t)(dp)
+                dp.send_barrier()
+                self.send_flow_stats(dp)
+            else:
+                self.results[t] = LOG_TEST_UNSUPPORTED
+                self.unclear -= 1
+                # recurse until a supported test is found or none remain
+                self.start_next_test(dp)
+        else:
+            self.print_results()
+
+    def print_results(self):
+        """Log a per-test summary plus OK/NG/SKIP counts."""
+        LOG.info("TEST_RESULTS:")
+        ok = 0
+        ng = 0
+        skip = 0
+        for t in sorted(self.results.keys()):
+            if self.results[t] is True:
+                ok += 1
+            elif self.results[t] == LOG_TEST_UNSUPPORTED:
+                skip += 1
+            else:
+                ng += 1
+            LOG.info("    %s: %s", t, self.results[t])
+        LOG.info(LOG_TEST_FINISH, self.unclear == 0, ok, ng, skip)
+
+    @handler.set_ev_cls(ofp_event.EventOFPFlowStatsReply,
+                        handler.MAIN_DISPATCHER)
+    def flow_reply_handler(self, ev):
+        self.run_verify(ev)
+
+    @handler.set_ev_cls(ofp_event.EventOFPStatsReply,
+                        handler.MAIN_DISPATCHER)
+    def stats_reply_handler(self, ev):
+        self.run_verify(ev)
+
+    def run_verify(self, ev):
+        """Run verify_<current>() on a stats reply and record the result."""
+        msg = ev.msg
+        dp = msg.datapath
+
+        verify_func = self.verify_default
+        # e.g. current "test_foo" -> verifier "verify_foo"
+        v = "verify" + self.current[4:]
+        if hasattr(self, v):
+            verify_func = getattr(self, v)
+
+        result = verify_func(dp, msg.body)
+        if result is True:
+            self.unclear -= 1
+
+        self.results[self.current] = result
+        self.start_next_test(dp)
+
+    @handler.set_ev_cls(dpset.EventDP)
+    def handler_datapath(self, ev):
+        # a switch joined: start (or continue) the test sequence on it
+        if ev.enter:
+            self.start_next_test(ev.dp)
+
+    @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
+    def barrier_replay_handler(self, ev):
+        # barriers are only used for ordering; nothing to do on reply
+        pass
+
+    # ---- conversion helpers for subclass verifiers ----
+
+    def haddr_to_str(self, addr):
+        return mac.haddr_to_str(addr)
+
+    def haddr_to_bin(self, string):
+        return mac.haddr_to_bin(string)
+
+    def haddr_masked(self, haddr_bin, mask_bin):
+        # bitwise AND of two binary MAC addresses
+        return mac.haddr_bitand(haddr_bin, mask_bin)
+
+    def ipv4_to_str(self, integre):
+        # NOTE(review): parameter name "integre" looks like a typo for
+        # "integer"; kept as-is
+        ip_list = [str((integre >> (24 - (n * 8)) & 255)) for n in range(4)]
+        return '.'.join(ip_list)
+
+    def ipv4_to_int(self, string):
+        ip = string.split('.')
+        assert len(ip) == 4
+        i = 0
+        for b in ip:
+            b = int(b)
+            i = (i << 8) | b
+        return i
+
+    def ipv4_masked(self, ip_int, mask_int):
+        return ip_int & mask_int
+
+    def ipv6_to_str(self, integres):
+        # list of eight 16-bit groups -> colon-separated hex string
+        return ':'.join(hex(x)[2:] for x in integres)
+
+    def ipv6_to_int(self, string):
+        ip = string.split(':')
+        assert len(ip) == 8
+        return [int(x, 16) for x in ip]
+
+    def ipv6_masked(self, ipv6_int, mask_int):
+        # NOTE(review): itertools.izip exists only on Python 2; on
+        # Python 3 this raises AttributeError (builtin zip is the
+        # equivalent there)
+        return [x & y for (x, y) in
+                itertools.izip(ipv6_int, mask_int)]
+
+    def is_supported(self, t):
+        # subclasses override to skip tests their switch cannot run
+        return True
diff --git a/tests/integrated/vrrp_common.py b/tests/integrated/vrrp_common.py
new file mode 100644
index 00000000..58621f45
--- /dev/null
+++ b/tests/integrated/vrrp_common.py
@@ -0,0 +1,221 @@
+# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
+# Copyright (C) 2013 Isaku Yamahata <yamahata at valinux co jp>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import time
+import random
+
+from ryu.base import app_manager
+from ryu.lib import hub
+from ryu.lib import mac as lib_mac
+from ryu.lib.packet import vrrp
+from ryu.services.protocols.vrrp import api as vrrp_api
+from ryu.services.protocols.vrrp import event as vrrp_event
+
+
+_VRID = 7
+_PRIMARY_IP_ADDRESS0 = '10.0.0.2'
+_PRIMARY_IP_ADDRESS1 = '10.0.0.3'
+
+
+class VRRPCommon(app_manager.RyuApp):
+    """Common driver for the VRRP multi-router integration tests.
+
+    A subclass supplies _IFNAME0/_IFNAME1 and _configure_vrrp_router(),
+    binding VRRP instances either to Linux network devices or to
+    OpenFlow ports.
+    """
+    # interface identifiers; interpretation is subclass-specific
+    _IFNAME0 = None
+    _IFNAME1 = None
+
+    def __init__(self, *args, **kwargs):
+        super(VRRPCommon, self).__init__(*args, **kwargs)
+
+    def _main(self):
+        # exercise VRRPv3 first, then VRRPv2
+        self._main_version(vrrp.VRRP_VERSION_V3)
+        self._main_version(vrrp.VRRP_VERSION_V2)
+        print("done!")
+
+    def _main_version(self, vrrp_version):
+        # run the scenario for each interesting priority of the special
+        # _VRID router pair
+        self._main_version_priority(vrrp_version,
+                                    vrrp.VRRP_PRIORITY_ADDRESS_OWNER)
+        self._main_version_priority(vrrp_version,
+                                    vrrp.VRRP_PRIORITY_BACKUP_MAX)
+        self._main_version_priority(vrrp_version,
+                                    vrrp.VRRP_PRIORITY_BACKUP_DEFAULT)
+        self._main_version_priority(vrrp_version,
+                                    vrrp.VRRP_PRIORITY_BACKUP_MIN)
+
+    def _main_version_priority(self, vrrp_version, priority):
+        # once without and once with the polling/assertion phases
+        self._main_version_priority_sleep(vrrp_version, priority, False)
+        self._main_version_priority_sleep(vrrp_version, priority, True)
+
+    def _check(self, vrrp_api, instances):
+        """Poll until every vrid has exactly one MASTER instance.
+
+        NOTE: the ``vrrp_api`` parameter shadows the module-level import
+        of the same name; the caller passes the module itself.
+        ``instances`` maps vrid -> {0: reply, 1: reply} for each router
+        pair.  Retries (with 1s sleeps) while instances are still in
+        INITIALIZE state or while a lower-priority instance is
+        transiently MASTER.
+        """
+        while True:
+            # wait until all expected instances exist and none is still
+            # initializing
+            while True:
+                rep = vrrp_api.vrrp_list(self)
+                if len(rep.instance_list) >= len(instances) * 2:
+                    if any(i.state == vrrp_event.VRRP_STATE_INITIALIZE
+                           for i in rep.instance_list):
+                        continue
+                    break
+                print('%s / %s' % (len(rep.instance_list), len(instances) * 2))
+                time.sleep(1)
+
+#            for i in rep.instance_list:
+#                print('%s %s %s %s %s' % (i.instance_name,
+#                                          i.monitor_name,
+#                                          i.config,
+#                                          i.interface,
+#                                          i.state))
+            assert len(rep.instance_list) == len(instances) * 2
+            num_of_master = 0
+            d = dict(((i.instance_name, i) for i in rep.instance_list))
+            bad = 0
+            for i in rep.instance_list:
+                assert i.state in (vrrp_event.VRRP_STATE_MASTER,
+                                   vrrp_event.VRRP_STATE_BACKUP)
+                if i.state == vrrp_event.VRRP_STATE_MASTER:
+                    num_of_master += 1
+
+                # the member with the lower priority must not be MASTER;
+                # count violations as "bad" (they may be transient)
+                vr = instances[i.config.vrid]
+                if (vr[0].config.priority > vr[1].config.priority and
+                    i.instance_name == vr[1].instance_name) or \
+                    (vr[0].config.priority < vr[1].config.priority and
+                        i.instance_name == vr[0].instance_name):
+                    if i.state == vrrp_event.VRRP_STATE_MASTER:
+                        print("bad master:")
+                        print('%s %s' % (d[vr[0].instance_name].state,
+                                         d[vr[0].instance_name].config.priority))
+                        print('%s %s' % (d[vr[1].instance_name].state,
+                                         d[vr[1].instance_name].config.priority))
+                        bad += 1
+#                        assert i.state != vrrp_event.VRRP_STATE_MASTER
+            if bad > 0:
+                # this could be a transient state
+                print("%s bad masters" % bad)
+                time.sleep(1)
+                continue
+            if num_of_master >= len(instances):
+                # exactly one master per vrid: success
+                assert num_of_master == len(instances)
+                break
+            print('%s / %s' % (num_of_master, len(instances)))
+            time.sleep(1)
+            continue
+
+    def _main_version_priority_sleep(self, vrrp_version, priority, do_sleep):
+        """Run one full create/shuffle/shutdown cycle.
+
+        Creates a pair of VRRP routers for vrids 1..255 step 5 (the
+        special _VRID pair uses ``priority``), optionally verifies
+        master election, randomizes priorities, then shuts everything
+        down.  When ``do_sleep`` is False the verification/polling
+        phases between steps are skipped.
+        """
+        app_mgr = app_manager.AppManager.get_instance()
+        self.logger.debug('%s', app_mgr.applications)
+        vrrp_mgr = app_mgr.applications['VRRPManager']
+
+        step = 5
+        instances = {}
+        for vrid in range(1, 256, step):
+            if vrid == _VRID:
+                continue
+            print("vrid %s" % vrid)
+            l = {}
+            # priorities are derived from vrid, clamped to the valid
+            # backup range, so the two members always differ
+            prio = max(vrrp.VRRP_PRIORITY_BACKUP_MIN,
+                       min(vrrp.VRRP_PRIORITY_BACKUP_MAX, vrid))
+            rep0 = self._configure_vrrp_router(vrrp_version,
+                                               prio,
+                                               _PRIMARY_IP_ADDRESS0,
+                                               self._IFNAME0,
+                                               vrid)
+            assert rep0.instance_name is not None
+            l[0] = rep0
+            prio = max(vrrp.VRRP_PRIORITY_BACKUP_MIN,
+                       min(vrrp.VRRP_PRIORITY_BACKUP_MAX, 256 - vrid))
+            rep1 = self._configure_vrrp_router(vrrp_version,
+                                               prio,
+                                               _PRIMARY_IP_ADDRESS1,
+                                               self._IFNAME1,
+                                               vrid)
+            assert rep1.instance_name is not None
+            l[1] = rep1
+            instances[vrid] = l
+
+        # the special pair: member 0 gets the priority under test,
+        # member 1 the default backup priority
+        print("vrid %s" % _VRID)
+        l = {}
+        rep0 = self._configure_vrrp_router(vrrp_version, priority,
+                                           _PRIMARY_IP_ADDRESS0,
+                                           self._IFNAME0, _VRID)
+        assert rep0.instance_name is not None
+        l[0] = rep0
+        rep1 = self._configure_vrrp_router(
+            vrrp_version, vrrp.VRRP_PRIORITY_BACKUP_DEFAULT,
+            _PRIMARY_IP_ADDRESS1, self._IFNAME1, _VRID)
+        assert rep1.instance_name is not None
+        l[1] = rep1
+        instances[_VRID] = l
+
+        self.logger.debug('%s', vrrp_mgr._instances)
+
+        if do_sleep:
+            print("priority %s" % priority)
+            print("waiting for instances starting")
+
+            self._check(vrrp_api, instances)
+
+        # give one member of each non-special pair a random priority to
+        # force re-election
+        for vrid in instances.keys():
+            if vrid == _VRID:
+                continue
+            which = vrid & 1
+            new_priority = int(random.uniform(vrrp.VRRP_PRIORITY_BACKUP_MIN,
+                                              vrrp.VRRP_PRIORITY_BACKUP_MAX))
+            i = instances[vrid][which]
+            vrrp_api.vrrp_config_change(self, i.instance_name,
+                                        priority=new_priority)
+            i.config.priority = new_priority
+
+        if do_sleep:
+            print("priority shuffled")
+
+            self._check(vrrp_api, instances)
+
+        # shut down one member of each pair (plus member 0 of _VRID)
+        for vrid in instances.keys():
+            if vrid == _VRID:
+                continue
+            which = vrid & 1
+            vrrp_api.vrrp_shutdown(self, instances[vrid][which].instance_name)
+        vrrp_api.vrrp_shutdown(self, instances[_VRID][0].instance_name)
+
+        if do_sleep:
+            print("shutting down instances")
+            while True:
+                rep = vrrp_api.vrrp_list(self)
+                if len(rep.instance_list) <= len(instances):
+                    break
+                print("left %s" % len(rep.instance_list))
+                time.sleep(1)
+            assert len(rep.instance_list) == len(instances)
+            # the surviving member of each pair should take over
+            print("waiting for the rest becoming master")
+            while True:
+                rep = vrrp_api.vrrp_list(self)
+                if all(i.state == vrrp_event.VRRP_STATE_MASTER
+                       for i in rep.instance_list):
+                    break
+                time.sleep(1)
+
+        # shut down the remaining members
+        vrrp_api.vrrp_shutdown(self, instances[_VRID][1].instance_name)
+        for vrid in instances.keys():
+            if vrid == _VRID:
+                continue
+            which = 1 - (vrid & 1)
+            vrrp_api.vrrp_shutdown(self, instances[vrid][which].instance_name)
+
+        print("waiting for instances shutting down")
+        while True:
+            rep = vrrp_api.vrrp_list(self)
+            if not rep.instance_list:
+                break
+            print("left %s" % len(rep.instance_list))
+            time.sleep(1)