pox3

git clone git://git.codymlewis.com/pox3.git

commit dcb1f016d52e08fe2635e0297d14851a77ada365
parent 651151f9ee04ee21e77977e081f96941b85dbbb5
Author: Cody Lewis <cody@codymlewis.com>
Date:   Tue, 14 Apr 2020 13:16:53 +1000

Set up package

Diffstat:
A MANIFEST.in | 7 +
M README.md | 7 +-
D pox/__init__.py | 20 -
D pox/boot.py | 556 -
D pox/core.py | 617 -
D pox/datapaths/__init__.py | 120 -
D pox/datapaths/ctl.py | 139 -
D pox/datapaths/nx_switch.py | 160 -
D pox/datapaths/pcap_switch.py | 271 -
D pox/datapaths/switch.py | 1308 -
D pox/forwarding/hub.py | 54 -
D pox/forwarding/l2_flowvisor.py | 136 -
D pox/forwarding/l2_learning.py | 215 -
D pox/forwarding/l2_multi.py | 506 -
D pox/forwarding/l2_nx.py | 126 -
D pox/forwarding/l2_nx_self_learning.py | 70 -
D pox/forwarding/l2_pairs.py | 87 -
D pox/forwarding/l3_learning.py | 349 -
D pox/forwarding/topo_proactive.py | 480 -
D pox/help.py | 161 -
D pox/host_tracker/__init__.py | 40 -
D pox/host_tracker/host_tracker.py | 416 -
D pox/info/debug_deadlock.py | 55 -
D pox/info/packet_dump.py | 108 -
D pox/info/recoco_spy.py | 108 -
D pox/info/switch_info.py | 85 -
D pox/lib/addresses.py | 845 -
D pox/lib/graph/graph.py | 709 -
D pox/lib/graph/nom.py | 152 -
D pox/lib/ioworker/__init__.py | 467 -
D pox/lib/ioworker/notify_demo.py | 168 -
D pox/lib/ioworker/workers.py | 224 -
D pox/lib/packet/__init__.py | 96 -
D pox/lib/packet/arp.py | 177 -
D pox/lib/packet/dhcp.py | 599 -
D pox/lib/packet/dns.py | 530 -
D pox/lib/packet/ethernet.py | 179 -
D pox/lib/packet/icmpv6.py | 1015 -
D pox/lib/packet/igmp.py | 120 -
D pox/lib/packet/ipv4.py | 182 -
D pox/lib/packet/ipv6.py | 437 -
D pox/lib/packet/llc.py | 127 -
D pox/lib/packet/lldp.py | 543 -
D pox/lib/packet/packet_base.py | 207 -
D pox/lib/packet/rip.py | 199 -
D pox/lib/packet/tcp.py | 709 -
D pox/lib/pxpcap/__init__.py | 444 -
D pox/lib/pxpcap/dump_trace.py | 120 -
D pox/lib/pxpcap/strip_openflow.py | 93 -
D pox/lib/recoco/consumer.py | 111 -
D pox/lib/recoco/events.py | 103 -
D pox/lib/recoco/examples.py | 83 -
D pox/lib/recoco/recoco.py | 998 -
D pox/lib/revent/__init__.py | 2 -
D pox/lib/revent/revent.py | 578 -
D pox/lib/socketcapture.py | 191 -
D pox/lib/util.py | 568 -
D pox/log/__init__.py | 149 -
D pox/log/color.py | 209 -
D pox/log/level.py | 48 -
D pox/messenger/__init__.py | 692 -
D pox/messenger/ajax_transport.py | 333 -
D pox/messenger/example.py | 115 -
D pox/messenger/log_service.py | 255 -
D pox/messenger/tcp_transport.py | 235 -
D pox/messenger/test_client.py | 108 -
D pox/messenger/web_transport.py | 290 -
D pox/misc/cbench.py | 44 -
D pox/misc/full_payload.py | 32 -
D pox/misc/gephi_topo.py | 246 -
D pox/misc/ip_loadbalancer.py | 360 -
D pox/misc/mac_blocker.py | 140 -
D pox/misc/nat.py | 464 -
D pox/misc/of_tutorial.py | 149 -
D pox/misc/pidfile.py | 89 -
D pox/misc/telnetd/__init__.py | 1781 -
D pox/openflow/__init__.py | 418 -
D pox/openflow/discovery.py | 486 -
D pox/openflow/flow_table.py | 353 -
D pox/openflow/keepalive.py | 66 -
D pox/openflow/libopenflow_01.py | 4462 -
D pox/openflow/nicira.py | 2742 -
D pox/openflow/of_01.py | 1228 -
D pox/openflow/of_json.py | 329 -
D pox/openflow/of_service.py | 222 -
D pox/openflow/spanning_tree.py | 278 -
D pox/openflow/topology.py | 460 -
D pox/openflow/util.py | 95 -
D pox/openflow/webservice.py | 222 -
D pox/proto/__init__.py | 23 -
D pox/proto/arp_helper.py | 275 -
D pox/proto/arp_responder.py | 293 -
D pox/proto/dhcp_client.py | 591 -
D pox/proto/dhcpd.py | 525 -
D pox/proto/dns_spy.py | 146 -
D pox/proto/pong.py | 95 -
D pox/py.py | 128 -
D pox/samples/__init__.py | 0
D pox/samples/httopo.py | 27 -
D pox/samples/mixed_switches.py | 55 -
D pox/samples/pretty_log.py | 33 -
D pox/samples/spanning_tree.py | 41 -
D pox/samples/topo.py | 27 -
D pox/tk.py | 138 -
D pox/web/__init__.py | 23 -
D pox/web/jsonrpc.py | 282 -
D pox/web/webcore.py | 494 -
A pox3/__init__.py | 20 +
A pox3/boot.py | 556 +
A pox3/core.py | 617 +
A pox3/datapaths/__init__.py | 120 +
A pox3/datapaths/ctl.py | 139 +
A pox3/datapaths/nx_switch.py | 160 +
A pox3/datapaths/pcap_switch.py | 271 +
A pox3/datapaths/switch.py | 1308 +
R pox/forwarding/__init__.py -> pox3/forwarding/__init__.py | 0
A pox3/forwarding/hub.py | 54 +
A pox3/forwarding/l2_flowvisor.py | 136 +
A pox3/forwarding/l2_learning.py | 215 +
A pox3/forwarding/l2_multi.py | 506 +
A pox3/forwarding/l2_nx.py | 126 +
A pox3/forwarding/l2_nx_self_learning.py | 70 +
A pox3/forwarding/l2_pairs.py | 87 +
A pox3/forwarding/l3_learning.py | 349 +
A pox3/forwarding/topo_proactive.py | 480 +
A pox3/help.py | 161 +
A pox3/host_tracker/__init__.py | 40 +
A pox3/host_tracker/host_tracker.py | 416 +
R pox/info/__init__.py -> pox3/info/__init__.py | 0
A pox3/info/debug_deadlock.py | 55 +
A pox3/info/packet_dump.py | 108 +
A pox3/info/recoco_spy.py | 108 +
A pox3/info/switch_info.py | 85 +
R pox/lib/__init__.py -> pox3/lib/__init__.py | 0
A pox3/lib/addresses.py | 845 +
R pox/lib/epoll_select.py -> pox3/lib/epoll_select.py | 0
R pox/lib/graph/__init__.py -> pox3/lib/graph/__init__.py | 0
A pox3/lib/graph/graph.py | 709 +
R pox/lib/graph/minigraph.py -> pox3/lib/graph/minigraph.py | 0
A pox3/lib/graph/nom.py | 152 +
A pox3/lib/ioworker/__init__.py | 467 +
A pox3/lib/ioworker/notify_demo.py | 168 +
A pox3/lib/ioworker/workers.py | 224 +
R pox/lib/mock_socket.py -> pox3/lib/mock_socket.py | 0
R pox/lib/oui.txt -> pox3/lib/oui.txt | 0
A pox3/lib/packet/__init__.py | 96 +
A pox3/lib/packet/arp.py | 177 +
A pox3/lib/packet/dhcp.py | 599 +
A pox3/lib/packet/dns.py | 530 +
R pox/lib/packet/eap.py -> pox3/lib/packet/eap.py | 0
R pox/lib/packet/eapol.py -> pox3/lib/packet/eapol.py | 0
A pox3/lib/packet/ethernet.py | 179 +
R pox/lib/packet/icmp.py -> pox3/lib/packet/icmp.py | 0
A pox3/lib/packet/icmpv6.py | 1015 +
A pox3/lib/packet/igmp.py | 120 +
A pox3/lib/packet/ipv4.py | 182 +
A pox3/lib/packet/ipv6.py | 437 +
A pox3/lib/packet/llc.py | 127 +
A pox3/lib/packet/lldp.py | 543 +
R pox/lib/packet/mpls.py -> pox3/lib/packet/mpls.py | 0
A pox3/lib/packet/packet_base.py | 207 +
R pox/lib/packet/packet_utils.py -> pox3/lib/packet/packet_utils.py | 0
A pox3/lib/packet/rip.py | 199 +
A pox3/lib/packet/tcp.py | 709 +
R pox/lib/packet/udp.py -> pox3/lib/packet/udp.py | 0
R pox/lib/packet/vlan.py -> pox3/lib/packet/vlan.py | 0
A pox3/lib/pxpcap/__init__.py | 444 +
A pox3/lib/pxpcap/dump_trace.py | 120 +
R pox/lib/pxpcap/parser.py -> pox3/lib/pxpcap/parser.py | 0
R pox/lib/pxpcap/pxpcap_c/build_linux -> pox3/lib/pxpcap/pxpcap_c/build_linux | 0
R pox/lib/pxpcap/pxpcap_c/build_mac -> pox3/lib/pxpcap/pxpcap_c/build_mac | 0
R pox/lib/pxpcap/pxpcap_c/build_win.bat -> pox3/lib/pxpcap/pxpcap_c/build_win.bat | 0
R pox/lib/pxpcap/pxpcap_c/pxpcap.cpp -> pox3/lib/pxpcap/pxpcap_c/pxpcap.cpp | 0
R pox/lib/pxpcap/pxpcap_c/setup.py -> pox3/lib/pxpcap/pxpcap_c/setup.py | 0
A pox3/lib/pxpcap/strip_openflow.py | 93 +
R pox/lib/pxpcap/writer.py -> pox3/lib/pxpcap/writer.py | 0
R pox/lib/recoco/__init__.py -> pox3/lib/recoco/__init__.py | 0
A pox3/lib/recoco/consumer.py | 111 +
A pox3/lib/recoco/events.py | 103 +
A pox3/lib/recoco/examples.py | 83 +
A pox3/lib/recoco/recoco.py | 998 +
A pox3/lib/revent/__init__.py | 2 +
A pox3/lib/revent/revent.py | 578 +
A pox3/lib/socketcapture.py | 191 +
R pox/lib/threadpool.py -> pox3/lib/threadpool.py | 0
A pox3/lib/util.py | 568 +
A pox3/log/__init__.py | 149 +
A pox3/log/color.py | 209 +
A pox3/log/level.py | 48 +
R pox/log/logging.cfg.template -> pox3/log/logging.cfg.template | 0
A pox3/messenger/__init__.py | 692 +
A pox3/messenger/ajax_transport.py | 333 +
A pox3/messenger/example.py | 115 +
A pox3/messenger/log_service.py | 255 +
A pox3/messenger/tcp_transport.py | 235 +
A pox3/messenger/test_client.py | 108 +
A pox3/messenger/web_transport.py | 290 +
R pox/misc/__init__.py -> pox3/misc/__init__.py | 0
A pox3/misc/cbench.py | 44 +
A pox3/misc/full_payload.py | 32 +
A pox3/misc/gephi_topo.py | 246 +
A pox3/misc/ip_loadbalancer.py | 360 +
A pox3/misc/mac_blocker.py | 140 +
A pox3/misc/nat.py | 464 +
A pox3/misc/of_tutorial.py | 149 +
A pox3/misc/pidfile.py | 89 +
A pox3/misc/telnetd/__init__.py | 1780 +
A pox3/openflow/__init__.py | 418 +
R pox/openflow/debug.py -> pox3/openflow/debug.py | 0
A pox3/openflow/discovery.py | 486 +
A pox3/openflow/flow_table.py | 353 +
A pox3/openflow/keepalive.py | 66 +
A pox3/openflow/libopenflow_01.py | 4462 +
A pox3/openflow/nicira.py | 2742 +
A pox3/openflow/of_01.py | 1228 +
A pox3/openflow/of_json.py | 329 +
A pox3/openflow/of_service.py | 222 +
A pox3/openflow/spanning_tree.py | 278 +
A pox3/openflow/topology.py | 460 +
A pox3/openflow/util.py | 95 +
A pox3/openflow/webservice.py | 222 +
A pox3/proto/__init__.py | 23 +
A pox3/proto/arp_helper.py | 275 +
A pox3/proto/arp_responder.py | 293 +
A pox3/proto/dhcp_client.py | 591 +
A pox3/proto/dhcpd.py | 525 +
A pox3/proto/dns_spy.py | 146 +
A pox3/proto/pong.py | 95 +
A pox3/py.py | 128 +
R pox/topology/__init__.py -> pox3/topology/__init__.py | 0
R pox/topology/topology.py -> pox3/topology/topology.py | 0
A pox3/web/__init__.py | 23 +
A pox3/web/jsonrpc.py | 282 +
A pox3/web/webcore.py | 494 +
R pox/web/www_root/index.html -> pox3/web/www_root/index.html | 0
R pox/web/www_root/webmessenger.html -> pox3/web/www_root/webmessenger.html | 0
R pox/web/www_root/webmessenger.js -> pox3/web/www_root/webmessenger.js | 0
A samples/httopo.py | 27 +
A samples/mixed_switches.py | 55 +
A samples/pretty_log.py | 33 +
A samples/spanning_tree.py | 41 +
A samples/topo.py | 27 +
A setup.py | 23 +
D tools/pox-log.py | 127 -
D tools/pox-pydoc.py | 2361 -
245 files changed, 37356 insertions(+), 39948 deletions(-)

diff --git a/MANIFEST.in b/MANIFEST.in @@ -0,0 +1,7 @@ +include pox3/lib/oui.txt +include pox3/web/*.html +include pox3/web/*.js +include pox3/lib/pxpcap/pxpcap_c/build_linux +include pox3/lib/pxpcap/pxpcap_c/build_mac +include pox3/lib/pxpcap/pxpcap_c/build_win.bat +include pox3/lib/pxpcap/pxpcap_c/pxpcap.cpp diff --git a/README.md b/README.md @@ -17,12 +17,17 @@ fine with Python 3.7 or even 3.6), and should run under Linux, Mac OS, and Windo POX3 currently communicates with OpenFlow 1.0 switches and includes special support for the Open vSwitch/Nicira extensions. +## Installation +```sh +pip3 install pox3 +``` + ## API The following example runs with debug logging the of tutorial, it acts like a simple hub ```python -from pox.boot import boot +from pox3.boot import boot boot(["log.level", "--DEBUG", "misc.of_tutorial"]) ``` diff --git a/pox/__init__.py b/pox/__init__.py @@ -1,20 +0,0 @@ -# Copyright 2011 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This is the POX network controller framework. - -Presently, this basically means it's a framework for writing OpenFlow -controllers, some utilities, and some examples. -""" diff --git a/pox/boot.py b/pox/boot.py @@ -1,556 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2011,2012,2013 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# If you have PyPy 1.6+ in a directory called pypy alongside pox.py, we -# use it. -# Otherwise, we try to use a Python interpreter called python2.7, which -# is a good idea if you're using Python from MacPorts, for example. -# We fall back to just "python" and hope that works. - - -import logging -import logging.config -import os -import sys -import traceback -import time -import inspect -import types -import threading - -import pox.core -core = None - -import pox.openflow -from pox.lib.util import str_to_bool - -# Function to run on main thread -_main_thread_function = None - -try: - import __pypy__ -except ImportError: - __pypy__ = None - -def _do_import (name): - """ - Try to import the named component. - Returns its module name if it was loaded or False on failure. 
- """ - - def show_fail (): - traceback.print_exc() - print("Could not import module:", name) - - def do_import2 (base_name, names_to_try): - if len(names_to_try) == 0: - print("Module not found:", base_name) - return False - - name = names_to_try.pop(0) - - if name in sys.modules: - return name - - try: - __import__(name, level=0) - return name - except ImportError: - # There are two cases why this might happen: - # 1. The named module could not be found - # 2. Some dependent module (import foo) or some dependent - # name-in-a-module (e.g., from foo import bar) could not be found. - # If it's the former, we might try a name variation (e.g., without - # a leading "pox."), but if we ultimately can't find the named - # module, we just say something along those lines and stop. - # On the other hand, if the problem is with a dependency, we should - # print a stack trace so that it can be fixed. - # Sorting out the two cases is an ugly hack. - - message = str(sys.exc_info()[1].args[0]) - s = message.replace("'", "").rsplit(" ", 1) - - if s[0] == "No module named" and name.endswith(s[1]): - # It was the one we tried to import itself. (Case 1) - # If we have other names to try, try them! - return do_import2(base_name, names_to_try) - elif message == "Import by filename is not supported.": - print(message) - import os.path - n = name.replace("/", ".").replace("\\", ".") - n = n.replace( os.path.sep, ".") - if n.startswith("pox.") or n.startswith("ext."): - n = n[4:] - print("Maybe you meant to run '%s'?" % (n,)) - return False - else: - # This means we found the module we were looking for, but one - # of its dependencies was missing. - show_fail() - return False - except: - # There was some other sort of exception while trying to load the - # module. Just print a trace and call it a day. - show_fail() - return False - - return do_import2(name, ["pox." 
+ name, name]) - - -def _do_imports (components): - """ - Import each of the listed components - - Returns map of component_name->name,module,members on success, - or False on failure - """ - done = {} - for name in components: - if name in done: continue - r = _do_import(name) - if r is False: - return False - members = dict(inspect.getmembers(sys.modules[r])) - done[name] = (r,sys.modules[r],members) - - return done - - -def _do_launch (argv): - component_order = [] - components = {} - - curargs = {} - pox_options = curargs - - for arg in argv: - if not arg.startswith("-"): - if arg not in components: - components[arg] = [] - curargs = {} - components[arg].append(curargs) - component_order.append(arg) - else: - arg = arg.lstrip("-").split("=", 1) - arg[0] = arg[0].replace("-", "_") - if len(arg) == 1: arg.append(True) - curargs[arg[0]] = arg[1] - - _options.process_options(pox_options) - global core - if pox.core.core is not None: - core = pox.core.core - core.getLogger('boot').debug('Using existing POX core') - else: - core = pox.core.initialize(_options.threaded_selecthub, - _options.epoll_selecthub, - _options.handle_signals) - - _pre_startup() - modules = _do_imports(n.split(':')[0] for n in component_order) - if modules is False: - return False - - inst = {} - for name in component_order: - cname = name - inst[name] = inst.get(name, -1) + 1 - params = components[name][inst[name]] - name = name.split(":", 1) - launch = name[1] if len(name) == 2 else "launch" - name = name[0] - - name,module,members = modules[name] - - if launch in members: - f = members[launch] - # We explicitly test for a function and not an arbitrary callable - if type(f) is not types.FunctionType: - print(launch, "in", name, "isn't a function!") - return False - - if getattr(f, '_pox_eval_args', False): - import ast - for k,v in list(params.items()): - if isinstance(v, str): - try: - params[k] = ast.literal_eval(v) - except: - # Leave it as a string - pass - - multi = False - if f.__code__.co_argcount > 0: - #FIXME: This code doesn't look quite right to me and may be broken - # in some cases. We should refactor to use inspect anyway, - # which should hopefully just fix it. - if (f.__code__.co_varnames[f.__code__.co_argcount-1] - == '__INSTANCE__'): - # It's a multi-instance-aware component. - - multi = True - - # Special __INSTANCE__ paramter gets passed a tuple with: - # 1. The number of this instance (0...n-1) - # 2. The total number of instances for this module - # 3. True if this is the last instance, False otherwise - # The last is just a comparison between #1 and #2, but it's - # convenient. 
- params['__INSTANCE__'] = (inst[cname], len(components[cname]), - inst[cname] + 1 == len(components[cname])) - - if multi == False and len(components[cname]) != 1: - print(name, "does not accept multiple instances") - return False - - try: - if f(**params) is False: - # Abort startup - return False - except TypeError as exc: - instText = '' - if inst[cname] > 0: - instText = "instance {0} of ".format(inst[cname] + 1) - print("Error executing {2}{0}.{1}:".format(name,launch,instText)) - if inspect.currentframe() is sys.exc_info()[2].tb_frame: - # Error is with calling the function - # Try to give some useful feedback - if _options.verbose: - traceback.print_exc() - else: - exc = sys.exc_info()[0:2] - print(''.join(traceback.format_exception_only(*exc)), end='') - print() - EMPTY = "<Unspecified>" - code = f.__code__ - argcount = code.co_argcount - argnames = code.co_varnames[:argcount] - defaults = list((f.__defaults__) or []) - defaults = [EMPTY] * (argcount - len(defaults)) + defaults - args = {} - for n, a in enumerate(argnames): - args[a] = [EMPTY,EMPTY] - if n < len(defaults): - args[a][0] = defaults[n] - if a in params: - args[a][1] = params[a] - del params[a] - if '__INSTANCE__' in args: - del args['__INSTANCE__'] - - if f.__doc__ is not None: - print("Documentation for {0}:".format(name)) - doc = f.__doc__.split("\n") - #TODO: only strip the same leading space as was on the first - # line - doc = list(map(str.strip, doc)) - print('',("\n ".join(doc)).strip()) - - #print(params) - #print(args) - - print("Parameters for {0}:".format(name)) - if len(args) == 0: - print(" None.") - else: - print(" {0:25} {1:25} {2:25}".format("Name", "Default", - "Active")) - print(" {0:25} {0:25} {0:25}".format("-" * 15)) - - for k,v in args.items(): - print(" {0:25} {1:25} {2:25}".format(k,str(v[0]), - str(v[1] if v[1] is not EMPTY else v[0]))) - - if len(params): - print("This component does not have a parameter named " - + "'{0}'.".format(list(params.keys())[0])) - return False - missing = [k for k,x in args.items() - if x[1] is EMPTY and x[0] is EMPTY] - if len(missing): - print("You must specify a value for the '{0}' " - "parameter.".format(missing[0])) - return False - - return False - else: - # Error is inside the function - raise - elif len(params) > 0 or launch != "launch": - print("Module %s has no %s(), but it was specified or passed " \ - "arguments" % (name, launch)) - return False - - return True - - -class Options (object): - def set (self, given_name, value): - name = given_name.replace("-", "_") - if name.startswith("_") or hasattr(Options, name): - # Hey, what's that about? - print("Illegal option:", given_name) - return False - has_field = hasattr(self, name) - has_setter = hasattr(self, "_set_" + name) - if has_field == False and has_setter == False: - print("Unknown option:", given_name) - return False - if has_setter: - setter = getattr(self, "_set_" + name) - setter(given_name, name, value) - else: - if isinstance(getattr(self, name), bool): - # Automatic bool-ization - value = str_to_bool(value) - setattr(self, name, value) - return True - - def process_options (self, options): - for k,v in options.items(): - if self.set(k, v) is False: - # Bad option! - sys.exit(1) - - -_help_text = """ -POX is a Software Defined Networking controller framework. - -The commandline of POX is like: -pox.py [POX options] [C1 [C1 options]] [C2 [C2 options]] ... 
- -Notable POX options include: - --verbose Print more debugging information (especially useful for - problems on startup) - --no-openflow Don't automatically load the OpenFlow module - --log-config=F Load a Python log configuration file (if you include the - option without specifying F, it defaults to logging.cfg) - -C1, C2, etc. are component names (e.g., Python modules). Options they -support are up to the module. As an example, you can load a learning -switch app that listens on a non-standard port number by specifying an -option to the of_01 component, and loading the l2_learning component like: - ./pox.py --verbose openflow.of_01 --port=6634 forwarding.l2_learning - -The 'help' component can give help for other components. Start with: - ./pox.py help --help -""".strip() - - -class POXOptions (Options): - def __init__ (self): -# self.cli = True - self.verbose = False - self.enable_openflow = True - self.log_config = None - self.threaded_selecthub = True - self.epoll_selecthub = False - self.handle_signals = True - - def _set_h (self, given_name, name, value): - self._set_help(given_name, name, value) - - def _set_help (self, given_name, name, value): - print(_help_text) - #TODO: Summarize options, etc. - sys.exit(0) - - def _set_version (self, given_name, name, value): - global core - if core is None: - core = pox.core.initialize() - print(core._get_python_version()) - sys.exit(0) - - def _set_unthreaded_sh (self, given_name, name, value): - self.threaded_selecthub = False - - def _set_epoll_sh (self, given_name, name, value): - self.epoll_selecthub = str_to_bool(value) - - def _set_no_openflow (self, given_name, name, value): - self.enable_openflow = not str_to_bool(value) - -# def _set_no_cli (self, given_name, name, value): -# self.cli = not str_to_bool(value) - - def _set_log_config (self, given_name, name, value): - if value is True: - # I think I use a better method for finding the path elsewhere... - p = os.path.dirname(os.path.realpath(__file__)) - value = os.path.join(p, "..", "logging.cfg") - self.log_config = value - - def _set_debug (self, given_name, name, value): - value = str_to_bool(value) - if value: - # Debug implies no openflow and no CLI and verbose - #TODO: Is this really an option we need/want? - self.verbose = True - self.enable_openflow = False -# self.cli = False - - -_options = POXOptions() - - -def _pre_startup (): - """ - This function is called after all the POX options have been read in - but before any components are loaded. This gives a chance to do - early setup (e.g., configure logging before a component has a chance - to try to log something!). - """ - - _setup_logging() - - if _options.verbose: - logging.getLogger().setLevel(logging.DEBUG) - - if _options.enable_openflow: - pox.openflow._launch() # Default OpenFlow launch - - -def _post_startup (): - if _options.enable_openflow: - if core._openflow_wanted: - if not core.hasComponent("of_01"): - # Launch a default of_01 - import pox.openflow.of_01 - pox.openflow.of_01.launch() - else: - logging.getLogger("boot").debug("Not launching of_01") - - - -def _setup_logging (): - # First do some basic log config... - - # This is kind of a hack, but we need to keep track of the handler we - # install so that we can, for example, uninstall it later. This code - # originally lived in pox.core, so we explicitly reference it here. 
- pox.core._default_log_handler = logging.StreamHandler() - formatter = logging.Formatter(logging.BASIC_FORMAT) - pox.core._default_log_handler.setFormatter(formatter) - logging.getLogger().addHandler(pox.core._default_log_handler) - logging.getLogger().setLevel(logging.INFO) - - - # Now set up from config file if specified... - #TODO: - # I think we could move most of the special log stuff into - # the log module. You'd just have to make a point to put the log - # module first on the commandline if you wanted later component - # initializations to honor it. Or it could be special-cased? - - if _options.log_config is not None: - if not os.path.exists(_options.log_config): - print("Could not find logging config file:", _options.log_config) - sys.exit(2) - logging.config.fileConfig(_options.log_config, - disable_existing_loggers=True) - - -def set_main_function (f): - global _main_thread_function - if _main_thread_function == f: return True - if _main_thread_function is not None: - import logging - lg = logging.getLogger("boot") - lg.error("Could not set main thread function to: " + str(f)) - lg.error("The main thread function is already " - + "taken by: " + str(_main_thread_function)) - return False - _main_thread_function = f - return True - - -def boot (argv = None): - """ - Start up POX. - """ - - # Add pox directory to path - base = sys.path[0] - sys.path.insert(0, os.path.abspath(os.path.join(base, 'pox'))) - # sys.path.insert(0, os.path.abspath(os.path.join(base, 'ext'))) - - thread_count = threading.active_count() - - quiet = False - - try: - if argv is None: - argv = sys.argv[1:] - - # Always load cli (first!) - #TODO: Can we just get rid of the normal options yet? - pre = [] - while len(argv): - if argv[0].startswith("-"): - pre.append(argv.pop(0)) - else: - break - argv = pre + "py --disable".split() + argv - - if _do_launch(argv): - _post_startup() - core.goUp() - else: - #return - quiet = True - raise RuntimeError() - - except SystemExit: - return - except: - if not quiet: - traceback.print_exc() - - # Try to exit normally, but do a hard exit if we don't. - # This is sort of a hack. What's the better option? Raise - # the going down event on core even though we never went up? - - try: - for _ in range(4): - if threading.active_count() <= thread_count: - # Normal exit - return - time.sleep(0.25) - except: - pass - - os._exit(1) - return - - if _main_thread_function: - _main_thread_function() - else: - #core.acquire() - try: - while True: - if core.quit_condition.acquire(False): - core.quit_condition.wait(10) - core.quit_condition.release() - if not core.running: break - except: - pass - #core.scheduler._thread.join() # Sleazy - - try: - pox.core.core.quit() - except: - pass diff --git a/pox/core.py b/pox/core.py @@ -1,617 +0,0 @@ -# Copyright 2011-2014 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Some of POX's core API and functionality is here, largely in the POXCore -class (an instance of which is available as pox.core.core). 
- -This includes things like component rendezvous, logging, system status -(up and down events), etc. -""" - - - -# Set up initial log state -import logging - -import inspect -import time -import os -import signal - -_path = inspect.stack()[0][1] -_ext_path = _path[0:_path.rindex(os.sep)] -_ext_path = os.path.dirname(_ext_path) + os.sep -_path = os.path.dirname(_path) + os.sep - -SQUELCH_TIME = 5 - -_squelch = '' -_squelchTime = 0 -_squelchCount = 0 - -def getLogger (name=None, moreFrames=0): - """ - In general, you don't need to call this directly, and will use - core.getLogger() instead. - """ - if name is None: - s = inspect.stack()[1+moreFrames] - name = s[1] - if name.endswith('.py'): - name = name[0:-3] - elif name.endswith('.pyc'): - name = name[0:-4] - if name.startswith(_path): - name = name[len(_path):] - elif name.startswith(_ext_path): - name = name[len(_ext_path):] - name = name.replace('/', '.').replace('\\', '.') #FIXME: use os.path or whatever - - # Remove double names ("topology.topology" -> "topology") - if name.find('.') != -1: - n = name.split('.') - if len(n) >= 2: - if n[-1] == n[-2]: - del n[-1] - name = '.'.join(n) - - if name.startswith("ext."): - name = name.split("ext.",1)[1] - - if name.endswith(".__init__"): - name = name.rsplit(".__init__",1)[0] - - l = logging.getLogger(name) - g=globals() - if not hasattr(l, "print"): - def printmsg (*args, **kw): - #squelch = kw.get('squelch', True) - msg = ' '.join((str(s) for s in args)) - s = inspect.stack()[1] - o = '[' - if 'self' in s[0].f_locals: - o += s[0].f_locals['self'].__class__.__name__ + '.' - o += s[3] + ':' + str(s[2]) + '] ' - o += msg - if o == _squelch: - if time.time() >= _squelchTime: - l.debug("[Previous message repeated %i more times]" % (g['_squelchCount']+1,)) - g['_squelchCount'] = 0 - g['_squelchTime'] = time.time() + SQUELCH_TIME - else: - g['_squelchCount'] += 1 - else: - g['_squelch'] = o - if g['_squelchCount'] > 0: - l.debug("[Previous message repeated %i more times]" % (g['_squelchCount'],)) - g['_squelchCount'] = 0 - g['_squelchTime'] = time.time() + SQUELCH_TIME - l.debug(o) - - setattr(l, "print", printmsg) - setattr(l, "msg", printmsg) - - return l - - -# Working around something (don't remember what) -log = (lambda : getLogger())() - -from pox.lib.revent import * - -# Now use revent's exception hook to put exceptions in event handlers into -# the log... -def _revent_exception_hook (source, event, args, kw, exc_info): - try: - c = source - t = event - if hasattr(c, "__class__"): c = c.__class__.__name__ - if isinstance(t, Event): t = t.__class__.__name__ - elif issubclass(t, Event): t = t.__name__ - except: - pass - log.exception("Exception while handling %s!%s...\n" % (c,t)) -import pox.lib.revent.revent -pox.lib.revent.revent.handleEventException = _revent_exception_hook - -class GoingUpEvent (Event): - """ Fired when system is going up. """ - pass - -class GoingDownEvent (Event): - """ Fired when system is going down. """ - pass - -class UpEvent (Event): - """ Fired when system is up. """ - pass - -class DownEvent (Event): - """ Fired when system is down. """ - pass - -class ComponentRegistered (Event): - """ - This is raised by core whenever a new component is registered. - By watching this, a component can monitor whether other components it - depends on are available. - """ - def __init__ (self, name, component): - self.name = name - self.component = component - -class RereadConfiguration (Event): - """ Fired when modules should reread their configuration files. 
""" - pass - -import pox.lib.recoco as recoco - -class POXCore (EventMixin): - """ - A nexus of of the POX API. - - pox.core.core is a reference to an instance of this class. This class - serves a number of functions. - - An important one is that it can serve as a rendezvous point for - components. A component can register objects on core, and they can - then be accessed on the core object (e.g., if you register foo, then - there will then be a pox.core.core.foo). In many cases, this means you - won't need to import a module. - - Another purpose to the central registration is that it decouples - functionality from a specific module. If myL2Switch and yourL2Switch - both register as "switch" and both provide the same API, then it doesn't - matter. Doing this with imports is a pain. - - Additionally, a number of commmon API functions are vailable here. - """ - _eventMixin_events = set([ - UpEvent, - DownEvent, - GoingUpEvent, - GoingDownEvent, - ComponentRegistered, - RereadConfiguration, - ]) - - version = (0, 1, 0) - version_name = "auldridge" - - def __init__ (self, threaded_selecthub=True, epoll_selecthub=False, - handle_signals=True): - self.debug = False - self.running = True - self.starting_up = True - self.components = {'core':self} - - self._openflow_wanted = False - self._handle_signals = handle_signals - - import threading - self.quit_condition = threading.Condition() - - print(self.banner) - - self.scheduler = recoco.Scheduler(daemon=True, - threaded_selecthub=threaded_selecthub, - use_epoll=epoll_selecthub) - - self._waiters = [] # List of waiting components - - @property - def banner (self): - return "{0} / Copyright 2020 Cody Lewis, et al.".format( - self.version_string) - - @property - def version_string (self): - return "POX3 %s (%s)" % ('.'.join(map(str,self.version)),self.version_name) - - def callDelayed (_self, _seconds, _func, *args, **kw): - """ - Calls the function at a later time. - This is just a wrapper around a recoco timer. - """ - t = recoco.Timer(_seconds, _func, args=args, kw=kw, - scheduler = _self.scheduler) - return t - - def callLater (_self, _func, *args, **kw): - # first arg is `_self` rather than `self` in case the user wants - # to specify self as a keyword argument - """ - Call the given function with the given arguments within the context - of the co-operative threading environment. - It actually calls it sooner rather than later. ;) - Much of POX is written without locks because it's all thread-safe - with respect to itself, as it's written using the recoco co-operative - threading library. If you have a real thread outside of the - co-operative thread context, you need to be careful about calling - things within it. This function provides a rather simple way that - works for most situations: you give it a callable (like a method) - and some arguments, and it will call that callable with those - arguments from within the co-operative threader, taking care of - synchronization for you. - """ - _self.scheduler.callLater(_func, *args, **kw) - - def raiseLater (_self, _obj, *args, **kw): - # first arg is `_self` rather than `self` in case the user wants - # to specify self as a keyword argument - """ - This is similar to callLater(), but provides an easy way to raise a - revent event from outide the co-operative context. - Rather than foo.raiseEvent(BarEvent, baz, spam), you just do - core.raiseLater(foo, BarEvent, baz, spam). - """ - _self.scheduler.callLater(_obj.raiseEvent, *args, **kw) - - def getLogger (self, *args, **kw): - """ - Returns a logger. 
Pass it the name you want if you'd like to specify - one (e.g., core.getLogger("foo")). If you don't specify a name, it - will make one up based on the module name it is called from. - """ - return getLogger(moreFrames=1,*args, **kw) - - def quit (self): - """ - Shut down POX. - """ - import threading - if (self.starting_up or - threading.current_thread() is self.scheduler._thread): - t = threading.Thread(target=self._quit) - t.daemon = True - t.start() - else: - self._quit() - - def _quit (self): - # Should probably do locking here - if not self.running: - return - if self.starting_up: - # Try again later - self.quit() - return - - self.running = False - log.info("Going down...") - import gc - gc.collect() - self.raiseEvent(GoingDownEvent()) - self.callLater(self.scheduler.quit) - for i in range(50): - if self.scheduler._hasQuit: break - gc.collect() - time.sleep(.1) - if not self.scheduler._allDone: - log.warning("Scheduler didn't quit in time") - self.raiseEvent(DownEvent()) - log.info("Down.") - #logging.shutdown() - self.quit_condition.acquire() - self.quit_condition.notifyAll() - core.quit_condition.release() - - def _get_python_version (self): - try: - import platform - return "{impl} ({vers}/{build})".format( - impl=platform.python_implementation(), - vers=platform.python_version(), - build=platform.python_build()[1].replace(" "," ")) - except: - return "Unknown Python" - - def _get_platform_info (self): - try: - import platform - return platform.platform().split("\n")[0] - except: - return "Unknown Platform" - - def _add_signal_handlers (self): - if not self._handle_signals: - return - - import threading - if not isinstance(threading.current_thread(), type(threading.main_thread())): - raise RuntimeError("add_signal_handers must be called from MainThread") - - try: - previous = signal.getsignal(signal.SIGHUP) - signal.signal(signal.SIGHUP, self._signal_handler_SIGHUP) - if previous != signal.SIG_DFL: - log.warn('Redefined signal handler for SIGHUP') - except (AttributeError, ValueError): - # SIGHUP is not supported on some systems (e.g., Windows) - log.debug("Didn't install handler for SIGHUP") - - def _signal_handler_SIGHUP (self, signal, frame): - self.raiseLater(core, RereadConfiguration) - - def goUp (self): - log.debug(self.version_string + " going up...") - - log.debug("Running on " + self._get_python_version()) - log.debug("Platform is " + self._get_platform_info()) - try: - import platform - vers = '.'.join(platform.python_version().split(".")[:2]) - except: - vers = 'an unknown version' - if vers != "3.8": - l = logging.getLogger("version") - if not l.isEnabledFor(logging.WARNING): - l.setLevel(logging.WARNING) - l.warn("POX3 requires Python 3.8. 
You're running %s.", vers) - l.warn("If you run into problems, try using Python 3.8.") - - self.starting_up = False - self.raiseEvent(GoingUpEvent()) - - self._add_signal_handlers() - - self.raiseEvent(UpEvent()) - - self._waiter_notify() - - if self.running: - log.info(self.version_string + " is up.") - - def _waiter_notify (self): - if len(self._waiters): - waiting_for = set() - for entry in self._waiters: - _, name, components, _, _ = entry - components = [c for c in components if not self.hasComponent(c)] - waiting_for.update(components) - log.debug("%s still waiting for: %s" - % (name, " ".join(components))) - names = set([n for _,n,_,_,_ in self._waiters]) - - #log.info("%i things still waiting on %i components" - # % (names, waiting_for)) - log.warn("Still waiting on %i component(s)" % (len(waiting_for),)) - - def hasComponent (self, name): - """ - Returns True if a component with the given name has been registered. - """ - if name in ('openflow', 'OpenFlowConnectionArbiter'): - self._openflow_wanted = True - return name in self.components - - def registerNew (self, __componentClass, *args, **kw): - """ - Give it a class (and optional __init__ arguments), and it will - create an instance and register it using the class name. If the - instance has a _core_name property, it will use that instead. - It returns the new instance. - core.registerNew(FooClass, arg) is roughly equivalent to - core.register("FooClass", FooClass(arg)). - """ - name = __componentClass.__name__ - obj = __componentClass(*args, **kw) - if hasattr(obj, '_core_name'): - # Default overridden - name = obj._core_name - self.register(name, obj) - return obj - - def register (self, name, component=None): - """ - Makes the object "component" available as pox.core.core.name. - - If only one argument is specified, the given argument is registered - using its class name as the name. - """ - #TODO: weak references? - if component is None: - component = name - name = component.__class__.__name__ - if hasattr(component, '_core_name'): - # Default overridden - name = component._core_name - - if name in self.components: - log.warn("Warning: Registered '%s' multipled times" % (name,)) - self.components[name] = component - self.raiseEventNoErrors(ComponentRegistered, name, component) - self._try_waiters() - - def call_when_ready (self, callback, components=[], name=None, args=(), - kw={}): - """ - Calls a callback when components are ready. - """ - if callback is None: - callback = lambda:None - callback.__name__ = "<None>" - if isinstance(components, str): - components = [components] - elif isinstance(components, set): - components = list(components) - else: - try: - _ = components[0] - components = list(components) - except: - components = [components] - if name is None: - #TODO: Use inspect here instead - name = getattr(callback, 'func_name') - if name is None: - name = str(callback) - else: - name += "()" - if hasattr(callback, 'im_class'): - name = getattr(callback.__self__.__class__,'__name__','')+'.'+name - if hasattr(callback, '__module__'): - # Is this a good idea? If not here, we should do it in the - # exception printing in try_waiter(). - name += " in " + callback.__module__ - entry = (callback, name, components, args, kw) - self._waiters.append(entry) - self._try_waiter(entry) - - def _try_waiter (self, entry): - """ - Tries a waiting callback. - - Calls the callback, removes from _waiters, and returns True if - all are satisfied. 
- """ - if entry not in self._waiters: - # Already handled - return - callback, name, components, args_, kw_ = entry - for c in components: - if not self.hasComponent(c): - return False - self._waiters.remove(entry) - try: - if callback is not None: - callback(*args_,**kw_) - except: - import traceback - msg = "Exception while trying to notify " + name - import inspect - try: - msg += " at " + inspect.getfile(callback) - msg += ":" + str(inspect.getsourcelines(callback)[1]) - except: - pass - log.exception(msg) - return True - - def _try_waiters (self): - """ - Tries to satisfy all component-waiting callbacks - """ - changed = True - - while changed: - changed = False - for entry in list(self._waiters): - if self._try_waiter(entry): - changed = True - - def listen_to_dependencies (self, sink, components=None, attrs=True, - short_attrs=False, listen_args={}): - """ - Look through *sink* for handlers named like _handle_component_event. - Use that to build a list of components, and append any components - explicitly specified by *components*. - - listen_args is a dict of "component_name"={"arg_name":"arg_value",...}, - allowing you to specify additional arguments to addListeners(). - - When all the referenced components are registered, do the following: - 1) Set up all the event listeners - 2) Call "_all_dependencies_met" on *sink* if it exists - 3) If attrs=True, set attributes on *sink* for each component - (e.g, sink._openflow_ would be set to core.openflow) - - For example, if topology is a dependency, a handler for topology's - SwitchJoin event must be defined as so: - def _handle_topology_SwitchJoin (self, ...): - - *NOTE*: The semantics of this function changed somewhat in the - Summer 2012 milestone, though its intention remains the same. - """ - if components is None: - components = set() - elif isinstance(components, str): - components = set([components]) - else: - components = set(components) - - for c in dir(sink): - if not c.startswith("_handle_"): continue - if c.count("_") < 3: continue - c = '_'.join(c.split("_")[2:-1]) - components.add(c) - - if None in listen_args: - # This means add it to all... 
- args = listen_args.pop(None) - for k,v in args.items(): - for c in components: - if c not in listen_args: - listen_args[c] = {} - if k not in listen_args[c]: - listen_args[c][k] = v - - if set(listen_args).difference(components): - log.error("Specified listen_args for missing component(s): %s" % - (" ".join(set(listen_args).difference(components)),)) - - def done (sink, components, attrs, short_attrs): - if attrs or short_attrs: - for c in components: - if short_attrs: - attrname = c - else: - attrname = '_%s_' % (c,) - setattr(sink, attrname, getattr(self, c)) - for c in components: - if hasattr(getattr(self, c), "_eventMixin_events"): - kwargs = {"prefix":c} - kwargs.update(listen_args.get(c, {})) - getattr(self, c).addListeners(sink, **kwargs) - getattr(sink, "_all_dependencies_met", lambda : None)() - - - self.call_when_ready(done, components, name=sink.__class__.__name__, - args=(sink,components,attrs,short_attrs)) - - if not self.starting_up: - self._waiter_notify() - - def __getattr__ (self, name): - if name in ('openflow', 'OpenFlowConnectionArbiter'): - self._openflow_wanted = True - c = self.components.get(name) - if c is not None: return c - raise AttributeError("'%s' not registered" % (name,)) - - -core = None - -def initialize (threaded_selecthub=True, epoll_selecthub=False, - handle_signals=True): - global core - core = POXCore(threaded_selecthub=threaded_selecthub, - epoll_selecthub=epoll_selecthub, - handle_signals=handle_signals) - return core - -# The below is a big hack to make tests and doc tools work. -# We should do something better. -def _maybe_initialize (): - import sys - if 'unittest' in sys.modules or 'nose' in sys.modules: - initialize() - return - import __main__ - mod = getattr(__main__, '__file__', '') - if 'pydoc' in mod or 'pdoc' in mod: - initialize() - return -_maybe_initialize() diff --git a/pox/datapaths/__init__.py b/pox/datapaths/__init__.py @@ -1,120 +0,0 @@ -# Copyright 2013 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Lets you start a default instance of the datapath, for what it's worth. - -Example: -./pox.py --no-openflow datapaths:softwareswitch --address=localhost -""" - -from pox.lib.ioworker.workers import BackoffWorker -from pox.datapaths.switch import SoftwareSwitch, OFConnection -from pox.datapaths.switch import ExpireMixin -from pox.lib.util import dpid_to_str, str_to_dpid - - -class OpenFlowWorker (BackoffWorker): - def __init__ (self, switch=None, **kw): - self.switch = switch - self.connection = None - from pox.core import core - self.log = core.getLogger("dp." 
+ dpid_to_str(self.switch.dpid)) - super(OpenFlowWorker, self).__init__(switch=switch,**kw) - self._info("Connecting to %s:%s", kw.get('addr'), kw.get('port')) - - def _handle_close (self): - super(OpenFlowWorker, self)._handle_close() - - def _handle_connect (self): - super(OpenFlowWorker, self)._handle_connect() - self.connection = OFConnection(self) - self.switch.set_connection(self.connection) - self._info("Connected to controller") - - def _error (self, *args, **kw): - self.log.error(*args,**kw) - def _warn (self, *args, **kw): - self.log.warn(*args,**kw) - def _info (self, *args, **kw): - self.log.info(*args,**kw) - def _debug (self, *args, **kw): - self.log.debug(*args,**kw) - - -def do_launch (cls, address = '127.0.0.1', port = 6633, max_retry_delay = 16, - dpid = None, extra_args = None, **kw): - """ - Used for implementing custom switch launching functions - - cls is the class of the switch you want to add. - - Returns switch instance. - """ - - if extra_args is not None: - import ast - extra_args = ast.literal_eval('{%s}' % (extra_args,)) - kw.update(extra_args) - - from pox.core import core - if not core.hasComponent('datapaths'): - core.register("datapaths", {}) - _switches = core.datapaths - - if dpid is None: - for dpid in range(1,256): - if dpid not in _switches: break - if dpid in _switches: - raise RuntimeError("Out of DPIDs") - else: - dpid = str_to_dpid(dpid) - - switch = cls(dpid=dpid, name="sw"+str(dpid), **kw) - _switches[dpid] = switch - - port = int(port) - max_retry_delay = int(max_retry_delay) - - def up (event): - import pox.lib.ioworker - global loop - loop = pox.lib.ioworker.RecocoIOLoop() - #loop.more_debugging = True - loop.start() - OpenFlowWorker.begin(loop=loop, addr=address, port=port, - max_retry_delay=max_retry_delay, switch=switch) - - from pox.core import core - - core.addListenerByName("UpEvent", up) - - return switch - - -def softwareswitch (address='127.0.0.1', port = 6633, max_retry_delay = 16, - dpid = None, extra = None, __INSTANCE__ = None): - """ - Launches a SoftwareSwitch - - Not particularly useful, since SoftwareSwitch doesn't do much. - """ - from pox.core import core - core.register("datapaths", {}) - - class ExpiringSwitch(ExpireMixin, SoftwareSwitch): - pass - - do_launch(ExpiringSwitch, address, port, max_retry_delay, dpid, - extra_args = extra) diff --git a/pox/datapaths/ctl.py b/pox/datapaths/ctl.py @@ -1,139 +0,0 @@ -# Copyright 2013 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
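As a sketch of how do_launch() is meant to be reused, a custom datapath launcher might look like the following. LoggingSwitch is illustrative only; the launch() signature mirrors softwareswitch() above.

    # Illustrative custom datapath launcher (LoggingSwitch is not part of POX)
    from pox.datapaths import do_launch
    from pox.datapaths.switch import SoftwareSwitch, ExpireMixin

    class LoggingSwitch (ExpireMixin, SoftwareSwitch):
      # ExpireMixin is inherited *before* the switch base class
      def rx_packet (self, packet, in_port, packet_data=None):
        self.log.debug("port %s received %s", in_port, packet)
        return super(LoggingSwitch, self).rx_packet(packet, in_port,
                                                    packet_data)

    def launch (address='127.0.0.1', port=6633, max_retry_delay=16,
                dpid=None, extra=None):
      # do_launch() registers the switch under core.datapaths and connects
      # it to the controller once the UpEvent fires
      do_launch(LoggingSwitch, address, port, max_retry_delay, dpid,
                extra_args=extra)

Like the other datapath components, it would be started with --no-openflow on the POX command line so the local controller socket stays free.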
- -""" -Simple datapath control framework for POX datapaths -""" - -from pox.core import core -from pox.lib.ioworker.workers import * -from pox.lib.ioworker import * -from pox.lib.revent import * - - -# IOLoop for our IO workers -_ioloop = None - -# Log -log = None - - -class CommandEvent (Event): - """ - Event fired whenever a command is received - """ - def __init__ (self, worker, cmd): - super(CommandEvent,self).__init__() - self.worker = worker - self.cmd = cmd - - @property - def first (self): - return self.cmd.strip().split()[0] - - @property - def args (self): - return self.cmd.strip().split()[1:] - - def __str__ (self): - return "<%s: %s>" % (self.worker, self.cmd) - - -class ServerWorker (TCPServerWorker, RecocoIOWorker): - """ - Worker to accept connections - """ - pass - #TODO: Really should just add this to the ioworker package. - - -class Worker (RecocoIOWorker): - """ - Worker to receive POX dpctl commands - """ - def __init__ (self, *args, **kw): - super(Worker, self).__init__(*args, **kw) - self._connecting = True - self._buf = b'' - - def _process (self, data): - self._buf += data - while '\n' in self._buf: - fore,self._buf = self._buf.split('\n', 1) - core.ctld.raiseEventNoErrors(CommandEvent, self, fore) - - - def _handle_rx (self): - self._buf += self.read() - self._process(self.read()) - - def _exec (self, msg): - msg.split() - - -class Server (EventMixin): - """ - Listens on a TCP socket for control - """ - _eventMixin_events = set([CommandEvent]) - - def __init__ (self, port = 7791): - w = ServerWorker(child_worker_type=Worker, port = port) - self.server_worker = w - _ioloop.register_worker(w) - - -def create_server (port = 7791): - # Set up logging - global log - if not log: - log = core.getLogger() - - # Set up IO loop - global _ioloop - if not _ioloop: - _ioloop = RecocoIOLoop() - #_ioloop.more_debugging = True - _ioloop.start() - - c = Server(port = int(port)) - return c - - -def server (port = 7791): - c = create_server(int(port)) - core.register("ctld", c) - - -def launch (cmd, address = None, port = 7791): - core.quit() - if not address: - address = "127.0.0.1" - import socket - core.getLogger('core').setLevel(100) - log = core.getLogger('ctl') - try: - s = socket.create_connection((address,port), timeout=2) - except: - log.error("Couldn't connect") - return - try: - s.settimeout(2) - s.send(cmd + "\n") - d = s.recv(4096).strip() - core.getLogger("ctl").info(d) - except socket.timeout: - log.warn("No response") - except: - log.exception("While communicating") diff --git a/pox/datapaths/nx_switch.py b/pox/datapaths/nx_switch.py @@ -1,160 +0,0 @@ -# Copyright 2011,2012 Andreas Wundsam -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import inspect - -import pox.openflow.libopenflow_01 as of -import pox.openflow.nicira as nx -from pox.datapaths.switch import SoftwareSwitch, OFConnection - -_slave_blacklist = set([of.ofp_flow_mod, of.ofp_packet_out, of.ofp_port_mod, - of.ofp_barrier_request]) -_messages_for_all = set([of.ofp_port_status]) - - -class NXSoftwareSwitch (SoftwareSwitch): - """ - Software datapath with Nicira (NX) extensions - - Extension of the software switch that supports some of the Nicira (NX) vendor - extensions that are part of Open vSwitch. - - In particular, this include the ability for a switch to connect to multiple - controllers at the same time. - - In the beginning, all controllers start out as equals (ROLE_OTHER). Through - the NX vendor message role_request, one controller can be promoted to - ROLE_MASTER, in which case all other controllers are downgraded to slave - status. - - The switch doesn't accept state-mutating messages (e.g., FLOW_MOD, see - _slave_blacklist) from slave controllers. - - Messages are distributed to controllers according to their type: - - symmetric message replies are sent to the controller that initiated them - (e.g., STATS_REQUEST -> REPLY) - - port_status messages are distributed to all controllers - - all other messages are distributed to the master controller, or if none - is present, any controller in ROLE_OTHER - """ - - def __init__ (self, *args, **kw): - SoftwareSwitch.__init__(self, *args, **kw) - self.role_by_conn={} - self.connections = [] - self.connection_in_action = None - # index of the next 'other' controller to get a message - # (for round robin of async messages) - self.next_other = 0 - - # Set of connections to which we have sent hellos. This is used to - # as part of overriding the single-connection logic in the superclass. 
- self._sent_hellos = set() - - def rx_message (self, connection, msg): - """ - Handles incoming messages - - @overrides SoftwareSwitch.rx_message - """ - - self.connection_in_action = connection - if not self.check_rights(msg, connection): - self.log.warn("Message %s not allowed for slave controller %d", msg, - connection.ID) - self.send_vendor_error(connection) - else: - SoftwareSwitch.rx_message(self, connection, msg) - - self.connection_in_action = None - - def check_rights (self, ofp, connection): - if self.role_by_conn[connection.ID] != nx.NX_ROLE_SLAVE: - return True - else: - return not type(ofp) in _slave_blacklist - - def send_vendor_error (self, connection): - err = of.ofp_error(type=of.OFPET_BAD_REQUEST, code=of.OFPBRC_BAD_VENDOR) - connection.send(err) - - def send (self, message): - connections_used = [] - if type(message) in _messages_for_all: - for c in self.connections: - c.send(message) - connections_used.append(c) - elif self.connection_in_action: - #self.log.info("Sending %s to active connection %d", - # (str(message), self.connection_in_action.ID)) - self.connection_in_action.send(message) - connections_used.append(self.connection_in_action) - else: - masters = [c for c in self.connections - if self.role_by_conn[c.ID] == nx.NX_ROLE_MASTER] - if len(masters) > 0: - masters[0].send(message) - connections_used.append(masters[0]) - else: - others = [c for c in self.connections - if self.role_by_conn[c.ID] == nx.NX_ROLE_OTHER] - if len(others) > 0: - self.next_other = self.next_other % len(others) - #self.log.info("Sending %s to 'other' connection %d", - # (str(message), self.next_other)) - others[self.next_other].send(message) - connections_used.append(others[self.next_other]) - self.next_other += 1 - else: - self.log.info("Could not find any connection to send messages %s", - str(message)) - return connections_used - - def add_connection (self, connection): - self.role_by_conn[connection.ID] = nx.NX_ROLE_OTHER - connection.set_message_handler(self.rx_message) - self.connections.append(connection) - return connection - - def set_connection (self, connection): - self.add_connection(connection) - - def set_role (self, connection, role): - self.role_by_conn[connection.ID] = role - if role == nx.NX_ROLE_MASTER: - for c in self.connections: - if c != connection: - self.role_by_conn[c.ID] = nx.NX_ROLE_SLAVE - - def _rx_hello (self, ofp, connection): - # Override the usual hello-send logic - if connection not in self._sent_hellos: - self._sent_hellos.add(connection) - self.send_hello(force=True) - - def _rx_vendor (self, vendor, connection): - self.log.debug("Vendor %s %s", self.name, str(vendor)) - if vendor.vendor == nx.NX_VENDOR_ID: - try: - data = nx._unpack_nx_vendor(vendor.data) - if isinstance(data, nx.nx_role_request): - self.set_role(connection, data.role) - reply = of.ofp_vendor(xid=vendor.xid, vendor = nx.NX_VENDOR_ID, - data = nx.nx_role_reply(role = data.role)) - self.send(reply) - return - except NotImplementedError: - self.send_vendor_error(connection) - else: - return SoftwareSwitch._rx_vendor(self, vendor) diff --git a/pox/datapaths/pcap_switch.py b/pox/datapaths/pcap_switch.py @@ -1,271 +0,0 @@ -# Copyright 2013 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
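The role policy described in the NXSoftwareSwitch docstring can be seen with two stub connections. FakeConnection below is illustrative and not part of POX; ports=[] simply keeps the constructor from emitting port_status messages during setup.

    # Illustrative check of NXSoftwareSwitch role handling
    import pox.openflow.libopenflow_01 as of
    import pox.openflow.nicira as nx
    from pox.datapaths.nx_switch import NXSoftwareSwitch

    class FakeConnection (object):
      _next = 1
      def __init__ (self):
        self.ID = FakeConnection._next
        FakeConnection._next += 1
        self.sent = []
      def set_message_handler (self, handler):
        self.handler = handler
      def send (self, msg):
        self.sent.append(msg)

    sw = NXSoftwareSwitch(dpid=1, ports=[])
    c1, c2 = FakeConnection(), FakeConnection()
    sw.add_connection(c1)
    sw.add_connection(c2)

    sw.set_role(c1, nx.NX_ROLE_MASTER)              # demotes c2 to slave
    print(sw.check_rights(of.ofp_flow_mod(), c2))   # False: blacklisted for slaves
    print(sw.check_rights(of.ofp_flow_mod(), c1))   # True: master may modify state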
-# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Software switch with PCap ports - -Example: -./pox.py --no-openflow datapaths.pcap_switch --address=localhost -""" - -from pox.core import core -from pox.datapaths import do_launch -from pox.datapaths.switch import SoftwareSwitchBase, OFConnection -from pox.datapaths.switch import ExpireMixin -import pox.lib.pxpcap as pxpcap -from queue import Queue -from threading import Thread -import pox.openflow.libopenflow_01 as of -from pox.lib.packet import ethernet -import logging - -log = core.getLogger() - -DEFAULT_CTL_PORT = 7791 - -_switches = {} - -def _do_ctl (event): - r = _do_ctl2(event) - if r is None: - r = "Okay." - event.worker.send(r + "\n") - -def _do_ctl2 (event): - def errf (msg, *args): - raise RuntimeError(msg % args) - - args = event.args - - def ra (low, high = None): - if high is None: high = low - if len(args) < low or len(args) > high: - raise RuntimeError("Wrong number of arguments") - return False - - try: - if event.first == "add-port": - ra(1,2) - if len(event.args) == 1 and len(_switches) == 1: - sw = _switches[list(_switches.keys())[0]] - p = args[0] - else: - ra(2) - if event.args[0] not in _switches: - raise RuntimeError("No such switch") - sw = _switches[event.args[0]] - p = args[1] - sw.add_interface(p, start=True, on_error=errf) - elif event.first == "del-port": - ra(1,2) - if len(event.args) == 1: - for sw in list(_switches.values()): - for p in sw.ports: - if p.name == event.args[0]: - sw.remove_interface(event.args[0]) - return - raise RuntimeError("No such interface") - sw = _switches[event.args[0]] - sw.remove_interface(args[1]) - elif event.first == "show": - ra(0) - s = [] - for sw in list(_switches.values()): - s.append("Switch %s" % (sw.name,)) - for no,p in sw.ports.items(): - s.append(" %3s %s" % (no, p.name)) - return "\n".join(s) - - else: - raise RuntimeError("Unknown command") - - except Exception as e: - log.exception("While processing command") - return "Error: " + str(e) - - -def launch (address = '127.0.0.1', port = 6633, max_retry_delay = 16, - dpid = None, ports = '', extra = None, ctl_port = None, - __INSTANCE__ = None): - """ - Launches a switch - """ - - if not pxpcap.enabled: - raise RuntimeError("You need PXPCap to use this component") - - if ctl_port: - if core.hasComponent('ctld'): - raise RuntimeError("Only one ctl_port is allowed") - - if ctl_port is True: - ctl_port = DEFAULT_CTL_PORT - - from . 
import ctl - ctl.server(ctl_port) - core.ctld.addListenerByName("CommandEvent", _do_ctl) - - _ports = ports.strip() - def up (event): - ports = [p for p in _ports.split(",") if p] - - sw = do_launch(PCapSwitch, address, port, max_retry_delay, dpid, - ports=ports, extra_args=extra) - _switches[sw.name] = sw - - core.addListenerByName("UpEvent", up) - - -class PCapSwitch (ExpireMixin, SoftwareSwitchBase): - # Default level for loggers of this class - default_log_level = logging.INFO - - def __init__ (self, **kw): - """ - Create a switch instance - - Additional options over superclass: - log_level (default to default_log_level) is level for this instance - ports is a list of interface names - """ - log_level = kw.pop('log_level', self.default_log_level) - - self.q = Queue() - self.t = Thread(target=self._consumer_threadproc) - core.addListeners(self) - - ports = kw.pop('ports', []) - kw['ports'] = [] - - super(PCapSwitch,self).__init__(**kw) - - self._next_port = 1 - - self.px = {} - - for p in ports: - self.add_interface(p, start=False) - - self.log.setLevel(log_level) - - for px in self.px.values(): - px.start() - - self.t.start() - - def add_interface (self, name, port_no=-1, on_error=None, start=False): - if on_error is None: - on_error = log.error - - devs = pxpcap.PCap.get_devices() - if name not in devs: - on_error("Device %s not available -- ignoring", name) - return - dev = devs[name] - if dev.get('addrs',{}).get('ethernet',{}).get('addr') is None: - on_error("Device %s has no ethernet address -- ignoring", name) - return - if dev.get('addrs',{}).get('AF_INET') != None: - on_error("Device %s has an IP address -- ignoring", name) - return - for no,p in self.px.items(): - if p.device == name: - on_error("Device %s already added", name) - - if port_no == -1: - while True: - port_no = self._next_port - self._next_port += 1 - if port_no not in self.ports: break - - if port_no in self.ports: - on_error("Port %s already exists -- ignoring", port_no) - return - - phy = of.ofp_phy_port() - phy.port_no = port_no - phy.hw_addr = dev['addrs']['ethernet']['addr'] - phy.name = name - # Fill in features sort of arbitrarily - phy.curr = of.OFPPF_10MB_HD - phy.advertised = of.OFPPF_10MB_HD - phy.supported = of.OFPPF_10MB_HD - phy.peer = of.OFPPF_10MB_HD - - self.add_port(phy) - - px = pxpcap.PCap(name, callback = self._pcap_rx, start = False) - px.port_no = phy.port_no - self.px[phy.port_no] = px - - if start: - px.start() - - return px - - def remove_interface (self, name_or_num): - if isinstance(name_or_num, str): - for no,p in self.px.items(): - if p.device == name_or_num: - self.remove_interface(no) - return - raise ValueError("No such interface") - - px = self.px[name_or_num] - px.stop() - px.port_no = None - self.delete_port(name_or_num) - - def _handle_GoingDownEvent (self, event): - self.q.put(None) - - def _consumer_threadproc (self): - timeout = 3 - while core.running: - try: - data = self.q.get(timeout=timeout) - except: - continue - if data is None: - # Signal to quit - break - batch = [] - while True: - self.q.task_done() - port_no,data = data - data = ethernet(data) - batch.append((data,port_no)) - try: - data = self.q.get(block=False) - except: - break - core.callLater(self.rx_batch, batch) - - def rx_batch (self, batch): - for data,port_no in batch: - self.rx_packet(data, port_no) - - def _pcap_rx (self, px, data, sec, usec, length): - if px.port_no is None: return - self.q.put((px.port_no, data)) - - def _output_packet_physical (self, packet, port_no): - """ - send a packet out a 
single physical port - - This is called by the more general _output_packet(). - """ - px = self.px.get(port_no) - if not px: return - px.inject(packet) diff --git a/pox/datapaths/switch.py b/pox/datapaths/switch.py @@ -1,1308 +0,0 @@ -# Copyright 2012,2013 Colin Scott -# Copyright 2012,2013 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A software OpenFlow switch -""" - -""" -TODO ----- -* Don't reply to HELLOs -- just send one on connect -* Pass raw OFP packet to rx handlers as well as parsed -* Once previous is done, use raw OFP for error data when appropriate -* Check self.features to see if various features/actions are enabled, - and act appropriately if they're not (rather than just doing them). -* Virtual ports currently have no config/state, but probably should. -* Provide a way to rebuild, e.g., the action handler table when the - features object is adjusted. -""" - - -from pox.lib.util import assert_type, initHelper, dpid_to_str -from pox.lib.revent import Event, EventMixin -from pox.lib.recoco import Timer -from pox.openflow.libopenflow_01 import * -import pox.openflow.libopenflow_01 as of -from pox.openflow.util import make_type_to_unpacker_table -from pox.openflow.flow_table import FlowTable, TableEntry -from pox.lib.packet import * - -import logging -import struct -import time - - -# Multicast address used for STP 802.1D -_STP_MAC = EthAddr('01:80:c2:00:00:00') - - -class DpPacketOut (Event): - """ - Event raised when a dataplane packet is sent out a port - """ - def __init__ (self, node, packet, port): - assert assert_type("packet", packet, ethernet, none_ok=False) - self.node = node - self.packet = packet - self.port = port - self.switch = node # For backwards compatability - - -class SoftwareSwitchBase (object): - def __init__ (self, dpid, name=None, ports=4, miss_send_len=128, - max_buffers=100, max_entries=0x7fFFffFF, features=None): - """ - Initialize switch - - ports is a list of ofp_phy_ports or a number of ports - - miss_send_len is number of bytes to send to controller on table miss - - max_buffers is number of buffered packets to store - - max_entries is max flows entries per table - """ - if name is None: name = dpid_to_str(dpid) - self.name = name - - self.dpid = dpid - - if isinstance(ports, int): - ports = [self.generate_port(i) for i in range(1, ports+1)] - - self.max_buffers = max_buffers - self.max_entries = max_entries - self.miss_send_len = miss_send_len - self.config_flags = 0 - self._has_sent_hello = False - - self.table = FlowTable() - self.table.addListeners(self) - - self._lookup_count = 0 - self._matched_count = 0 - - self.log = logging.getLogger(self.name) - self._connection = None - - # buffer for packets during packet_in - self._packet_buffer = [] - - # Map port_no -> openflow.pylibopenflow_01.ofp_phy_ports - self.ports = {} - self.port_stats = {} - - for port in ports: - self.add_port(port) - - if features is not None: - self.features = features - else: - # Set up default features - - self.features = SwitchFeatures() - 
self.features.cap_flow_stats = True - self.features.cap_table_stats = True - self.features.cap_port_stats = True - #self.features.cap_stp = True - #self.features.cap_ip_reasm = True - #self.features.cap_queue_stats = True - #self.features.cap_arp_match_ip = True - - self.features.act_output = True - self.features.act_enqueue = True - self.features.act_strip_vlan = True - self.features.act_set_vlan_vid = True - self.features.act_set_vlan_pcp = True - self.features.act_set_dl_dst = True - self.features.act_set_dl_src = True - self.features.act_set_nw_dst = True - self.features.act_set_nw_src = True - self.features.act_set_nw_tos = True - self.features.act_set_tp_dst = True - self.features.act_set_tp_src = True - #self.features.act_vendor = True - - # Set up handlers for incoming OpenFlow messages - # That is, self.ofp_handlers[OFPT_FOO] = self._rx_foo - self.ofp_handlers = {} - for value,name in ofp_type_map.items(): - name = name.split("OFPT_",1)[-1].lower() - h = getattr(self, "_rx_" + name, None) - if not h: continue - assert of._message_type_to_class[value]._from_controller, name - self.ofp_handlers[value] = h - - # Set up handlers for actions - # That is, self.action_handlers[OFPAT_FOO] = self._action_foo - #TODO: Refactor this with above - self.action_handlers = {} - for value,name in ofp_action_type_map.items(): - name = name.split("OFPAT_",1)[-1].lower() - h = getattr(self, "_action_" + name, None) - if not h: continue - if getattr(self.features, "act_" + name) is False: continue - self.action_handlers[value] = h - - # Set up handlers for stats handlers - # That is, self.stats_handlers[OFPST_FOO] = self._stats_foo - #TODO: Refactor this with above - self.stats_handlers = {} - for value,name in ofp_stats_type_map.items(): - name = name.split("OFPST_",1)[-1].lower() - h = getattr(self, "_stats_" + name, None) - if not h: continue - self.stats_handlers[value] = h - - # Set up handlers for flow mod handlers - # That is, self.flow_mod_handlers[OFPFC_FOO] = self._flow_mod_foo - #TODO: Refactor this with above - self.flow_mod_handlers = {} - for name,value in ofp_flow_mod_command_rev_map.items(): - name = name.split("OFPFC_",1)[-1].lower() - h = getattr(self, "_flow_mod_" + name, None) - if not h: continue - self.flow_mod_handlers[value] = h - - def _gen_port_name (self, port_no): - return "%s.%s"%(dpid_to_str(self.dpid, True).replace('-','')[:12], port_no) - - def _gen_ethaddr (self, port_no): - return EthAddr("02%06x%04x" % (self.dpid % 0x00FFff, port_no % 0xffFF)) - - def generate_port (self, port_no, name = None, ethaddr = None): - dpid = self.dpid - p = ofp_phy_port() - p.port_no = port_no - if ethaddr is None: - p.hw_addr = self._gen_ethaddr(p.port_no) - else: - p.hw_addr = EthAddr(ethaddr) - if name is None: - p.name = self._gen_port_name(p.port_no) - else: - p.name = name - # Fill in features sort of arbitrarily - p.config = OFPPC_NO_STP - p.curr = OFPPF_10MB_HD - p.advertised = OFPPF_10MB_HD - p.supported = OFPPF_10MB_HD - p.peer = OFPPF_10MB_HD - return p - - @property - def _time (self): - """ - Get the current time - - This should be used for, e.g., calculating timeouts. It currently isn't - used everywhere it should be. - - Override this to change time behavior. 
- """ - return time.time() - - def _handle_FlowTableModification (self, event): - """ - Handle flow table modification events - """ - # Currently, we only use this for sending flow_removed messages - if not event.removed: return - - if event.reason in (OFPRR_IDLE_TIMEOUT,OFPRR_HARD_TIMEOUT,OFPRR_DELETE): - # These reasons may lead to a flow_removed - count = 0 - for entry in event.removed: - if entry.flags & OFPFF_SEND_FLOW_REM and not entry.flags & OFPFF_EMERG: - # Flow wants removal notification -- send it - fr = entry.to_flow_removed(self._time, reason=event.reason) - self.send(fr) - count += 1 - self.log.debug("%d flows removed (%d removal notifications)", - len(event.removed), count) - - def rx_message (self, connection, msg): - """ - Handle an incoming OpenFlow message - """ - ofp_type = msg.header_type - h = self.ofp_handlers.get(ofp_type) - if h is None: - raise RuntimeError("No handler for ofp_type %s(%d)" - % (ofp_type_map.get(ofp_type), ofp_type)) - - self.log.debug("Got %s with XID %s",ofp_type_map.get(ofp_type),msg.xid) - h(msg, connection=connection) - - def set_connection (self, connection): - """ - Set this switch's connection. - """ - self._has_sent_hello = False - connection.set_message_handler(self.rx_message) - self._connection = connection - - def send (self, message, connection = None): - """ - Send a message to this switch's communication partner - """ - if connection is None: - connection = self._connection - if connection: - connection.send(message) - else: - self.log.debug("Asked to send message %s, but not connected", message) - - def _rx_hello (self, ofp, connection): - #FIXME: This isn't really how hello is supposed to work -- we're supposed - # to send it immediately on connection. See _send_hello(). - self.send_hello() - - def _rx_echo_request (self, ofp, connection): - """ - Handles echo requests - """ - msg = ofp_echo_reply(xid=ofp.xid, body=ofp.body) - self.send(msg) - - def _rx_features_request (self, ofp, connection): - """ - Handles feature requests - """ - self.log.debug("Send features reply") - msg = ofp_features_reply(datapath_id = self.dpid, - xid = ofp.xid, - n_buffers = self.max_buffers, - n_tables = 1, - capabilities = self.features.capability_bits, - actions = self.features.action_bits, - ports = list(self.ports.values())) - self.send(msg) - - def _rx_flow_mod (self, ofp, connection): - """ - Handles flow mods - """ - self.log.debug("Flow mod details: %s", ofp.show()) - - #self.table.process_flow_mod(ofp) - #self._process_flow_mod(ofp, connection=connection, table=self.table) - handler = self.flow_mod_handlers.get(ofp.command) - if handler is None: - self.log.warn("Command not implemented: %s" % command) - self.send_error(type=OFPET_FLOW_MOD_FAILED, code=OFPFMFC_BAD_COMMAND, - ofp=ofp, connection=connection) - return - handler(flow_mod=ofp, connection=connection, table=self.table) - - if ofp.buffer_id is not None: - self._process_actions_for_packet_from_buffer(ofp.actions, ofp.buffer_id, - ofp) - - def _rx_packet_out (self, packet_out, connection): - """ - Handles packet_outs - """ - self.log.debug("Packet out details: %s", packet_out.show()) - - if packet_out.data: - self._process_actions_for_packet(packet_out.actions, packet_out.data, - packet_out.in_port, packet_out) - elif packet_out.buffer_id is not None: - self._process_actions_for_packet_from_buffer(packet_out.actions, - packet_out.buffer_id, - packet_out) - else: - self.log.warn("packet_out: No data and no buffer_id -- " - "don't know what to send") - - def _rx_echo_reply (self, ofp, 
connection): - pass - - def _rx_barrier_request (self, ofp, connection): - msg = ofp_barrier_reply(xid = ofp.xid) - self.send(msg) - - def _rx_get_config_request (self, ofp, connection): - msg = ofp_get_config_reply(xid = ofp.xid) - msg.miss_send_len = self.miss_send_len - msg.flags = self.config_flags - self.log.debug("Sending switch config reply %s", msg) - self.send(msg) - - def _rx_stats_request (self, ofp, connection): - handler = self.stats_handlers.get(ofp.type) - if handler is None: - self.log.warning("Stats type %s not implemented", ofp.type) - - self.send_error(type=OFPET_BAD_REQUEST, code=OFPBRC_BAD_STAT, - ofp=ofp, connection=connection) - return - - body = handler(ofp, connection=connection) - if body is not None: - reply = ofp_stats_reply(xid=ofp.xid, type=ofp.type, body=body) - self.log.debug("Sending stats reply %s", reply) - self.send(reply) - - def _rx_set_config (self, config, connection): - self.miss_send_len = config.miss_send_len - self.config_flags = config.flags - - def _rx_port_mod (self, port_mod, connection): - port_no = port_mod.port_no - if port_no not in self.ports: - self.send_error(type=OFPET_PORT_MOD_FAILED, code=OFPPMFC_BAD_PORT, - ofp=port_mod, connection=connection) - return - port = self.ports[port_no] - if port.hw_addr != port_mod.hw_addr: - self.send_error(type=OFPET_PORT_MOD_FAILED, code=OFPPMFC_BAD_HW_ADDR, - ofp=port_mod, connection=connection) - return - - mask = port_mod.mask - - for bit in range(32): - bit = 1 << bit - if mask & bit: - handled,r = self._set_port_config_bit(port, bit, port_mod.config & bit) - if not handled: - self.log.warn("Unsupported port config flag: %08x", bit) - continue - if r is not None: - msg = "Port %s: " % (port.port_no,) - if isinstance(r, str): - msg += r - else: - msg += ofp_port_config_map.get(bit, "config bit %x" % (bit,)) - msg += " set to " - msg += "true" if r else "false" - self.log.debug(msg) - - def _rx_vendor (self, vendor, connection): - # We don't support vendor extensions, so send an OFP_ERROR, per - # page 42 of spec - self.send_error(type=OFPET_BAD_REQUEST, code=OFPBRC_BAD_VENDOR, - ofp=vendor, connection=connection) - - def _rx_queue_get_config_request (self, ofp, connection): - """ - Handles an OFPT_QUEUE_GET_CONFIG_REQUEST message. - """ - reply = ofp_queue_get_config_reply(xid=ofp.xid, port=ofp.port, queues=[]) - self.log.debug("Sending queue get config reply %s", reply) - self.send(reply) - - def send_hello (self, force = False): - """ - Send hello (once) - """ - #FIXME: This is wrong -- we should just send when connecting. 
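The control-plane path above (set_connection(), rx_message(), and the _rx_* handlers) can be exercised without any network I/O by handing the switch a stub connection. StubConnection below is illustrative and not part of POX.

    # Illustrative round trip through rx_message()/send()
    import pox.openflow.libopenflow_01 as of
    from pox.datapaths.switch import SoftwareSwitch

    class StubConnection (object):
      def __init__ (self):
        self.sent = []
      def set_message_handler (self, handler):
        self.rx = handler          # the switch's rx_message
      def send (self, msg):
        self.sent.append(msg)      # whatever the switch sends back

    sw = SoftwareSwitch(dpid=1, name="s1", ports=2)
    conn = StubConnection()
    sw.set_connection(conn)

    # A features_request is answered with a features_reply listing the ports
    conn.rx(conn, of.ofp_features_request(xid=42))
    print(isinstance(conn.sent[-1], of.ofp_features_reply))   # True

    # An all-wildcard flow_mod is handled by _flow_mod_add()
    fm = of.ofp_flow_mod()
    fm.actions.append(of.ofp_action_output(port=2))
    conn.rx(conn, fm)
    print(len(sw.table))                                      # 1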
- if self._has_sent_hello and not force: return - self._has_sent_hello = True - self.log.debug("Sent hello") - msg = ofp_hello(xid=0) - self.send(msg) - - def send_packet_in (self, in_port, buffer_id=None, packet=b'', reason=None, - data_length=None): - """ - Send PacketIn - """ - if hasattr(packet, 'pack'): - packet = packet.pack() - assert assert_type("packet", packet, bytes) - self.log.debug("Send PacketIn") - if reason is None: - reason = OFPR_NO_MATCH - if data_length is not None and len(packet) > data_length: - if buffer_id is not None: - packet = packet[:data_length] - - msg = ofp_packet_in(xid = 0, in_port = in_port, buffer_id = buffer_id, - reason = reason, data = packet) - - self.send(msg) - - def send_port_status (self, port, reason): - """ - Send port status - - port is an ofp_phy_port - reason is one of OFPPR_xxx - """ - assert assert_type("port", port, ofp_phy_port, none_ok=False) - assert reason in list(ofp_port_reason_rev_map.values()) - msg = ofp_port_status(desc=port, reason=reason) - self.send(msg) - - def send_error (self, type, code, ofp=None, data=None, connection=None): - """ - Send an error - - If you pass ofp, it will be used as the source of the error's XID and - data. - You can override the data by also specifying data. - """ - err = ofp_error(type=type, code=code) - if ofp: - err.xid = ofp.xid - err.data = ofp.pack() - else: - err.xid = 0 - if data is not None: - err.data = data - self.send(err, connection = connection) - - def rx_packet (self, packet, in_port, packet_data = None): - """ - process a dataplane packet - - packet: an instance of ethernet - in_port: the integer port number - packet_data: packed version of packet if available - """ - assert assert_type("packet", packet, ethernet, none_ok=False) - assert assert_type("in_port", in_port, int, none_ok=False) - port = self.ports.get(in_port) - if port is None: - self.log.warn("Got packet on missing port %i", in_port) - return - - is_stp = packet.dst == _STP_MAC - - if (port.config & OFPPC_NO_RECV) and not is_stp: - # Drop all except STP - return - if (port.config & OFPPC_NO_RECV_STP) and is_stp: - # Drop STP - return - - if self.config_flags & OFPC_FRAG_MASK: - ipp = packet.find(ipv4) - if ipp: - if (ipp.flags & ipv4.MF_FLAG) or ipp.frag != 0: - frag_mode = self.config_flags & OFPC_FRAG_MASK - if frag_mode == OFPC_FRAG_DROP: - # Drop fragment - return - elif frag_mode == OFPC_FRAG_REASM: - if self.features.cap_ip_reasm: - #TODO: Implement fragment reassembly - self.log.info("Can't reassemble fragment: not implemented") - else: - self.log.warn("Illegal fragment processing mode: %i", frag_mode) - - self.port_stats[in_port].rx_packets += 1 - if packet_data is not None: - self.port_stats[in_port].rx_bytes += len(packet_data) - else: - self.port_stats[in_port].rx_bytes += len(packet.pack()) # Expensive - - self._lookup_count += 1 - entry = self.table.entry_for_packet(packet, in_port) - if entry is not None: - self._matched_count += 1 - entry.touch_packet(len(packet)) - self._process_actions_for_packet(entry.actions, packet, in_port) - else: - # no matching entry - if port.config & OFPPC_NO_PACKET_IN: - return - buffer_id = self._buffer_packet(packet, in_port) - if packet_data is None: - packet_data = packet.pack() - self.send_packet_in(in_port, buffer_id, packet_data, - reason=OFPR_NO_MATCH, data_length=self.miss_send_len) - - def delete_port (self, port): - """ - Removes a port - - Sends a port_status message to the controller - - Returns the removed phy_port - """ - try: - port_no = port.port_no - assert 
self.ports[port_no] is port - except: - port_no = port - port = self.ports[port_no] - if port_no not in self.ports: - raise RuntimeError("Can't remove nonexistent port " + str(port_no)) - self.send_port_status(port, OFPPR_DELETE) - del self.ports[port_no] - return port - - def add_port (self, port): - """ - Adds a port - - Sends a port_status message to the controller - """ - try: - port_no = port.port_no - except: - port_no = port - port = self.generate_port(port_no, self.dpid) - if port_no in self.ports: - raise RuntimeError("Port %s already exists" % (port_no,)) - self.ports[port_no] = port - self.port_stats[port.port_no] = ofp_port_stats(port_no=port.port_no) - self.send_port_status(port, OFPPR_ADD) - - def _set_port_config_bit (self, port, bit, value): - """ - Set a port config bit - - This is called in response to port_mods. It is passed the ofp_phy_port, - the bit/mask, and the value of the bit (i.e., 0 if the flag is to be - unset, or the same value as bit if it is to be set). - - The return value is a tuple (handled, msg). - If bit is handled, then handled will be True, else False. - if msg is a string, it will be used as part of a log message. - If msg is None, there will be no log message. - If msg is anything else "truthy", an "enabled" log message is generated. - If msg is anything else "falsy", a "disabled" log message is generated. - msg is only used when handled is True. - """ - if bit == OFPPC_NO_STP: - if value == 0: - # we also might send OFPBRC_EPERM if trying to disable this bit - self.log.warn("Port %s: Can't enable 802.1D STP", port.port_no) - return (True, None) - - if bit not in (OFPPC_PORT_DOWN, OFPPC_NO_STP, OFPPC_NO_RECV, OFPPC_NO_RECV_STP, - OFPPC_NO_FLOOD, OFPPC_NO_FWD, OFPPC_NO_PACKET_IN): - return (False, None) - - if port.set_config(value, bit): - if bit == OFPPC_PORT_DOWN: - # Note (Peter Peresini): Although the spec is not clear about it, - # we will assume that config.OFPPC_PORT_DOWN implies - # state.OFPPS_LINK_DOWN. This is consistent with Open vSwitch. - - #TODO: for now, we assume that there is always physical link present - # and that the link state depends only on the configuration. - old_state = port.state & OFPPS_LINK_DOWN - port.state = port.state & ~OFPPS_LINK_DOWN - if port.config & OFPPC_PORT_DOWN: - port.state = port.state | OFPPS_LINK_DOWN - new_state = port.state & OFPPS_LINK_DOWN - if old_state != new_state: - self.send_port_status(port, OFPPR_MODIFY) - - # Do default log message. - return (True, value) - - # No change -- no log message. - return (True, None) - - def _output_packet_physical (self, packet, port_no): - """ - send a packet out a single physical port - - This is called by the more general _output_packet(). - - Override this. - """ - self.log.info("Sending packet %s out port %s", str(packet), port_no) - - def _output_packet (self, packet, out_port, in_port, max_len=None): - """ - send a packet out some port - - This handles virtual ports and does validation. 
- - packet: instance of ethernet - out_port, in_port: the integer port number - max_len: maximum packet payload length to send to controller - """ - assert assert_type("packet", packet, ethernet, none_ok=False) - - def real_send (port_no, allow_in_port=False): - if type(port_no) == ofp_phy_port: - port_no = port_no.port_no - if port_no == in_port and not allow_in_port: - self.log.warn("Dropping packet sent on port %i: Input port", port_no) - return - if port_no not in self.ports: - self.log.warn("Dropping packet sent on port %i: Invalid port", port_no) - return - if self.ports[port_no].config & OFPPC_NO_FWD: - self.log.warn("Dropping packet sent on port %i: Forwarding disabled", - port_no) - return - if self.ports[port_no].config & OFPPC_PORT_DOWN: - self.log.warn("Dropping packet sent on port %i: Port down", port_no) - return - if self.ports[port_no].state & OFPPS_LINK_DOWN: - self.log.debug("Dropping packet sent on port %i: Link down", port_no) - return - self.port_stats[port_no].tx_packets += 1 - self.port_stats[port_no].tx_bytes += len(packet.pack()) #FIXME: Expensive - self._output_packet_physical(packet, port_no) - - if out_port < OFPP_MAX: - real_send(out_port) - elif out_port == OFPP_IN_PORT: - real_send(in_port, allow_in_port=True) - elif out_port == OFPP_FLOOD: - for no,port in self.ports.items(): - if no == in_port: continue - if port.config & OFPPC_NO_FLOOD: continue - real_send(port) - elif out_port == OFPP_ALL: - for no,port in self.ports.items(): - if no == in_port: continue - real_send(port) - elif out_port == OFPP_CONTROLLER: - buffer_id = self._buffer_packet(packet, in_port) - # Should we honor OFPPC_NO_PACKET_IN here? - self.send_packet_in(in_port, buffer_id, packet, reason=OFPR_ACTION, - data_length=max_len) - elif out_port == OFPP_TABLE: - # Do we disable send-to-controller when performing this? - # (Currently, there's the possibility that a table miss from this - # will result in a send-to-controller which may send back to table...) - self.rx_packet(packet, in_port) - else: - self.log.warn("Unsupported virtual output port: %d", out_port) - - def _buffer_packet (self, packet, in_port=None): - """ - Buffer packet and return buffer ID - - If no buffer is available, return None. - """ - # Do we have an empty slot? - for (i, value) in enumerate(self._packet_buffer): - if value is None: - # Yes -- use it - self._packet_buffer[i] = (packet, in_port) - return i + 1 - # No -- create a new slow - if len(self._packet_buffer) >= self.max_buffers: - # No buffers available! 
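The output path described in _output_packet() can also be observed from outside through the DpPacketOut event that SoftwareSwitch raises; the sketch below floods one frame and records which ports it leaves on. The frame contents and addresses are arbitrary.

    # Illustrative packet_out flood observed via DpPacketOut
    import pox.openflow.libopenflow_01 as of
    from pox.datapaths.switch import SoftwareSwitch
    from pox.lib.packet import ethernet
    from pox.lib.addresses import EthAddr

    sw = SoftwareSwitch(dpid=1, ports=3)

    outputs = []
    def _handle_DpPacketOut (event):
      outputs.append(event.port.port_no)
    sw.addListenerByName("DpPacketOut", _handle_DpPacketOut)

    # Build a minimal Ethernet frame and ask the switch to flood it
    frame = ethernet(src=EthAddr("00:00:00:00:00:01"),
                     dst=EthAddr("ff:ff:ff:ff:ff:ff"))
    po = of.ofp_packet_out(data=frame.pack())
    po.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))
    sw.rx_message(None, po)

    print(sorted(outputs))      # [1, 2, 3] -- one copy per port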
- return None - self._packet_buffer.append( (packet, in_port) ) - return len(self._packet_buffer) - - def _process_actions_for_packet_from_buffer (self, actions, buffer_id, - ofp=None): - """ - output and release a packet from the buffer - - ofp is the message which triggered this processing, if any (used for error - generation) - """ - buffer_id = buffer_id - 1 - if (buffer_id >= len(self._packet_buffer)) or (buffer_id < 0): - self.log.warn("Invalid output buffer id: %d", buffer_id + 1) - return - if self._packet_buffer[buffer_id] is None: - self.log.warn("Buffer %d has already been flushed", buffer_id + 1) - return - (packet, in_port) = self._packet_buffer[buffer_id] - self._process_actions_for_packet(actions, packet, in_port, ofp) - self._packet_buffer[buffer_id] = None - - def _process_actions_for_packet (self, actions, packet, in_port, ofp=None): - """ - process the output actions for a packet - - ofp is the message which triggered this processing, if any (used for error - generation) - """ - assert assert_type("packet", packet, (ethernet, bytes), none_ok=False) - if not isinstance(packet, ethernet): - packet = ethernet.unpack(packet) - - for action in actions: - #if action.type is ofp_action_resubmit: - # self.rx_packet(packet, in_port) - # return - h = self.action_handlers.get(action.type) - if h is None: - self.log.warn("Unknown action type: %x " % (action.type,)) - self.send_error(type=OFPET_BAD_ACTION, code=OFPBAC_BAD_TYPE, ofp=ofp) - return - packet = h(action, packet, in_port) - - def _flow_mod_add (self, flow_mod, connection, table): - """ - Process an OFPFC_ADD flow mod sent to the switch. - """ - match = flow_mod.match - priority = flow_mod.priority - - if flow_mod.flags & OFPFF_EMERG: - if flow_mod.idle_timeout != 0 or flow_mod.hard_timeout != 0: - # Emergency flow mod has non-zero timeouts. Do not add. - self.log.warn("Rejecting emergency flow with nonzero timeout") - self.send_error(type=OFPET_FLOW_MOD_FAILED, - code=OFPFMFC_BAD_EMERG_TIMEOUT, - ofp=flow_mod, connection=connection) - return - if flow_mod.flags & OFPFF_SEND_FLOW_REM: - # Emergency flows can't send removal messages, we we might want to - # reject this early. Sadly, there's no error code for this, so we just - # abuse EPERM. If we eventually support Nicira extended error codes, - # we should use one here. - self.log.warn("Rejecting emergency flow with flow removal flag") - self.send_error(type=OFPET_FLOW_MOD_FAILED, - code=OFPFMFC_EPERM, - ofp=flow_mod, connection=connection) - return - #NOTE: An error is sent anyways because the current implementation does - # not support emergency entries. - self.log.warn("Rejecting emergency flow (not supported)") - self.send_error(type=OFPET_FLOW_MOD_FAILED, - code=OFPFMFC_ALL_TABLES_FULL, - ofp=flow_mod, connection=connection) - return - - new_entry = TableEntry.from_flow_mod(flow_mod) - - if flow_mod.flags & OFPFF_CHECK_OVERLAP: - if table.check_for_overlapping_entry(new_entry): - # Another entry overlaps. Do not add. - self.send_error(type=OFPET_FLOW_MOD_FAILED, code=OFPFMFC_OVERLAP, - ofp=flow_mod, connection=connection) - return - - if flow_mod.command == OFPFC_ADD: - # Exactly matching entries have to be removed if OFPFC_ADD - table.remove_matching_entries(match, priority=priority, strict=True) - - if len(table) >= self.max_entries: - # Flow table is full. Respond with error message. 
- self.send_error(type=OFPET_FLOW_MOD_FAILED, - code=OFPFMFC_ALL_TABLES_FULL, - ofp=flow_mod, connection=connection) - return - - table.add_entry(new_entry) - - def _flow_mod_modify (self, flow_mod, connection, table, strict=False): - """ - Process an OFPFC_MODIFY flow mod sent to the switch. - """ - match = flow_mod.match - priority = flow_mod.priority - - modified = False - for entry in table.entries: - # update the actions field in the matching flows - if entry.is_matched_by(match, priority=priority, strict=strict): - entry.actions = flow_mod.actions - modified = True - - if not modified: - # if no matching entry is found, modify acts as add - self._flow_mod_add(flow_mod, connection, table) - - def _flow_mod_modify_strict (self, flow_mod, connection, table): - """ - Process an OFPFC_MODIFY_STRICT flow mod sent to the switch. - """ - self._flow_mod_modify(flow_mod, connection, table, strict=True) - - def _flow_mod_delete (self, flow_mod, connection, table, strict=False): - """ - Process an OFPFC_DELETE flow mod sent to the switch. - """ - match = flow_mod.match - priority = flow_mod.priority - - out_port = flow_mod.out_port - if out_port == OFPP_NONE: out_port = None # Don't filter - table.remove_matching_entries(match, priority=priority, strict=strict, - out_port=out_port, reason=OFPRR_DELETE) - - def _flow_mod_delete_strict (self, flow_mod, connection, table): - """ - Process an OFPFC_DELETE_STRICT flow mod sent to the switch. - """ - self._flow_mod_delete(flow_mod, connection, table, strict=True) - - def _action_output (self, action, packet, in_port): - self._output_packet(packet, action.port, in_port, action.max_len) - return packet - def _action_set_vlan_vid (self, action, packet, in_port): - if not isinstance(packet.payload, vlan): - vl = vlan() - vl.eth_type = packet.type - vl.payload = packet.payload - packet.type = ethernet.VLAN_TYPE - packet.payload = vl - packet.payload.id = action.vlan_vid - return packet - def _action_set_vlan_pcp (self, action, packet, in_port): - if not isinstance(packet.payload, vlan): - vl = vlan() - vl.payload = packet.payload - vl.eth_type = packet.type - packet.payload = vl - packet.type = ethernet.VLAN_TYPE - packet.payload.pcp = action.vlan_pcp - return packet - def _action_strip_vlan (self, action, packet, in_port): - if isinstance(packet.payload, vlan): - packet.type = packet.payload.eth_type - packet.payload = packet.payload.payload - return packet - def _action_set_dl_src (self, action, packet, in_port): - packet.src = action.dl_addr - return packet - def _action_set_dl_dst (self, action, packet, in_port): - packet.dst = action.dl_addr - return packet - def _action_set_nw_src (self, action, packet, in_port): - nw = packet.payload - if isinstance(nw, vlan): - nw = nw.payload - if isinstance(nw, ipv4): - nw.srcip = action.nw_addr - return packet - def _action_set_nw_dst (self, action, packet, in_port): - nw = packet.payload - if isinstance(nw, vlan): - nw = nw.payload - if isinstance(nw, ipv4): - nw.dstip = action.nw_addr - return packet - def _action_set_nw_tos (self, action, packet, in_port): - nw = packet.payload - if isinstance(nw, vlan): - nw = nw.payload - if isinstance(nw, ipv4): - nw.tos = action.nw_tos - return packet - def _action_set_tp_src (self, action, packet, in_port): - nw = packet.payload - if isinstance(nw, vlan): - nw = nw.payload - if isinstance(nw, ipv4): - tp = nw.payload - if isinstance(tp, udp) or isinstance(tp, tcp): - tp.srcport = action.tp_port - return packet - def _action_set_tp_dst (self, action, packet, in_port): - 
nw = packet.payload - if isinstance(nw, vlan): - nw = nw.payload - if isinstance(nw, ipv4): - tp = nw.payload - if isinstance(tp, udp) or isinstance(tp, tcp): - tp.dstport = action.tp_port - return packet - def _action_enqueue (self, action, packet, in_port): - self.log.warn("Enqueue not supported. Performing regular output.") - self._output_packet(packet, action.tp_port, in_port) - return packet -# def _action_push_mpls_tag (self, action, packet, in_port): -# bottom_of_stack = isinstance(packet.next, mpls) -# packet.next = mpls(prev = packet.pack()) -# if bottom_of_stack: -# packet.next.s = 1 -# packet.type = action.ethertype -# return packet -# def _action_pop_mpls_tag (self, action, packet, in_port): -# if not isinstance(packet.next, mpls): -# return packet -# if not isinstance(packet.next.next, str): -# packet.next.next = packet.next.next.pack() -# if action.ethertype in ethernet.type_parsers: -# packet.next = ethernet.type_parsers[action.ethertype](packet.next.next) -# else: -# packet.next = packet.next.next -# packet.ethertype = action.ethertype -# return packet -# def _action_set_mpls_label (self, action, packet, in_port): -# if not isinstance(packet.next, mpls): -# mock = ofp_action_push_mpls() -# packet = push_mpls_tag(mock, packet) -# packet.next.label = action.mpls_label -# return packet -# def _action_set_mpls_tc (self, action, packet, in_port): -# if not isinstance(packet.next, mpls): -# mock = ofp_action_push_mpls() -# packet = push_mpls_tag(mock, packet) -# packet.next.tc = action.mpls_tc -# return packet -# def _action_set_mpls_ttl (self, action, packet, in_port): -# if not isinstance(packet.next, mpls): -# mock = ofp_action_push_mpls() -# packet = push_mpls_tag(mock, packet) -# packet.next.ttl = action.mpls_ttl -# return packet -# def _action_dec_mpls_ttl (self, action, packet, in_port): -# if not isinstance(packet.next, mpls): -# return packet -# packet.next.ttl = packet.next.ttl - 1 -# return packet - - - def _stats_desc (self, ofp, connection): - try: - from pox.core import core - return ofp_desc_stats(mfr_desc="POX", - hw_desc=core._get_platform_info(), - sw_desc=core.version_string, - serial_num=str(self.dpid), - dp_desc=type(self).__name__) - except: - return ofp_desc_stats(mfr_desc="POX", - hw_desc="Unknown", - sw_desc="Unknown", - serial_num=str(self.dpid), - dp_desc=type(self).__name__) - - - def _stats_flow (self, ofp, connection): - if ofp.body.table_id not in (TABLE_ALL, 0): - return [] # No flows for other tables - out_port = ofp.body.out_port - if out_port == OFPP_NONE: out_port = None # Don't filter - return self.table.flow_stats(ofp.body.match, out_port) - - def _stats_aggregate (self, ofp, connection): - if ofp.body.table_id not in (TABLE_ALL, 0): - return [] # No flows for other tables - out_port = ofp.body.out_port - if out_port == OFPP_NONE: out_port = None # Don't filter - return self.table.aggregate_stats(ofp.body.match, out_port) - - def _stats_table (self, ofp, connection): - # Some of these may come from the actual table(s) in the future... 
- r = ofp_table_stats() - r.table_id = 0 - r.name = "Default" - r.wildcards = OFPFW_ALL - r.max_entries = self.max_entries - r.active_count = len(self.table) - r.lookup_count = self._lookup_count - r.matched_count = self._matched_count - return r - - def _stats_port (self, ofp, connection): - req = ofp.body - if req.port_no == OFPP_NONE: - return list(self.port_stats.values()) - else: - return self.port_stats[req.port_no] - - def _stats_queue (self, ofp, connection): - # We don't support queues whatsoever so either send an empty list or send - # an OFP_ERROR if an actual queue is requested. - req = ofp.body - #if req.port_no != OFPP_ALL: - # self.send_error(type=OFPET_QUEUE_OP_FAILED, code=OFPQOFC_BAD_PORT, - # ofp=ofp, connection=connection) - # Note: We don't care about this case for now, even if port_no is bogus. - if req.queue_id == OFPQ_ALL: - return [] - else: - self.send_error(type=OFPET_QUEUE_OP_FAILED, code=OFPQOFC_BAD_QUEUE, - ofp=ofp, connection=connection) - - - def __repr__ (self): - return "%s(dpid=%s, num_ports=%d)" % (type(self).__name__, - dpid_to_str(self.dpid), - len(self.ports)) - - -class SoftwareSwitch (SoftwareSwitchBase, EventMixin): - _eventMixin_events = set([DpPacketOut]) - - def _output_packet_physical (self, packet, port_no): - """ - send a packet out a single physical port - - This is called by the more general _output_packet(). - """ - self.raiseEvent(DpPacketOut(self, packet, self.ports[port_no])) - - -class ExpireMixin (object): - """ - Adds expiration to a switch - - Inherit *before* switch base. - """ - _expire_period = 2 - - def __init__ (self, *args, **kw): - expire_period = kw.pop('expire_period', self._expire_period) - super(ExpireMixin,self).__init__(*args, **kw) - if not expire_period: - # Disable - return - self._expire_timer = Timer(expire_period, - self.table.remove_expired_entries, - recurring=True) - - -class OFConnection (object): - """ - A codec for OpenFlow messages. - - Decodes and encodes OpenFlow messages (ofp_message) into byte arrays. - - Wraps an io_worker that does the actual io work, and calls a - receiver_callback function when a new message as arrived. - """ - - # Unlike of_01.Connection, this is persistent (at least until we implement - # a proper recoco Connection Listener loop) - # Globally unique identifier for the Connection instance - ID = 0 - - # See _error_handler for information the meanings of these - ERR_BAD_VERSION = 1 - ERR_NO_UNPACKER = 2 - ERR_BAD_LENGTH = 3 - ERR_EXCEPTION = 4 - - # These methods are called externally by IOWorker - def msg (self, m): - self.log.debug("%s %s", str(self), str(m)) - def err (self, m): - self.log.error("%s %s", str(self), str(m)) - def info (self, m): - self.log.info("%s %s", str(self), str(m)) - - def __init__ (self, io_worker): - self.starting = True # No data yet - self.io_worker = io_worker - self.io_worker.rx_handler = self.read - self.controller_id = io_worker.socket.getpeername() - OFConnection.ID += 1 - self.ID = OFConnection.ID - self.log = logging.getLogger("ControllerConnection(id=%d)" % (self.ID,)) - self.unpackers = make_type_to_unpacker_table() - - self.on_message_received = None - - def set_message_handler (self, handler): - self.on_message_received = handler - - def send (self, data): - """ - Send raw data to the controller. - - Generally, data is a bytes object. If not, we check if it has a pack() - method and call it (hoping the result will be a bytes object). 
This - way, you can just pass one of the OpenFlow objects from the OpenFlow - library to it and get the expected result, for example. - """ - if type(data) is not bytes: - if hasattr(data, 'pack'): - data = data.pack() - self.io_worker.send(data) - - def read (self, io_worker): - #FIXME: Do we need to pass io_worker here? - while True: - message = io_worker.peek() - if len(message) < 4: - break - - # Parse head of OpenFlow message by hand - ofp_version = ord(message[0]) - ofp_type = ord(message[1]) - - if ofp_version != OFP_VERSION: - info = ofp_version - r = self._error_handler(self.ERR_BAD_VERSION, info) - if r is False: break - continue - - message_length = ord(message[2]) << 8 | ord(message[3]) - if message_length > len(message): - break - - if ofp_type >= 0 and ofp_type < len(self.unpackers): - unpacker = self.unpackers[ofp_type] - else: - unpacker = None - if unpacker is None: - info = (ofp_type, message_length) - r = self._error_handler(self.ERR_NO_UNPACKER, info) - if r is False: break - io_worker.consume_receive_buf(message_length) - continue - - new_offset, msg_obj = self.unpackers[ofp_type](message, 0) - if new_offset != message_length: - info = (msg_obj, message_length, new_offset) - r = self._error_handler(self.ERR_BAD_LENGTH, info) - if r is False: break - # Assume sender was right and we should skip what it told us to. - io_worker.consume_receive_buf(message_length) - continue - - io_worker.consume_receive_buf(message_length) - self.starting = False - - if self.on_message_received is None: - raise RuntimeError("on_message_receieved hasn't been set yet!") - - try: - self.on_message_received(self, msg_obj) - except Exception as e: - info = (e, message[:message_length], msg_obj) - r = self._error_handler(self.ERR_EXCEPTION, info) - if r is False: break - continue - - return True - - def _error_handler (self, reason, info): - """ - Called when read() has an error - - reason is one of OFConnection.ERR_X - - info depends on reason: - ERR_BAD_VERSION: claimed version number - ERR_NO_UNPACKER: (claimed message type, claimed length) - ERR_BAD_LENGTH: (unpacked message, claimed length, unpacked length) - ERR_EXCEPTION: (exception, raw message, unpacked message) - - Return False to halt processing of subsequent data (makes sense to - do this if you called connection.close() here, for example). 
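The hand parsing in read() above follows the fixed 8-byte OpenFlow header (version, type, length, xid, all big-endian). The same layout, shown standalone with struct:

    # Illustrative parse of an OpenFlow header, as read() does by hand
    import struct
    import pox.openflow.libopenflow_01 as of

    raw = of.ofp_hello(xid=7).pack()
    version, msg_type, length, xid = struct.unpack_from("!BBHL", raw)
    print(version == of.OFP_VERSION, msg_type == of.OFPT_HELLO,
          length == len(raw), xid == 7)        # True True True True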
- """ - if reason == OFConnection.ERR_BAD_VERSION: - ofp_version = info - self.log.warn('Unsupported OpenFlow version 0x%02x', info) - if self.starting: - message = self.io_worker.peek() - err = ofp_error(type=OFPET_HELLO_FAILED, code=OFPHFC_INCOMPATIBLE) - #err = ofp_error(type=OFPET_BAD_REQUEST, code=OFPBRC_BAD_VERSION) - err.xid = self._extract_message_xid(message) - err.data = 'Version unsupported' - self.send(err) - self.close() - return False - elif reason == OFConnection.ERR_NO_UNPACKER: - ofp_type, message_length = info - self.log.warn('Unsupported OpenFlow message type 0x%02x', ofp_type) - message = self.io_worker.peek() - err = ofp_error(type=OFPET_BAD_REQUEST, code=OFPBRC_BAD_TYPE) - err.xid = self._extract_message_xid(message) - err.data = message[:message_length] - self.send(err) - elif reason == OFConnection.ERR_BAD_LENGTH: - msg_obj, message_length, new_offset = info - t = type(msg_obj).__name__ - self.log.error('Different idea of message length for %s ' - '(us:%s them:%s)' % (t, new_offset, message_length)) - message = self.io_worker.peek() - err = ofp_error(type=OFPET_BAD_REQUEST, code=OFPBRC_BAD_LEN) - err.xid = self._extract_message_xid(message) - err.data = message[:message_length] - self.send(err) - elif reason == OFConnection.ERR_EXCEPTION: - ex, raw_message, msg_obj = info - t = type(ex).__name__ - self.log.exception('Exception handling %s' % (t,)) - else: - self.log.error("Unhandled error") - self.close() - return False - - def _extract_message_xid (self, message): - """ - Extract and return the xid (and length) of an openflow message. - """ - xid = 0 - if len(message) >= 8: - #xid = struct.unpack_from('!L', message, 4)[0] - message_length, xid = struct.unpack_from('!HL', message, 2) - elif len(message) >= 4: - message_length = ord(message[2]) << 8 | ord(message[3]) - else: - message_length = len(message) - return xid - - def close (self): - self.io_worker.shutdown() - - def get_controller_id (self): - """ - Return a tuple of the controller's (address, port) we are connected to - """ - return self.controller_id - - def __str__ (self): - return "[Con " + str(self.ID) + "]" - - -class SwitchFeatures (object): - """ - Stores switch features - - Keeps settings for switch capabilities and supported actions. - Automatically has attributes of the form ".act_foo" for all OFPAT_FOO, - and ".cap_foo" for all OFPC_FOO (as gathered from libopenflow). 
- """ - def __init__ (self, **kw): - self._cap_info = {} - for val,name in ofp_capabilities_map.items(): - name = name[5:].lower() # strip OFPC_ - name = "cap_" + name - setattr(self, name, False) - self._cap_info[name] = val - - self._act_info = {} - for val,name in ofp_action_type_map.items(): - name = name[6:].lower() # strip OFPAT_ - name = "act_" + name - setattr(self, name, False) - self._act_info[name] = val - - self._locked = True - - initHelper(self, kw) - - def __setattr__ (self, attr, value): - if getattr(self, '_locked', False): - if not hasattr(self, attr): - raise AttributeError("No such attribute as '%s'" % (attr,)) - return super(SwitchFeatures,self).__setattr__(attr, value) - - @property - def capability_bits (self): - """ - Value used in features reply - """ - return sum( (v if getattr(self, k) else 0) - for k,v in self._cap_info.items() ) - - @property - def action_bits (self): - """ - Value used in features reply - """ - return sum( (1<<v if getattr(self, k) else 0) - for k,v in self._act_info.items() ) - - def __str__ (self): - l = list(k for k in self._cap_info if getattr(self, k)) - l += list(k for k in self._act_info if getattr(self, k)) - return ",".join(l) diff --git a/pox/forwarding/hub.py b/pox/forwarding/hub.py @@ -1,54 +0,0 @@ -# Copyright 2012 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Turns your complex OpenFlow switches into stupid hubs. - -There are actually two hubs in here -- a reactive one and a proactive one. -""" - -from pox.core import core -import pox.openflow.libopenflow_01 as of -from pox.lib.util import dpidToStr - -log = core.getLogger() - - -def _handle_ConnectionUp (event): - """ - Be a proactive hub by telling every connected switch to flood all packets - """ - msg = of.ofp_flow_mod() - msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD)) - event.connection.send(msg) - log.info("Hubifying %s", dpidToStr(event.dpid)) - - -def _handle_PacketIn (event): - """ - Be a reactive hub by flooding every incoming packet - """ - msg = of.ofp_packet_out() - msg.data = event.ofp - msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD)) - event.connection.send(msg) - - -def launch (reactive = False): - if reactive: - core.openflow.addListenerByName("PacketIn", _handle_PacketIn) - log.info("Reactive hub running.") - else: - core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp) - log.info("Proactive hub running.") diff --git a/pox/forwarding/l2_flowvisor.py b/pox/forwarding/l2_flowvisor.py @@ -1,136 +0,0 @@ -# Copyright 2012 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A modification of l2_pairs to work with FlowVisor on looped topologies. - -The spanning_tree component doesn't work with FlowVisor because FlowVisor -does not virtualize the NO_FLOOD bit on switch ports, which is what -the spanning_tree component would need to work properly. - -This hack of l2_pairs uses the spanning tree construction from the -spanning_tree component, but instead of using it to modify port bits, -instead of ever actually flooding, it "simulates" flooding by just -adding all of the ports on the spanning tree as individual output -actions. - -Requires discovery. -""" - -# These next two imports are common POX convention -from pox.core import core -import pox.openflow.libopenflow_01 as of -import pox.openflow.spanning_tree as spanning_tree - -# Even a simple usage of the logger is much nicer than print! -log = core.getLogger() - - -# This table maps (switch,MAC-addr) pairs to the port on 'switch' at -# which we last saw a packet *from* 'MAC-addr'. -# (In this case, we use a Connection object for the switch.) -table = {} - - -# A spanning tree to be used for flooding -tree = {} - -def _handle_links (event): - """ - Handle discovery link events to update the spanning tree - """ - global tree - tree = spanning_tree._calc_spanning_tree() - - -def _handle_PacketIn (event): - """ - Handle messages the switch has sent us because it has no - matching rule. - """ - - def drop (): - # Kill buffer on switch - if event.ofp.buffer_id is not None: - msg = of.ofp_packet_out() - msg.buffer_id = event.ofp.buffer_id - msg.in_port = event.port - event.connection.send(msg) - - packet = event.parsed - - if packet.type == packet.LLDP_TYPE or packet.dst.isBridgeFiltered(): - return drop() - - # Learn the source - table[(event.connection,packet.src)] = event.port - - if not packet.dst.is_multicast: - dst_port = table.get((event.connection,packet.dst)) - else: - # Ideally, we'd install a flow entries that output multicasts - # to all ports on the spanning tree. - dst_port = None - - if dst_port is None: - # We don't know where the destination is yet. So, we'll just - # send the packet out all ports in the spanning tree - # and hope the destination is out there somewhere. :) - msg = of.ofp_packet_out(data = event.ofp) - - tree_ports = [p[1] for p in tree.get(event.dpid, [])] - - for p in event.connection.ports: - if p >= of.OFPP_MAX: - # Not a normal port - continue - - if not core.openflow_discovery.is_edge_port(event.dpid, p): - # If the port isn't a switch-to-switch port, it's fine to flood - # through it. But if it IS a switch-to-switch port, we only - # want to use it if it's on the spanning tree. - if p not in tree_ports: - continue - - msg.actions.append(of.ofp_action_output(port = p)) - - event.connection.send(msg) - - else: - # Since we know the switch ports for both the source and dest - # MACs, we can install rules for both directions. - msg = of.ofp_flow_mod() - msg.match.dl_dst = packet.src - msg.match.dl_src = packet.dst - msg.actions.append(of.ofp_action_output(port = event.port)) - event.connection.send(msg) - - # This is the packet that just came in -- we want to - # install the rule and also resend the packet. 
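The port-selection loop above (flood out every edge port, but only those switch-to-switch ports that lie on the spanning tree) is the heart of the "simulated flood" this component describes. Here is a small standalone sketch of just that selection; flood_ports is a hypothetical helper, and max_port stands in for the of.OFPP_MAX check in the real code.

def flood_ports(all_ports, edge_ports, tree_ports, max_port=0xff00):
    # Keep every normal edge (host-facing) port, plus only those
    # switch-to-switch ports that are on the spanning tree.
    out = []
    for p in all_ports:
        if p >= max_port:
            continue                       # skip special/virtual ports
        if p in edge_ports or p in tree_ports:
            out.append(p)
    return out

print(flood_ports([1, 2, 3, 4], edge_ports={1, 2}, tree_ports={3}))   # [1, 2, 3]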
- msg = of.ofp_flow_mod() - msg.data = event.ofp # Forward the incoming packet - msg.match.dl_src = packet.src - msg.match.dl_dst = packet.dst - msg.actions.append(of.ofp_action_output(port = dst_port)) - event.connection.send(msg) - - log.debug("Installing %s <-> %s" % (packet.src, packet.dst)) - - -def launch (): - def start (): - core.openflow_discovery.addListenerByName("LinkEvent", _handle_links) - core.openflow.addListenerByName("PacketIn", _handle_PacketIn) - log.info("FlowVisor Pair-Learning switch running.") - core.call_when_ready(start, "openflow_discovery") diff --git a/pox/forwarding/l2_learning.py b/pox/forwarding/l2_learning.py @@ -1,215 +0,0 @@ -# Copyright 2011-2012 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -An L2 learning switch. - -It is derived from one written live for an SDN crash course. -It is somewhat similar to NOX's pyswitch in that it installs -exact-match rules for each flow. -""" - -from pox.core import core -import pox.openflow.libopenflow_01 as of -from pox.lib.util import dpid_to_str, str_to_dpid -from pox.lib.util import str_to_bool -import time - -log = core.getLogger() - -# We don't want to flood immediately when a switch connects. -# Can be overridden on commandline. -_flood_delay = 0 - -class LearningSwitch (object): - """ - The learning switch "brain" associated with a single OpenFlow switch. - - When we see a packet, we'd like to output it on a port which will - eventually lead to the destination. To accomplish this, we build a - table that maps addresses to ports. - - We populate the table by observing traffic. When we see a packet - from some source coming from some port, we know that source is out - that port. - - When we want to forward traffic, we look up the destination in our - table. If we don't know the port, we simply send the message out - all ports except the one it came in on. (In the presence of loops, - this is bad!). - - In short, our algorithm looks like this: - - For each packet from the switch: - 1) Use source address and switch port to update address/port table - 2) Is transparent = False and either Ethertype is LLDP or the packet's - destination address is a Bridge Filtered address? - Yes: - 2a) Drop packet -- don't forward link-local traffic (LLDP, 802.1x) - DONE - 3) Is destination multicast? - Yes: - 3a) Flood the packet - DONE - 4) Port for destination address in our address/port table? - No: - 4a) Flood the packet - DONE - 5) Is output port the same as input port?
- Yes: - 5a) Drop packet and similar ones for a while - 6) Install flow table entry in the switch so that this - flow goes out the appopriate port - 6a) Send the packet out appropriate port - """ - def __init__ (self, connection, transparent): - # Switch we'll be adding L2 learning switch capabilities to - self.connection = connection - self.transparent = transparent - - # Our table - self.macToPort = {} - - # We want to hear PacketIn messages, so we listen - # to the connection - connection.addListeners(self) - - # We just use this to know when to log a helpful message - self.hold_down_expired = _flood_delay == 0 - - #log.debug("Initializing LearningSwitch, transparent=%s", - # str(self.transparent)) - - def _handle_PacketIn (self, event): - """ - Handle packet in messages from the switch to implement above algorithm. - """ - - packet = event.parsed - - def flood (message = None): - """ Floods the packet """ - msg = of.ofp_packet_out() - if time.time() - self.connection.connect_time >= _flood_delay: - # Only flood if we've been connected for a little while... - - if self.hold_down_expired is False: - # Oh yes it is! - self.hold_down_expired = True - log.info("%s: Flood hold-down expired -- flooding", - dpid_to_str(event.dpid)) - - if message is not None: log.debug(message) - #log.debug("%i: flood %s -> %s", event.dpid,packet.src,packet.dst) - # OFPP_FLOOD is optional; on some switches you may need to change - # this to OFPP_ALL. - msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD)) - else: - pass - #log.info("Holding down flood for %s", dpid_to_str(event.dpid)) - msg.data = event.ofp - msg.in_port = event.port - self.connection.send(msg) - - def drop (duration = None): - """ - Drops this packet and optionally installs a flow to continue - dropping similar ones for a while - """ - if duration is not None: - if not isinstance(duration, tuple): - duration = (duration,duration) - msg = of.ofp_flow_mod() - msg.match = of.ofp_match.from_packet(packet) - msg.idle_timeout = duration[0] - msg.hard_timeout = duration[1] - msg.buffer_id = event.ofp.buffer_id - self.connection.send(msg) - elif event.ofp.buffer_id is not None: - msg = of.ofp_packet_out() - msg.buffer_id = event.ofp.buffer_id - msg.in_port = event.port - self.connection.send(msg) - - self.macToPort[packet.src] = event.port # 1 - - if not self.transparent: # 2 - if packet.type == packet.LLDP_TYPE or packet.dst.isBridgeFiltered(): - drop() # 2a - return - - if packet.dst.is_multicast: - flood() # 3a - else: - if packet.dst not in self.macToPort: # 4 - flood("Port for %s unknown -- flooding" % (packet.dst,)) # 4a - else: - port = self.macToPort[packet.dst] - if port == event.port: # 5 - # 5a - log.warning("Same port for packet from %s -> %s on %s.%s. Drop." - % (packet.src, packet.dst, dpid_to_str(event.dpid), port)) - drop(10) - return - # 6 - log.debug("installing flow for %s.%i -> %s.%i" % - (packet.src, event.port, packet.dst, port)) - msg = of.ofp_flow_mod() - msg.match = of.ofp_match.from_packet(packet, event.port) - msg.idle_timeout = 10 - msg.hard_timeout = 30 - msg.actions.append(of.ofp_action_output(port = port)) - msg.data = event.ofp # 6a - self.connection.send(msg) - - -class l2_learning (object): - """ - Waits for OpenFlow switches to connect and makes them learning switches. 
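The numbered algorithm in the LearningSwitch docstring above boils down to a single learn-then-forward decision per packet. As a minimal sketch of that decision (decide is a hypothetical name, and it ignores the transparent/LLDP check and the flood hold-down that the real component adds), steps 1 and 3-5 look like this in plain Python:

def decide(mac_to_port, src, dst, in_port, is_multicast=False):
    # Return 'flood', 'drop', or an output port for one packet,
    # learning the source MAC first.
    mac_to_port[src] = in_port            # 1) learn the source
    if is_multicast:
        return "flood"                    # 3a) multicast: flood
    out_port = mac_to_port.get(dst)
    if out_port is None:
        return "flood"                    # 4a) destination unknown: flood
    if out_port == in_port:
        return "drop"                     # 5a) would go back out the same port
    return out_port                       # 6) forward (and install a flow)

table = {}
print(decide(table, "aa:aa", "bb:bb", 1))   # 'flood' (bb:bb not learned yet)
print(decide(table, "bb:bb", "aa:aa", 2))   # 1 (aa:aa was learned on port 1)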
- """ - def __init__ (self, transparent, ignore = None): - """ - Initialize - - See LearningSwitch for meaning of 'transparent' - 'ignore' is an optional list/set of DPIDs to ignore - """ - core.openflow.addListeners(self) - self.transparent = transparent - self.ignore = set(ignore) if ignore else () - - def _handle_ConnectionUp (self, event): - if event.dpid in self.ignore: - log.debug("Ignoring connection %s" % (event.connection,)) - return - log.debug("Connection %s" % (event.connection,)) - LearningSwitch(event.connection, self.transparent) - - -def launch (transparent=False, hold_down=_flood_delay, ignore = None): - """ - Starts an L2 learning switch. - """ - try: - global _flood_delay - _flood_delay = int(str(hold_down), 10) - assert _flood_delay >= 0 - except: - raise RuntimeError("Expected hold-down to be a number") - - if ignore: - ignore = ignore.replace(',', ' ').split() - ignore = set(str_to_dpid(dpid) for dpid in ignore) - - core.registerNew(l2_learning, str_to_bool(transparent), ignore) diff --git a/pox/forwarding/l2_multi.py b/pox/forwarding/l2_multi.py @@ -1,506 +0,0 @@ -# Copyright 2012-2013 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A shortest-path forwarding application. - -This is a standalone L2 switch that learns ethernet addresses -across the entire network and picks short paths between them. - -You shouldn't really write an application this way -- you should -keep more state in the controller (that is, your flow tables), -and/or you should make your topology more static. However, this -does (mostly) work. :) - -Depends on openflow.discovery -Works with openflow.spanning_tree -""" - -from pox.core import core -import pox.openflow.libopenflow_01 as of -from pox.lib.revent import * -from pox.lib.recoco import Timer -from collections import defaultdict -from pox.openflow.discovery import Discovery -from pox.lib.util import dpid_to_str -import time - -log = core.getLogger() - -# Adjacency map. [sw1][sw2] -> port from sw1 to sw2 -adjacency = defaultdict(lambda:defaultdict(lambda:None)) - -# Switches we know of. [dpid] -> Switch -switches = {} - -# ethaddr -> (switch, port) -mac_map = {} - -# [sw1][sw2] -> (distance, intermediate) -path_map = defaultdict(lambda:defaultdict(lambda:(None,None))) - -# Waiting path. (dpid,xid)->WaitingPath -waiting_paths = {} - -# Time to not flood in seconds -FLOOD_HOLDDOWN = 5 - -# Flow timeouts -FLOW_IDLE_TIMEOUT = 10 -FLOW_HARD_TIMEOUT = 30 - -# How long is allowable to set up a path? 
-PATH_SETUP_TIME = 4 - - -def _calc_paths (): - """ - Essentially Floyd-Warshall algorithm - """ - - def dump (): - for i in sws: - for j in sws: - a = path_map[i][j][0] - #a = adjacency[i][j] - if a is None: a = "*" - print(a, end=' ') - print() - - sws = list(switches.values()) - path_map.clear() - for k in sws: - for j,port in adjacency[k].items(): - if port is None: continue - path_map[k][j] = (1,None) - path_map[k][k] = (0,None) # distance, intermediate - - #dump() - - for k in sws: - for i in sws: - for j in sws: - if path_map[i][k][0] is not None: - if path_map[k][j][0] is not None: - # i -> k -> j exists - ikj_dist = path_map[i][k][0]+path_map[k][j][0] - if path_map[i][j][0] is None or ikj_dist < path_map[i][j][0]: - # i -> k -> j is better than existing - path_map[i][j] = (ikj_dist, k) - - #print "--------------------" - #dump() - - -def _get_raw_path (src, dst): - """ - Get a raw path (just a list of nodes to traverse) - """ - if len(path_map) == 0: _calc_paths() - if src is dst: - # We're here! - return [] - if path_map[src][dst][0] is None: - return None - intermediate = path_map[src][dst][1] - if intermediate is None: - # Directly connected - return [] - return _get_raw_path(src, intermediate) + [intermediate] + \ - _get_raw_path(intermediate, dst) - - -def _check_path (p): - """ - Make sure that a path is actually a string of nodes with connected ports - - returns True if path is valid - """ - for a,b in zip(p[:-1],p[1:]): - if adjacency[a[0]][b[0]] != a[2]: - return False - if adjacency[b[0]][a[0]] != b[1]: - return False - return True - - -def _get_path (src, dst, first_port, final_port): - """ - Gets a cooked path -- a list of (node,in_port,out_port) - """ - # Start with a raw path... - if src == dst: - path = [src] - else: - path = _get_raw_path(src, dst) - if path is None: return None - path = [src] + path + [dst] - - # Now add the ports - r = [] - in_port = first_port - for s1,s2 in zip(path[:-1],path[1:]): - out_port = adjacency[s1][s2] - r.append((s1,in_port,out_port)) - in_port = adjacency[s2][s1] - r.append((dst,in_port,final_port)) - - assert _check_path(r), "Illegal path!" - - return r - - -class WaitingPath (object): - """ - A path which is waiting for its path to be established - """ - def __init__ (self, path, packet): - """ - xids is a sequence of (dpid,xid) - first_switch is the DPID where the packet came from - packet is something that can be sent in a packet_out - """ - self.expires_at = time.time() + PATH_SETUP_TIME - self.path = path - self.first_switch = path[0][0].dpid - self.xids = set() - self.packet = packet - - if len(waiting_paths) > 1000: - WaitingPath.expire_waiting_paths() - - def add_xid (self, dpid, xid): - self.xids.add((dpid,xid)) - waiting_paths[(dpid,xid)] = self - - @property - def is_expired (self): - return time.time() >= self.expires_at - - def notify (self, event): - """ - Called when a barrier has been received - """ - self.xids.discard((event.dpid,event.xid)) - if len(self.xids) == 0: - # Done! 
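For reference, the path machinery above (_calc_paths as "essentially Floyd-Warshall" plus the recursive expansion in _get_raw_path) can be illustrated by the following self-contained sketch. The helper names calc_paths and raw_path are hypothetical; only the node lists matter here, whereas the real code works on Switch objects and port numbers.

from collections import defaultdict

def calc_paths(nodes, adjacency):
    # All-pairs shortest paths, keeping one intermediate node per pair
    # so the path can be reconstructed afterwards.
    path_map = defaultdict(lambda: defaultdict(lambda: (None, None)))
    for k in nodes:
        for j in adjacency.get(k, {}):
            path_map[k][j] = (1, None)
        path_map[k][k] = (0, None)
    for k in nodes:
        for i in nodes:
            for j in nodes:
                if path_map[i][k][0] is None or path_map[k][j][0] is None:
                    continue
                ikj = path_map[i][k][0] + path_map[k][j][0]
                if path_map[i][j][0] is None or ikj < path_map[i][j][0]:
                    path_map[i][j] = (ikj, k)
    return path_map

def raw_path(path_map, src, dst):
    # Recursively expand intermediates into the node list between src and dst.
    if src == dst:
        return []
    if path_map[src][dst][0] is None:
        return None
    mid = path_map[src][dst][1]
    if mid is None:
        return []            # directly connected
    return raw_path(path_map, src, mid) + [mid] + raw_path(path_map, mid, dst)

adj = {"s1": {"s2": 1}, "s2": {"s1": 1, "s3": 2}, "s3": {"s2": 1}}
pm = calc_paths(["s1", "s2", "s3"], adj)
print(raw_path(pm, "s1", "s3"))   # ['s2']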
- if self.packet: - log.debug("Sending delayed packet out %s" - % (dpid_to_str(self.first_switch),)) - msg = of.ofp_packet_out(data=self.packet, - action=of.ofp_action_output(port=of.OFPP_TABLE)) - core.openflow.sendToDPID(self.first_switch, msg) - - core.l2_multi.raiseEvent(PathInstalled(self.path)) - - - @staticmethod - def expire_waiting_paths (): - packets = set(waiting_paths.values()) - killed = 0 - for p in packets: - if p.is_expired: - killed += 1 - for entry in p.xids: - waiting_paths.pop(entry, None) - if killed: - log.error("%i paths failed to install" % (killed,)) - - -class PathInstalled (Event): - """ - Fired when a path is installed - """ - def __init__ (self, path): - self.path = path - - -class Switch (EventMixin): - def __init__ (self): - self.connection = None - self.ports = None - self.dpid = None - self._listeners = None - self._connected_at = None - - def __repr__ (self): - return dpid_to_str(self.dpid) - - def _install (self, switch, in_port, out_port, match, buf = None): - msg = of.ofp_flow_mod() - msg.match = match - msg.match.in_port = in_port - msg.idle_timeout = FLOW_IDLE_TIMEOUT - msg.hard_timeout = FLOW_HARD_TIMEOUT - msg.actions.append(of.ofp_action_output(port = out_port)) - msg.buffer_id = buf - switch.connection.send(msg) - - def _install_path (self, p, match, packet_in=None): - wp = WaitingPath(p, packet_in) - for sw,in_port,out_port in p: - self._install(sw, in_port, out_port, match) - msg = of.ofp_barrier_request() - sw.connection.send(msg) - wp.add_xid(sw.dpid,msg.xid) - - def install_path (self, dst_sw, last_port, match, event): - """ - Attempts to install a path between this switch and some destination - """ - p = _get_path(self, dst_sw, event.port, last_port) - if p is None: - log.warning("Can't get from %s to %s", match.dl_src, match.dl_dst) - - import pox.lib.packet as pkt - - if (match.dl_type == pkt.ethernet.IP_TYPE and - event.parsed.find('ipv4')): - # It's IP -- let's send a destination unreachable - log.debug("Dest unreachable (%s -> %s)", - match.dl_src, match.dl_dst) - - from pox.lib.addresses import EthAddr - e = pkt.ethernet() - e.src = EthAddr(dpid_to_str(self.dpid)) #FIXME: Hmm... 
- e.dst = match.dl_src - e.type = e.IP_TYPE - ipp = pkt.ipv4() - ipp.protocol = ipp.ICMP_PROTOCOL - ipp.srcip = match.nw_dst #FIXME: Ridiculous - ipp.dstip = match.nw_src - icmp = pkt.icmp() - icmp.type = pkt.ICMP.TYPE_DEST_UNREACH - icmp.code = pkt.ICMP.CODE_UNREACH_HOST - orig_ip = event.parsed.find('ipv4') - - d = orig_ip.pack() - d = d[:orig_ip.hl * 4 + 8] - import struct - d = struct.pack("!HH", 0,0) + d #FIXME: MTU - icmp.payload = d - ipp.payload = icmp - e.payload = ipp - msg = of.ofp_packet_out() - msg.actions.append(of.ofp_action_output(port = event.port)) - msg.data = e.pack() - self.connection.send(msg) - - return - - log.debug("Installing path for %s -> %s %04x (%i hops)", - match.dl_src, match.dl_dst, match.dl_type, len(p)) - - # We have a path -- install it - self._install_path(p, match, event.ofp) - - # Now reverse it and install it backwards - # (we'll just assume that will work) - p = [(sw,out_port,in_port) for sw,in_port,out_port in p] - self._install_path(p, match.flip()) - - - def _handle_PacketIn (self, event): - def flood (): - """ Floods the packet """ - if self.is_holding_down: - log.warning("Not flooding -- holddown active") - msg = of.ofp_packet_out() - # OFPP_FLOOD is optional; some switches may need OFPP_ALL - msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD)) - msg.buffer_id = event.ofp.buffer_id - msg.in_port = event.port - self.connection.send(msg) - - def drop (): - # Kill the buffer - if event.ofp.buffer_id is not None: - msg = of.ofp_packet_out() - msg.buffer_id = event.ofp.buffer_id - event.ofp.buffer_id = None # Mark is dead - msg.in_port = event.port - self.connection.send(msg) - - packet = event.parsed - - loc = (self, event.port) # Place we saw this ethaddr - oldloc = mac_map.get(packet.src) # Place we last saw this ethaddr - - if packet.effective_ethertype == packet.LLDP_TYPE: - drop() - return - - if oldloc is None: - if packet.src.is_multicast == False: - mac_map[packet.src] = loc # Learn position for ethaddr - log.debug("Learned %s at %s.%i", packet.src, loc[0], loc[1]) - elif oldloc != loc: - # ethaddr seen at different place! - if core.openflow_discovery.is_edge_port(loc[0].dpid, loc[1]): - # New place is another "plain" port (probably) - log.debug("%s moved from %s.%i to %s.%i?", packet.src, - dpid_to_str(oldloc[0].dpid), oldloc[1], - dpid_to_str( loc[0].dpid), loc[1]) - if packet.src.is_multicast == False: - mac_map[packet.src] = loc # Learn position for ethaddr - log.debug("Learned %s at %s.%i", packet.src, loc[0], loc[1]) - elif packet.dst.is_multicast == False: - # New place is a switch-to-switch port! - # Hopefully, this is a packet we're flooding because we didn't - # know the destination, and not because it's somehow not on a - # path that we expect it to be on. - # If spanning_tree is running, we might check that this port is - # on the spanning tree (it should be). - if packet.dst in mac_map: - # Unfortunately, we know the destination. It's possible that - # we learned it while it was in flight, but it's also possible - # that something has gone wrong. 
- log.warning("Packet from %s to known destination %s arrived " - "at %s.%i without flow", packet.src, packet.dst, - dpid_to_str(self.dpid), event.port) - - - if packet.dst.is_multicast: - log.debug("Flood multicast from %s", packet.src) - flood() - else: - if packet.dst not in mac_map: - log.debug("%s unknown -- flooding" % (packet.dst,)) - flood() - else: - dest = mac_map[packet.dst] - match = of.ofp_match.from_packet(packet) - self.install_path(dest[0], dest[1], match, event) - - def disconnect (self): - if self.connection is not None: - log.debug("Disconnect %s" % (self.connection,)) - self.connection.removeListeners(self._listeners) - self.connection = None - self._listeners = None - - def connect (self, connection): - if self.dpid is None: - self.dpid = connection.dpid - assert self.dpid == connection.dpid - if self.ports is None: - self.ports = connection.features.ports - self.disconnect() - log.debug("Connect %s" % (connection,)) - self.connection = connection - self._listeners = self.listenTo(connection) - self._connected_at = time.time() - - @property - def is_holding_down (self): - if self._connected_at is None: return True - if time.time() - self._connected_at > FLOOD_HOLDDOWN: - return False - return True - - def _handle_ConnectionDown (self, event): - self.disconnect() - - -class l2_multi (EventMixin): - - _eventMixin_events = set([ - PathInstalled, - ]) - - def __init__ (self): - # Listen to dependencies (specifying priority 0 for openflow) - core.listen_to_dependencies(self, listen_args={'openflow':{'priority':0}}) - - def _handle_openflow_discovery_LinkEvent (self, event): - def flip (link): - return Discovery.Link(link[2],link[3], link[0],link[1]) - - l = event.link - sw1 = switches[l.dpid1] - sw2 = switches[l.dpid2] - - # Invalidate all flows and path info. - # For link adds, this makes sure that if a new link leads to an - # improved path, we use it. - # For link removals, this makes sure that we don't use a - # path that may have been broken. - #NOTE: This could be radically improved! (e.g., not *ALL* paths break) - clear = of.ofp_flow_mod(command=of.OFPFC_DELETE) - for sw in switches.values(): - if sw.connection is None: continue - sw.connection.send(clear) - path_map.clear() - - if event.removed: - # This link no longer okay - if sw2 in adjacency[sw1]: del adjacency[sw1][sw2] - if sw1 in adjacency[sw2]: del adjacency[sw2][sw1] - - # But maybe there's another way to connect these... - for ll in core.openflow_discovery.adjacency: - if ll == event.link: continue - if ll.dpid1 == l.dpid1 and ll.dpid2 == l.dpid2: - if flip(ll) in core.openflow_discovery.adjacency: - # Yup, link goes both ways - adjacency[sw1][sw2] = ll.port1 - adjacency[sw2][sw1] = ll.port2 - # Fixed -- new link chosen to connect these - break - else: - # If we already consider these nodes connected, we can - # ignore this link up. - # Otherwise, we might be interested... - if adjacency[sw1][sw2] is None: - # These previously weren't connected. If the link - # exists in both directions, we consider them connected now. - if flip(l) in core.openflow_discovery.adjacency: - # Yup, link goes both ways -- connected! - adjacency[sw1][sw2] = l.port1 - adjacency[sw2][sw1] = l.port2 - - # If we have learned a MAC on this port which we now know to - # be connected to a switch, unlearn it. 
- bad_macs = set() - for mac,(sw,port) in mac_map.items(): - if sw is sw1 and port == l.port1: bad_macs.add(mac) - if sw is sw2 and port == l.port2: bad_macs.add(mac) - for mac in bad_macs: - log.debug("Unlearned %s", mac) - del mac_map[mac] - - def _handle_openflow_ConnectionUp (self, event): - sw = switches.get(event.dpid) - if sw is None: - # New switch - sw = Switch() - switches[event.dpid] = sw - sw.connect(event.connection) - else: - sw.connect(event.connection) - - def _handle_openflow_BarrierIn (self, event): - wp = waiting_paths.pop((event.dpid,event.xid), None) - if not wp: - #log.info("No waiting packet %s,%s", event.dpid, event.xid) - return - #log.debug("Notify waiting packet %s,%s", event.dpid, event.xid) - wp.notify(event) - - -def launch (): - core.registerNew(l2_multi) - - timeout = min(max(PATH_SETUP_TIME, 5) * 2, 15) - Timer(timeout, WaitingPath.expire_waiting_paths, recurring=True) diff --git a/pox/forwarding/l2_nx.py b/pox/forwarding/l2_nx.py @@ -1,126 +0,0 @@ -# Copyright 2012 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A quick-and-dirty learning switch for Open vSwitch - -This learning switch requires Nicira extensions as found in Open vSwitch. -Furthermore, you must enable packet-in conversion. Run with something like: - ./pox.py openflow.nicira --convert-packet-in forwarding.l2_nx - -This forwards based on ethernet source and destination addresses. Where -l2_pairs installs rules for each pair of source and destination address, -this component uses two tables on the switch -- one for source addresses -and one for destination addresses. Specifically, we use tables 0 and 1 -on the switch to implement the following logic: -0. Is this source address known? - NO: Send to controller (so we can learn it) -1. Is this destination address known? - YES: Forward out correct port - NO: Flood - -Note that unlike the other learning switches *we keep no state in the -controller*. In truth, we could implement this whole thing using OVS's -learn action, but doing it something like is done here will still allow -us to implement access control or something at the controller. -""" - -from pox.core import core -from pox.lib.addresses import EthAddr -import pox.openflow.libopenflow_01 as of -import pox.openflow.nicira as nx -from pox.lib.revent import EventRemove - - -# Even a simple usage of the logger is much nicer than print! 
-log = core.getLogger() - - -def _handle_PacketIn (event): - packet = event.parsed - - if event.port > of.OFPP_MAX: - log.debug("Ignoring special port %s", event.port) - return - - # Add to source table - msg = nx.nx_flow_mod() - msg.match.of_eth_src = packet.src - msg.actions.append(nx.nx_action_resubmit.resubmit_table(table = 1)) - event.connection.send(msg) - - # Add to destination table - msg = nx.nx_flow_mod() - msg.table_id = 1 - msg.match.of_eth_dst = packet.src - msg.actions.append(of.ofp_action_output(port = event.port)) - event.connection.send(msg) - - log.info("Learning %s on port %s of %s" - % (packet.src, event.port, event.connection)) - - -def _handle_ConnectionUp (event): - # Set up this switch. - # After setting up, we send a barrier and wait for the response - # before starting to listen to packet_ins for this switch -- before - # the switch is set up, the packet_ins may not be what we expect, - # and our responses may not work! - - # Turn on Nicira packet_ins - msg = nx.nx_packet_in_format() - event.connection.send(msg) - - # Turn on ability to specify table in flow_mods - msg = nx.nx_flow_mod_table_id() - event.connection.send(msg) - - # Clear second table - msg = nx.nx_flow_mod(command=of.OFPFC_DELETE, table_id = 1) - event.connection.send(msg) - - # Fallthrough rule for table 0: flood and send to controller - msg = nx.nx_flow_mod() - msg.priority = 1 # Low priority - msg.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER)) - msg.actions.append(nx.nx_action_resubmit.resubmit_table(table = 1)) - event.connection.send(msg) - - # Fallthrough rule for table 1: flood - msg = nx.nx_flow_mod() - msg.table_id = 1 - msg.priority = 1 # Low priority - msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD)) - event.connection.send(msg) - - def ready (event): - if event.ofp.xid != 0x80000000: - # Not the right barrier - return - log.info("%s ready", event.connection) - event.connection.addListenerByName("PacketIn", _handle_PacketIn) - return EventRemove - - event.connection.send(of.ofp_barrier_request(xid=0x80000000)) - event.connection.addListenerByName("BarrierIn", ready) - - -def launch (): - def start (): - if not core.NX.convert_packet_in: - log.error("PacketIn conversion required") - return - core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp) - log.info("Simple NX switch running.") - core.call_when_ready(start, ['NX','openflow']) diff --git a/pox/forwarding/l2_nx_self_learning.py b/pox/forwarding/l2_nx_self_learning.py @@ -1,70 +0,0 @@ -# Copyright 2013 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This makes Nicira-extension capable switches into learning switches - -This uses the "learn" action so that switches become learning switches -*with no controller involvement*. 
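The two-table pipeline that the l2_nx docstring describes above (table 0: punt unknown sources to the controller, then resubmit; table 1: output to a known destination or flood) can be mimicked in a few lines of plain Python. This is purely an illustration of the control flow; the real component expresses it with nx_flow_mod and resubmit actions on the switch, and the names here are hypothetical.

def pipeline(known_sources, dst_table, packet):
    # Return the list of "actions" the two-table logic would take.
    actions = []
    if packet["src"] not in known_sources:
        actions.append("controller")      # table 0 miss: let the controller learn it
    out = dst_table.get(packet["dst"])    # table 1: known destination?
    actions.append(out if out is not None else "flood")
    return actions

known_sources = {"aa:aa"}
dst_table = {"aa:aa": 1}
print(pipeline(known_sources, dst_table, {"src": "bb:bb", "dst": "aa:aa"}))  # ['controller', 1]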
- - ./pox.py openflow.nicira forwarding.l2_nx_self_learning -""" - -from pox.core import core -import pox.openflow.libopenflow_01 as of -import pox.openflow.nicira as nx - - -log = core.getLogger("l2_nx_self_learning") - - -def _handle_ConnectionUp (event): - # Set up this switch. - - # Turn on ability to specify table in flow_mods - msg = nx.nx_flow_mod_table_id() - event.connection.send(msg) - - # Clear second table - msg = nx.nx_flow_mod(command=of.OFPFC_DELETE, table_id = 1) - event.connection.send(msg) - - # Learning rule in table 0 - msg = nx.nx_flow_mod() - msg.table_id = 0 - - learn = nx.nx_action_learn(table_id=1,hard_timeout=10) - learn.spec.chain( - field=nx.NXM_OF_VLAN_TCI, n_bits=12).chain( - field=nx.NXM_OF_ETH_SRC, match=nx.NXM_OF_ETH_DST).chain( - field=nx.NXM_OF_IN_PORT, output=True) - - msg.actions.append(learn) - msg.actions.append(nx.nx_action_resubmit.resubmit_table(1)) - event.connection.send(msg) - - # Fallthrough rule for table 1: flood - msg = nx.nx_flow_mod() - msg.table_id = 1 - msg.priority = 1 # Low priority - msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD)) - event.connection.send(msg) - - - -def launch (): - def start (): - core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp) - log.info("NX self-learning switch running.") - core.call_when_ready(start, ['NX','openflow']) diff --git a/pox/forwarding/l2_pairs.py b/pox/forwarding/l2_pairs.py @@ -1,87 +0,0 @@ -# Copyright 2012 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A super simple OpenFlow learning switch that installs rules for -each pair of L2 addresses. -""" - -# These next two imports are common POX convention -from pox.core import core -import pox.openflow.libopenflow_01 as of - - -# Even a simple usage of the logger is much nicer than print! -log = core.getLogger() - - -# This table maps (switch,MAC-addr) pairs to the port on 'switch' at -# which we last saw a packet *from* 'MAC-addr'. -# (In this case, we use a Connection object for the switch.) -table = {} - - -# To send out all ports, we can use either of the special ports -# OFPP_FLOOD or OFPP_ALL. We'd like to just use OFPP_FLOOD, -# but it's not clear if all switches support this, so we make -# it selectable. -all_ports = of.OFPP_FLOOD - - -# Handle messages the switch has sent us because it has no -# matching rule. -def _handle_PacketIn (event): - packet = event.parsed - - # Learn the source - table[(event.connection,packet.src)] = event.port - - dst_port = table.get((event.connection,packet.dst)) - - if dst_port is None: - # We don't know where the destination is yet. So, we'll just - # send the packet out all ports (except the one it came in on!) - # and hope the destination is out there somewhere. :) - msg = of.ofp_packet_out(data = event.ofp) - msg.actions.append(of.ofp_action_output(port = all_ports)) - event.connection.send(msg) - else: - # Since we know the switch ports for both the source and dest - # MACs, we can install rules for both directions. 
- msg = of.ofp_flow_mod() - msg.match.dl_dst = packet.src - msg.match.dl_src = packet.dst - msg.actions.append(of.ofp_action_output(port = event.port)) - event.connection.send(msg) - - # This is the packet that just came in -- we want to - # install the rule and also resend the packet. - msg = of.ofp_flow_mod() - msg.data = event.ofp # Forward the incoming packet - msg.match.dl_src = packet.src - msg.match.dl_dst = packet.dst - msg.actions.append(of.ofp_action_output(port = dst_port)) - event.connection.send(msg) - - log.debug("Installing %s <-> %s" % (packet.src, packet.dst)) - - -def launch (disable_flood = False): - global all_ports - if disable_flood: - all_ports = of.OFPP_ALL - - core.openflow.addListenerByName("PacketIn", _handle_PacketIn) - - log.info("Pair-Learning switch running.") diff --git a/pox/forwarding/l3_learning.py b/pox/forwarding/l3_learning.py @@ -1,349 +0,0 @@ -# Copyright 2012-2013 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A stupid L3 switch - -For each switch: -1) Keep a table that maps IP addresses to MAC addresses and switch ports. - Stock this table using information from ARP and IP packets. -2) When you see an ARP query, try to answer it using information in the table - from step 1. If the info in the table is old, just flood the query. -3) Flood all other ARPs. -4) When you see an IP packet, if you know the destination port (because it's - in the table from step 1), install a flow for it. -""" - -from pox.core import core -import pox -log = core.getLogger() - -from pox.lib.packet.ethernet import ethernet, ETHER_BROADCAST -from pox.lib.packet.ipv4 import ipv4 -from pox.lib.packet.arp import arp -from pox.lib.addresses import IPAddr, EthAddr -from pox.lib.util import str_to_bool, dpid_to_str -from pox.lib.recoco import Timer - -import pox.openflow.libopenflow_01 as of - -from pox.lib.revent import * - -import time - -# Timeout for flows -FLOW_IDLE_TIMEOUT = 10 - -# Timeout for ARP entries -ARP_TIMEOUT = 60 * 2 - -# Maximum number of packet to buffer on a switch for an unknown IP -MAX_BUFFERED_PER_IP = 5 - -# Maximum time to hang on to a buffer for an unknown IP in seconds -MAX_BUFFER_TIME = 5 - - -class Entry (object): - """ - Not strictly an ARP entry. - We use the port to determine which port to forward traffic out of. - We use the MAC to answer ARP replies. - We use the timeout so that if an entry is older than ARP_TIMEOUT, we - flood the ARP request rather than try to answer it ourselves. 
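The MAX_BUFFERED_PER_IP and MAX_BUFFER_TIME constants above feed the "lost buffers" bookkeeping this component uses for packets whose destination IP is still unknown. A minimal sketch of that bookkeeping, with hypothetical helper names and plain tuples instead of OpenFlow buffer ids, might look like this:

import time

MAX_BUFFERED_PER_IP = 5     # same limits as the constants above
MAX_BUFFER_TIME = 5

def buffer_packet(lost_buffers, key, buffer_id, in_port, now=None):
    # Remember a buffered packet for an as-yet-unknown IP, keeping at most
    # MAX_BUFFERED_PER_IP entries per (dpid, ip) key.
    now = time.time() if now is None else now
    bucket = lost_buffers.setdefault(key, [])
    bucket.append((now + MAX_BUFFER_TIME, buffer_id, in_port))
    while len(bucket) > MAX_BUFFERED_PER_IP:
        del bucket[0]

def expired(lost_buffers, now=None):
    # Yield (key, entry) pairs whose hold time has passed.
    now = time.time() if now is None else now
    for key, bucket in lost_buffers.items():
        for entry in bucket:
            if entry[0] < now:
                yield key, entry

buffers = {}
buffer_packet(buffers, (1, "10.0.0.2"), buffer_id=17, in_port=3, now=0)
print(list(expired(buffers, now=10)))   # the entry expired 5 seconds after time 0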
- """ - def __init__ (self, port, mac): - self.timeout = time.time() + ARP_TIMEOUT - self.port = port - self.mac = mac - - def __eq__ (self, other): - if type(other) == tuple: - return (self.port,self.mac)==other - else: - return (self.port,self.mac)==(other.port,other.mac) - def __ne__ (self, other): - return not self.__eq__(other) - - def isExpired (self): - if self.port == of.OFPP_NONE: return False - return time.time() > self.timeout - - -def dpid_to_mac (dpid): - return EthAddr("%012x" % (dpid & 0xffFFffFFffFF,)) - - -class l3_switch (EventMixin): - def __init__ (self, fakeways = [], arp_for_unknowns = False, wide = False): - # These are "fake gateways" -- we'll answer ARPs for them with MAC - # of the switch they're connected to. - self.fakeways = set(fakeways) - - # If True, we create "wide" matches. Otherwise, we create "narrow" - # (exact) matches. - self.wide = wide - - # If this is true and we see a packet for an unknown - # host, we'll ARP for it. - self.arp_for_unknowns = arp_for_unknowns - - # (dpid,IP) -> expire_time - # We use this to keep from spamming ARPs - self.outstanding_arps = {} - - # (dpid,IP) -> [(expire_time,buffer_id,in_port), ...] - # These are buffers we've gotten at this datapath for this IP which - # we can't deliver because we don't know where they go. - self.lost_buffers = {} - - # For each switch, we map IP addresses to Entries - self.arpTable = {} - - # This timer handles expiring stuff - self._expire_timer = Timer(5, self._handle_expiration, recurring=True) - - core.listen_to_dependencies(self) - - def _handle_expiration (self): - # Called by a timer so that we can remove old items. - empty = [] - for k,v in self.lost_buffers.items(): - dpid,ip = k - - for item in list(v): - expires_at,buffer_id,in_port = item - if expires_at < time.time(): - # This packet is old. Tell this switch to drop it. - v.remove(item) - po = of.ofp_packet_out(buffer_id = buffer_id, in_port = in_port) - core.openflow.sendToDPID(dpid, po) - if len(v) == 0: empty.append(k) - - # Remove empty buffer bins - for k in empty: - del self.lost_buffers[k] - - def _send_lost_buffers (self, dpid, ipaddr, macaddr, port): - """ - We may have "lost" buffers -- packets we got but didn't know - where to send at the time. We may know now. Try and see. - """ - if (dpid,ipaddr) in self.lost_buffers: - # Yup! - bucket = self.lost_buffers[(dpid,ipaddr)] - del self.lost_buffers[(dpid,ipaddr)] - log.debug("Sending %i buffered packets to %s from %s" - % (len(bucket),ipaddr,dpid_to_str(dpid))) - for _,buffer_id,in_port in bucket: - po = of.ofp_packet_out(buffer_id=buffer_id,in_port=in_port) - po.actions.append(of.ofp_action_dl_addr.set_dst(macaddr)) - po.actions.append(of.ofp_action_output(port = port)) - core.openflow.sendToDPID(dpid, po) - - def _handle_openflow_PacketIn (self, event): - dpid = event.connection.dpid - inport = event.port - packet = event.parsed - if not packet.parsed: - log.warning("%i %i ignoring unparsed packet", dpid, inport) - return - - if dpid not in self.arpTable: - # New switch -- create an empty table - self.arpTable[dpid] = {} - for fake in self.fakeways: - self.arpTable[dpid][IPAddr(fake)] = Entry(of.OFPP_NONE, - dpid_to_mac(dpid)) - - if packet.type == ethernet.LLDP_TYPE: - # Ignore LLDP packets - return - - if isinstance(packet.__next__, ipv4): - log.debug("%i %i IP %s => %s", dpid,inport, - packet.next.srcip,packet.next.dstip) - - # Send any waiting packets... 
- self._send_lost_buffers(dpid, packet.next.srcip, packet.src, inport) - - # Learn or update port/MAC info - if packet.next.srcip in self.arpTable[dpid]: - if self.arpTable[dpid][packet.next.srcip] != (inport, packet.src): - log.info("%i %i RE-learned %s", dpid,inport,packet.next.srcip) - if self.wide: - # Make sure we don't have any entries with the old info... - msg = of.ofp_flow_mod(command=of.OFPFC_DELETE) - msg.match.nw_dst = packet.next.srcip - msg.match.dl_type = ethernet.IP_TYPE - event.connection.send(msg) - else: - log.debug("%i %i learned %s", dpid,inport,packet.next.srcip) - self.arpTable[dpid][packet.next.srcip] = Entry(inport, packet.src) - - # Try to forward - dstaddr = packet.next.dstip - if dstaddr in self.arpTable[dpid]: - # We have info about what port to send it out on... - - prt = self.arpTable[dpid][dstaddr].port - mac = self.arpTable[dpid][dstaddr].mac - if prt == inport: - log.warning("%i %i not sending packet for %s back out of the " - "input port" % (dpid, inport, dstaddr)) - else: - log.debug("%i %i installing flow for %s => %s out port %i" - % (dpid, inport, packet.next.srcip, dstaddr, prt)) - - actions = [] - actions.append(of.ofp_action_dl_addr.set_dst(mac)) - actions.append(of.ofp_action_output(port = prt)) - if self.wide: - match = of.ofp_match(dl_type = packet.type, nw_dst = dstaddr) - else: - match = of.ofp_match.from_packet(packet, inport) - - msg = of.ofp_flow_mod(command=of.OFPFC_ADD, - idle_timeout=FLOW_IDLE_TIMEOUT, - hard_timeout=of.OFP_FLOW_PERMANENT, - buffer_id=event.ofp.buffer_id, - actions=actions, - match=match) - event.connection.send(msg.pack()) - elif self.arp_for_unknowns: - # We don't know this destination. - # First, we track this buffer so that we can try to resend it later - # if we learn the destination, second we ARP for the destination, - # which should ultimately result in it responding and us learning - # where it is - - # Add to tracked buffers - if (dpid,dstaddr) not in self.lost_buffers: - self.lost_buffers[(dpid,dstaddr)] = [] - bucket = self.lost_buffers[(dpid,dstaddr)] - entry = (time.time() + MAX_BUFFER_TIME,event.ofp.buffer_id,inport) - bucket.append(entry) - while len(bucket) > MAX_BUFFERED_PER_IP: del bucket[0] - - # Expire things from our outstanding ARP list... - self.outstanding_arps = {k:v for k,v in - self.outstanding_arps.items() if v > time.time()} - - # Check if we've already ARPed recently - if (dpid,dstaddr) in self.outstanding_arps: - # Oop, we've already done this one recently. - return - - # And ARP... 
- self.outstanding_arps[(dpid,dstaddr)] = time.time() + 4 - - r = arp() - r.hwtype = r.HW_TYPE_ETHERNET - r.prototype = r.PROTO_TYPE_IP - r.hwlen = 6 - r.protolen = r.protolen - r.opcode = r.REQUEST - r.hwdst = ETHER_BROADCAST - r.protodst = dstaddr - r.hwsrc = packet.src - r.protosrc = packet.next.srcip - e = ethernet(type=ethernet.ARP_TYPE, src=packet.src, - dst=ETHER_BROADCAST) - e.set_payload(r) - log.debug("%i %i ARPing for %s on behalf of %s" % (dpid, inport, - r.protodst, r.protosrc)) - msg = of.ofp_packet_out() - msg.data = e.pack() - msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD)) - msg.in_port = inport - event.connection.send(msg) - - elif isinstance(packet.__next__, arp): - a = packet.__next__ - log.debug("%i %i ARP %s %s => %s", dpid, inport, - {arp.REQUEST:"request",arp.REPLY:"reply"}.get(a.opcode, - 'op:%i' % (a.opcode,)), a.protosrc, a.protodst) - - if a.prototype == arp.PROTO_TYPE_IP: - if a.hwtype == arp.HW_TYPE_ETHERNET: - if a.protosrc != 0: - - # Learn or update port/MAC info - if a.protosrc in self.arpTable[dpid]: - if self.arpTable[dpid][a.protosrc] != (inport, packet.src): - log.info("%i %i RE-learned %s", dpid,inport,a.protosrc) - if self.wide: - # Make sure we don't have any entries with the old info... - msg = of.ofp_flow_mod(command=of.OFPFC_DELETE) - msg.match.dl_type = ethernet.IP_TYPE - msg.match.nw_dst = a.protosrc - event.connection.send(msg) - else: - log.debug("%i %i learned %s", dpid,inport,a.protosrc) - self.arpTable[dpid][a.protosrc] = Entry(inport, packet.src) - - # Send any waiting packets... - self._send_lost_buffers(dpid, a.protosrc, packet.src, inport) - - if a.opcode == arp.REQUEST: - # Maybe we can answer - - if a.protodst in self.arpTable[dpid]: - # We have an answer... - - if not self.arpTable[dpid][a.protodst].isExpired(): - # .. and it's relatively current, so we'll reply ourselves - - r = arp() - r.hwtype = a.hwtype - r.prototype = a.prototype - r.hwlen = a.hwlen - r.protolen = a.protolen - r.opcode = arp.REPLY - r.hwdst = a.hwsrc - r.protodst = a.protosrc - r.protosrc = a.protodst - r.hwsrc = self.arpTable[dpid][a.protodst].mac - e = ethernet(type=packet.type, src=dpid_to_mac(dpid), - dst=a.hwsrc) - e.set_payload(r) - log.debug("%i %i answering ARP for %s" % (dpid, inport, - r.protosrc)) - msg = of.ofp_packet_out() - msg.data = e.pack() - msg.actions.append(of.ofp_action_output(port = - of.OFPP_IN_PORT)) - msg.in_port = inport - event.connection.send(msg) - return - - # Didn't know how to answer or otherwise handle this ARP, so just flood it - log.debug("%i %i flooding ARP %s %s => %s" % (dpid, inport, - {arp.REQUEST:"request",arp.REPLY:"reply"}.get(a.opcode, - 'op:%i' % (a.opcode,)), a.protosrc, a.protodst)) - - msg = of.ofp_packet_out(in_port = inport, data = event.ofp, - action = of.ofp_action_output(port = of.OFPP_FLOOD)) - event.connection.send(msg) - - -def launch (fakeways="", arp_for_unknowns=None, wide=False): - fakeways = fakeways.replace(","," ").split() - fakeways = [IPAddr(x) for x in fakeways] - if arp_for_unknowns is None: - arp_for_unknowns = len(fakeways) > 0 - else: - arp_for_unknowns = str_to_bool(arp_for_unknowns) - core.registerNew(l3_switch, fakeways, arp_for_unknowns, wide) - diff --git a/pox/forwarding/topo_proactive.py b/pox/forwarding/topo_proactive.py @@ -1,480 +0,0 @@ -# Copyright 2013 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Installs forwarding rules based on topologically significant IP addresses. - -We also issue those addresses by DHCP. A host must use the assigned IP! -Actually, the last byte can be almost anything. But addresses are of the -form 10.switchID.portNumber.x. - -This is an example of a pretty proactive forwarding application. - -The forwarding code is based on l2_multi. - -Depends on openflow.discovery -Works with openflow.spanning_tree (sort of) -""" - -from pox.core import core -import pox.openflow.libopenflow_01 as of -import pox.lib.packet as pkt - -from pox.lib.addresses import IPAddr,EthAddr,parse_cidr -from pox.lib.addresses import IP_BROADCAST, IP_ANY -from pox.lib.revent import * -from pox.lib.util import dpid_to_str -from pox.proto.dhcpd import DHCPLease, DHCPD -from collections import defaultdict -from pox.openflow.discovery import Discovery -import time - -log = core.getLogger("f.t_p") - - -# Adjacency map. [sw1][sw2] -> port from sw1 to sw2 -adjacency = defaultdict(lambda:defaultdict(lambda:None)) - -# Switches we know of. [dpid] -> Switch and [id] -> Switch -switches_by_dpid = {} -switches_by_id = {} - -# [sw1][sw2] -> (distance, intermediate) -path_map = defaultdict(lambda:defaultdict(lambda:(None,None))) - - -def dpid_to_mac (dpid): - return EthAddr("%012x" % (dpid & 0xffFFffFFffFF,)) - - -def _calc_paths (): - """ - Essentially Floyd-Warshall algorithm - """ - - def dump (): - for i in sws: - for j in sws: - a = path_map[i][j][0] - #a = adjacency[i][j] - if a is None: a = "*" - print(a, end=' ') - print() - - sws = list(switches_by_dpid.values()) - path_map.clear() - for k in sws: - for j,port in adjacency[k].items(): - if port is None: continue - path_map[k][j] = (1,None) - path_map[k][k] = (0,None) # distance, intermediate - - #dump() - - for k in sws: - for i in sws: - for j in sws: - if path_map[i][k][0] is not None: - if path_map[k][j][0] is not None: - # i -> k -> j exists - ikj_dist = path_map[i][k][0]+path_map[k][j][0] - if path_map[i][j][0] is None or ikj_dist < path_map[i][j][0]: - # i -> k -> j is better than existing - path_map[i][j] = (ikj_dist, k) - - #print "--------------------" - #dump() - - -def _get_raw_path (src, dst): - """ - Get a raw path (just a list of nodes to traverse) - """ - if len(path_map) == 0: _calc_paths() - if src is dst: - # We're here! - return [] - if path_map[src][dst][0] is None: - return None - intermediate = path_map[src][dst][1] - if intermediate is None: - # Directly connected - return [] - return _get_raw_path(src, intermediate) + [intermediate] + \ - _get_raw_path(intermediate, dst) - - -def _get_path (src, dst): - """ - Gets a cooked path -- a list of (node,out_port) - """ - # Start with a raw path... 
- if src == dst: - path = [src] - else: - path = _get_raw_path(src, dst) - if path is None: return None - path = [src] + path + [dst] - - # Now add the ports - r = [] - for s1,s2 in zip(path[:-1],path[1:]): - out_port = adjacency[s1][s2] - r.append((s1,out_port)) - in_port = adjacency[s2][s1] - - return r - - -def ipinfo (ip): - parts = [int(x) for x in str(ip).split('.')] - ID = parts[1] - port = parts[2] - num = parts[3] - return switches_by_id.get(ID),port,num - - -class TopoSwitch (DHCPD): - _eventMixin_events = set([DHCPLease]) - _next_id = 100 - - def __repr__ (self): - try: - return "[%s/%s]" % (dpid_to_str(self.connection.dpid),self._id) - except: - return "[Unknown]" - - - def __init__ (self): - self.log = log.getChild("Unknown") - - self.connection = None - self.ports = None - self.dpid = None - self._listeners = None - self._connected_at = None - self._id = None - self.subnet = None - self.network = None - self._install_flow = False - self.mac = None - - self.ip_to_mac = {} - - # Listen to our own event... :) - self.addListenerByName("DHCPLease", self._on_lease) - - core.ARPHelper.addListeners(self) - - - def _handle_ARPRequest (self, event): - if ipinfo(event.ip)[0] is not self: return - event.reply = self.mac - - - def send_table (self): - if self.connection is None: - self.log.debug("Can't send table: disconnected") - return - - clear = of.ofp_flow_mod(command=of.OFPFC_DELETE) - self.connection.send(clear) - self.connection.send(of.ofp_barrier_request()) - - # From DHCPD - msg = of.ofp_flow_mod() - msg.match = of.ofp_match() - msg.match.dl_type = pkt.ethernet.IP_TYPE - msg.match.nw_proto = pkt.ipv4.UDP_PROTOCOL - #msg.match.nw_dst = IP_BROADCAST - msg.match.tp_src = pkt.dhcp.CLIENT_PORT - msg.match.tp_dst = pkt.dhcp.SERVER_PORT - msg.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER)) - #msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD)) - self.connection.send(msg) - - core.openflow_discovery.install_flow(self.connection) - - src = self - for dst in switches_by_dpid.values(): - if dst is src: continue - p = _get_path(src, dst) - if p is None: continue - - msg = of.ofp_flow_mod() - msg.match = of.ofp_match() - msg.match.dl_type = pkt.ethernet.IP_TYPE - #msg.match.nw_dst = "%s/%s" % (dst.network, dst.subnet) - msg.match.nw_dst = "%s/%s" % (dst.network, "255.255.0.0") - - msg.actions.append(of.ofp_action_output(port=p[0][1])) - self.connection.send(msg) - - """ - # Can just do this instead of MAC learning if you run arp_responder... 
- for port in self.ports: - p = port.port_no - if p < 0 or p >= of.OFPP_MAX: continue - msg = of.ofp_flow_mod() - msg.match = of.ofp_match() - msg.match.dl_type = pkt.ethernet.IP_TYPE - msg.match.nw_dst = "10.%s.%s.0/255.255.255.0" % (self._id,p) - msg.actions.append(of.ofp_action_output(port=p)) - self.connection.send(msg) - """ - - for ip,mac in self.ip_to_mac.items(): - self._send_rewrite_rule(ip, mac) - - flood_ports = [] - for port in self.ports: - p = port.port_no - if p < 0 or p >= of.OFPP_MAX: continue - - if core.openflow_discovery.is_edge_port(self.dpid, p): - flood_ports.append(p) - - msg = of.ofp_flow_mod() - msg.priority -= 1 - msg.match = of.ofp_match() - msg.match.dl_type = pkt.ethernet.IP_TYPE - msg.match.nw_dst = "10.%s.%s.0/255.255.255.0" % (self._id,p) - msg.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER)) - self.connection.send(msg) - - msg = of.ofp_flow_mod() - msg.priority -= 1 - msg.match = of.ofp_match() - msg.match.dl_type = pkt.ethernet.IP_TYPE - msg.match.nw_dst = "255.255.255.255" - for p in flood_ports: - msg.actions.append(of.ofp_action_output(port=p)) - self.connection.send(msg) - - - def _send_rewrite_rule (self, ip, mac): - p = ipinfo(ip)[1] - - msg = of.ofp_flow_mod() - msg.match = of.ofp_match() - msg.match.dl_type = pkt.ethernet.IP_TYPE - msg.match.nw_dst = ip - msg.actions.append(of.ofp_action_dl_addr.set_src(self.mac)) - msg.actions.append(of.ofp_action_dl_addr.set_dst(mac)) - msg.actions.append(of.ofp_action_output(port=p)) - self.connection.send(msg) - - - def disconnect (self): - if self.connection is not None: - log.debug("Disconnect %s" % (self.connection,)) - self.connection.removeListeners(self._listeners) - self.connection = None - self._listeners = None - - - def connect (self, connection): - if connection is None: - self.log.warn("Can't connect to nothing") - return - if self.dpid is None: - self.dpid = connection.dpid - assert self.dpid == connection.dpid - if self.ports is None: - self.ports = connection.features.ports - self.disconnect() - self.connection = connection - self._listeners = self.listenTo(connection) - self._connected_at = time.time() - - label = dpid_to_str(connection.dpid) - self.log = log.getChild(label) - self.log.debug("Connect %s" % (connection,)) - - if self._id is None: - if self.dpid not in switches_by_id and self.dpid <= 254: - self._id = self.dpid - else: - self._id = TopoSwitch._next_id - TopoSwitch._next_id += 1 - switches_by_id[self._id] = self - - self.network = IPAddr("10.%s.0.0" % (self._id,)) - self.mac = dpid_to_mac(self.dpid) - - # Disable flooding - con = connection - log.debug("Disabling flooding for %i ports", len(con.ports)) - for p in con.ports.values(): - if p.port_no >= of.OFPP_MAX: continue - pm = of.ofp_port_mod(port_no=p.port_no, - hw_addr=p.hw_addr, - config = of.OFPPC_NO_FLOOD, - mask = of.OFPPC_NO_FLOOD) - con.send(pm) - con.send(of.ofp_barrier_request()) - con.send(of.ofp_features_request()) - - # Some of this is copied from DHCPD's __init__(). 
- self.send_table() - - def fix_addr (addr, backup): - if addr is None: return None - if addr is (): return IPAddr(backup) - return IPAddr(addr) - - self.ip_addr = IPAddr("10.%s.0.1" % (self._id,)) - #self.router_addr = self.ip_addr - self.router_addr = None - self.dns_addr = None #fix_addr(dns_address, self.router_addr) - - self.subnet = IPAddr("255.0.0.0") - self.pools = {} - for p in connection.ports: - if p < 0 or p >= of.OFPP_MAX: continue - self.pools[p] = [IPAddr("10.%s.%s.%s" % (self._id,p,n)) - for n in range(1,255)] - - self.lease_time = 60 * 60 # An hour - #TODO: Actually make them expire :) - - self.offers = {} # Eth -> IP we offered - self.leases = {} # Eth -> IP we leased - - - def _get_pool (self, event): - pool = self.pools.get(event.port) - if pool is None: - log.warn("No IP pool for port %s", event.port) - return pool - - - def _handle_ConnectionDown (self, event): - self.disconnect() - - - def _mac_learn (self, mac, ip): - if ip.inNetwork(self.network,"255.255.0.0"): - if self.ip_to_mac.get(ip) != mac: - self.ip_to_mac[ip] = mac - self._send_rewrite_rule(ip, mac) - return True - return False - - - def _on_lease (self, event): - if self._mac_learn(event.host_mac, event.ip): - self.log.debug("Learn %s -> %s by DHCP Lease",event.ip,event.host_mac) - - - def _handle_PacketIn (self, event): - packet = event.parsed - arpp = packet.find('arp') - if arpp is not None: - if event.port != ipinfo(arpp.protosrc)[1]: - self.log.warn("%s has incorrect IP %s", arpp.hwsrc, arpp.protosrc) - return - - if self._mac_learn(packet.src, arpp.protosrc): - self.log.debug("Learn %s -> %s by ARP",arpp.protosrc,packet.src) - else: - ipp = packet.find('ipv4') - if ipp is not None: - # Should be destined for this switch with unknown MAC - # Send an ARP - sw,p,_= ipinfo(ipp.dstip) - if sw is self: - log.debug("Need MAC for %s", ipp.dstip) - core.ARPHelper.send_arp_request(event.connection,ipp.dstip,port=p) - - return super(TopoSwitch,self)._handle_PacketIn(event) - - -class topo_addressing (object): - def __init__ (self): - core.listen_to_dependencies(self, listen_args={'openflow':{'priority':0}}) - - def _handle_ARPHelper_ARPRequest (self, event): - pass # Just here to make sure we load it - - def _handle_openflow_discovery_LinkEvent (self, event): - def flip (link): - return Discovery.Link(link[2],link[3], link[0],link[1]) - - l = event.link - sw1 = switches_by_dpid[l.dpid1] - sw2 = switches_by_dpid[l.dpid2] - - # Invalidate all flows and path info. - # For link adds, this makes sure that if a new link leads to an - # improved path, we use it. - # For link removals, this makes sure that we don't use a - # path that may have been broken. - #NOTE: This could be radically improved! (e.g., not *ALL* paths break) - clear = of.ofp_flow_mod(command=of.OFPFC_DELETE) - for sw in switches_by_dpid.values(): - if sw.connection is None: continue - sw.connection.send(clear) - path_map.clear() - - if event.removed: - # This link no longer okay - if sw2 in adjacency[sw1]: del adjacency[sw1][sw2] - if sw1 in adjacency[sw2]: del adjacency[sw2][sw1] - - # But maybe there's another way to connect these... - for ll in core.openflow_discovery.adjacency: - if ll.dpid1 == l.dpid1 and ll.dpid2 == l.dpid2: - if flip(ll) in core.openflow_discovery.adjacency: - # Yup, link goes both ways - adjacency[sw1][sw2] = ll.port1 - adjacency[sw2][sw1] = ll.port2 - # Fixed -- new link chosen to connect these - break - else: - # If we already consider these nodes connected, we can - # ignore this link up. 
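The DHCP pools built above give every usable switch port its own range 10.<id>.<port>.1-254. A plain-strings sketch of that layout (no POX types):

    def build_pools(switch_id, port_numbers):
        # One address pool per physical port, following the 10.<id>.<port>.x scheme
        return {p: ["10.%d.%d.%d" % (switch_id, p, n) for n in range(1, 255)]
                for p in port_numbers}

    pools = build_pools(5, [1, 2])
    assert pools[2][0] == "10.5.2.1" and len(pools[1]) == 254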
- # Otherwise, we might be interested... - if adjacency[sw1][sw2] is None: - # These previously weren't connected. If the link - # exists in both directions, we consider them connected now. - if flip(l) in core.openflow_discovery.adjacency: - # Yup, link goes both ways -- connected! - adjacency[sw1][sw2] = l.port1 - adjacency[sw2][sw1] = l.port2 - - for sw in switches_by_dpid.values(): - sw.send_table() - - - def _handle_openflow_ConnectionUp (self, event): - sw = switches_by_dpid.get(event.dpid) - - if sw is None: - # New switch - - sw = TopoSwitch() - switches_by_dpid[event.dpid] = sw - sw.connect(event.connection) - else: - sw.connect(event.connection) - - - -def launch (debug = False): - core.registerNew(topo_addressing) - from proto.arp_helper import launch - launch(eat_packets=False) - if not debug: - core.getLogger("proto.arp_helper").setLevel(99) diff --git a/pox/help.py b/pox/help.py @@ -1,161 +0,0 @@ -# Copyright 2013 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Attempts to give help on other components -""" - - -import pox.boot as boot -import inspect -import sys - -def _show_args (f,name): - #TODO: Refactor with pox.boot - - if name == "launch": name = "default launcher" - - out = [] - - EMPTY = "<Unspecified>" - - argnames,varargs,kws,defaults = inspect.getargspec(f) - argcount = len(argnames) - defaults = list((f.__defaults__) or []) - defaults = [EMPTY] * (argcount - len(defaults)) + defaults - - args = {} - for n, a in enumerate(argnames): - args[a] = [EMPTY,EMPTY] - if n < len(defaults): - args[a][0] = defaults[n] - multi = False - if '__INSTANCE__' in args: - multi = True - del args['__INSTANCE__'] - - if len(args) == 0: - if argcount or kws: - out.append(" Multiple.") - varargs = kws = None - else: - out.append(" None.") - else: - out.append(" {0:25} {1:25}".format("Name", "Default")) - out.append(" {0:25} {0:25}".format("-" * 15)) - - for k,v in args.items(): - k = k.replace("_","-") - out.append(" {0:25} {1:25}".format(k,str(v[0]))) - - if len(out): - out.insert(0, "Parameters for {0}:".format(name)) - out.append("") - - if multi: - out.append(" Note: This can be invoked multiple times.") - if varargs or kws: - out.append(" Note: This can be invoked with parameters not listed here.") - - out = '\n'.join(out) - - return out.strip() - - -def launch (no_args = False, short = False, **kw): - """ - Shows help - - Usage: help <args> --component_name - help <args> --component_name=launcher - - Args are: - --short Only summarize docs - --no-args Don't show parameter info - """ - - if len(kw) == 0: - d = boot._help_text - if short: d = d.split("\n")[0] - print(d) - sys.exit(0) - - if len(kw) != 1: - if len(kw) > 1: - print() - print("Didn't understand what you wanted. " - "Showing help for help instead.") - kw = {'help':True} - - component = list(kw.keys())[0] - launcher = list(kw.values())[0] - - if component == 'help': - # Special case! 
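The help component's _show_args above walks a launch function's signature with the legacy getargspec API. A standalone sketch of the same idea using the modern inspect.signature (illustrative names, not a drop-in for the code above):

    import inspect

    def describe_launcher(func):
        rows = []
        for name, p in inspect.signature(func).parameters.items():
            if name == "__INSTANCE__":
                continue  # internal multi-invocation marker, not a user option
            default = "<Unspecified>" if p.default is inspect.Parameter.empty else p.default
            rows.append("  %-25s %-25s" % (name.replace("_", "-"), default))
        return "\n".join(rows)

    def launch(verbose=False, max_length=110):
        pass

    print(describe_launcher(launch))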
- name = "pox.help" - else: - name = boot._do_import(component) - - if name is False: - print("No such component:",component) - sys.exit(1) - - mod = sys.modules[name] - - if launcher is not True and launcher not in mod.__dict__: - print("No launch function named %s for %s" % (launcher, component)) - sys.exit(1) - - doc = inspect.getdoc(mod) or '' - if short: doc = doc.split("\n")[0] - - if not doc: - # Make sure we try to show SOMETHING... - no_args = False - - launcher_doc = "" - - multi = '' - args = '' - - if launcher is True and 'launch' in mod.__dict__: - launcher = 'launch' - if not no_args and launcher in mod.__dict__: - f = mod.__dict__[launcher] - if type(f) is not type(launch): - # This isn't quite right if they didn't specify a launcher - print(launch, "in", name, "isn't a function") - - launcher_doc = inspect.getdoc(f) or '' - if short: launcher_doc = launcher_doc.split("\n")[0] - - if len(launcher_doc): - launcher_doc = ' ' + launcher_doc.replace('\n', '\n ').strip() - if launcher == 'launch': - launcher_doc = "Default launcher:\n" + launcher_doc - else: - launcher_doc = launcher + " launcher:\n" + launcher_doc - - args = _show_args(f,launcher) - - alldoc = [doc,launcher_doc,args,multi] - - alldoc = [x.strip() + "\n\n" for x in alldoc if len(x)] - - alldoc = ''.join(alldoc).strip() or 'No documentation available' - - print() - print(alldoc) - - sys.exit(0) diff --git a/pox/host_tracker/__init__.py b/pox/host_tracker/__init__.py @@ -1,40 +0,0 @@ -# Copyright 2011 Dorgival Guedes -# Copyright 2013 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Tracks host location and configuration - -See host_tracker.host_tracker for more info. -""" - -from pox.core import core -from . import host_tracker -log = core.getLogger() -import logging -log.setLevel(logging.INFO) -from pox.lib.addresses import EthAddr - -def launch (src_mac = None, no_flow = False, **kw): - for k, v in kw.items(): - if k in host_tracker.timeoutSec: - host_tracker.timeoutSec[k] = int(v) - log.debug("Changing timer parameter: %s = %s",k,v) - elif k == 'pingLim': - host_tracker.PingCtrl.pingLim = int(v) - log.debug("Changing ping limit to %s",v) - else: - log.error("Unknown option: %s(=%s)",k,v) - core.registerNew(host_tracker.host_tracker, ping_src_mac = src_mac, - install_flow = not no_flow) diff --git a/pox/host_tracker/host_tracker.py b/pox/host_tracker/host_tracker.py @@ -1,416 +0,0 @@ -# Copyright 2011 Dorgival Guedes -# Copyright 2013 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -Tracks host location and configuration - -Keep track of hosts in the network, where they are and how they are -configured (at least MAC/IP addresses). - -For the time being, it keeps tables with the information; later, it should -transfer that information to Topology and handle just the actual -discovery/update of host information. - -Timer configuration can be changed when needed (e.g., for debugging) using -the launch facility (check timeoutSec dict and PingCtrl.pingLim). - -You can set various timeouts from the commandline. Names and defaults: - arpAware=60*2 Quiet ARP-responding entries are pinged after this - arpSilent=60*20 This is for uiet entries not known to answer ARP - arpReply=4 Time to wait for an ARP reply before retrial - timerInterval=5 Seconds between timer routine activations - entryMove=60 Minimum expected time to move a physical entry - -Good values for testing: - --arpAware=15 --arpSilent=45 --arpReply=1 --entryMove=4 - -You can also specify how many ARP pings we try before deciding it failed: - --pingLim=2 -""" - -from pox.core import core - -from pox.lib.addresses import EthAddr -from pox.lib.packet.ethernet import ethernet -from pox.lib.packet.ipv4 import ipv4 -from pox.lib.packet.arp import arp - -from pox.lib.recoco import Timer -from pox.lib.revent import Event, EventHalt - -import pox.openflow.libopenflow_01 as of - -import pox.openflow.discovery as discovery - -from pox.lib.revent.revent import * - -import time - -import pox -log = core.getLogger() - -# Times (in seconds) to use for differente timouts: -timeoutSec = dict( - arpAware=60*2, # Quiet ARP-responding entries are pinged after this - arpSilent=60*20, # This is for uiet entries not known to answer ARP - arpReply=4, # Time to wait for an ARP reply before retrial - timerInterval=5, # Seconds between timer routine activations - entryMove=60 # Minimum expected time to move a physical entry - ) - -# Address to send ARP pings from. -# The particular one here is just an arbitrary locally administered address. -DEFAULT_ARP_PING_SRC_MAC = '02:00:00:00:be:ef' - - -class HostEvent (Event): - """ - Event when hosts join, leave, or move within the network - """ - def __init__ (self, entry, new_dpid = None, new_port = None, join = False, - leave = False, move = False): - super(HostEvent,self).__init__() - self.entry = entry - self.join = join - self.leave = leave - self.move = move - - assert sum(1 for x in [join,leave,move] if x) == 1 - - # You can alter these and they'll change where we think it goes... 
- self._new_dpid = new_dpid - self._new_port = new_port - - #TODO: Allow us to cancel add/removes - - @property - def new_dpid (self): - """ - New DPID for move events" - """ - assert self.move - return self._new_dpid - - @property - def new_port (self): - """ - New port for move events" - """ - assert self.move - return self._new_port - - -class Alive (object): - """ - Holds liveliness information for MAC and IP entries - """ - def __init__ (self, livelinessInterval=timeoutSec['arpAware']): - self.lastTimeSeen = time.time() - self.interval=livelinessInterval - - def expired (self): - return time.time() > self.lastTimeSeen + self.interval - - def refresh (self): - self.lastTimeSeen = time.time() - - -class PingCtrl (Alive): - """ - Holds information for handling ARP pings for hosts - """ - # Number of ARP ping attemps before deciding it failed - pingLim=3 - - def __init__ (self): - super(PingCtrl,self).__init__(timeoutSec['arpReply']) - self.pending = 0 - - def sent (self): - self.refresh() - self.pending += 1 - - def failed (self): - return self.pending > PingCtrl.pingLim - - def received (self): - # Clear any pending timeouts related to ARP pings - self.pending = 0 - - -class IpEntry (Alive): - """ - This entry keeps track of IP addresses seen from each MAC entry and will - be kept in the macEntry object's ipAddrs dictionary. At least for now, - there is no need to refer to the original macEntry as the code is organized. - """ - def __init__ (self, hasARP): - if hasARP: - super(IpEntry,self).__init__(timeoutSec['arpAware']) - else: - super(IpEntry,self).__init__(timeoutSec['arpSilent']) - self.hasARP = hasARP - self.pings = PingCtrl() - - def setHasARP (self): - if not self.hasARP: - self.hasARP = True - self.interval = timeoutSec['arpAware'] - - -class MacEntry (Alive): - """ - Not strictly an ARP entry. - When it gets moved to Topology, may include other host info, like - services, and it may replace dpid by a general switch object reference - We use the port to determine which port to forward traffic out of. - """ - def __init__ (self, dpid, port, macaddr): - super(MacEntry,self).__init__() - self.dpid = dpid - self.port = port - self.macaddr = macaddr - self.ipAddrs = {} - - def __str__(self): - return ' '.join([str(self.dpid), str(self.port), str(self.macaddr)]) - - def __eq__ (self, other): - if other is None: - return False - elif type(other) == tuple: - return (self.dpid,self.port,self.macaddr)==other - - if self.dpid != other.dpid: return False - if self.port != other.port: return False - if self.macaddr != other.macaddr: return False - if self.dpid != other.dpid: return False - # What about ipAddrs?? 
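Every table entry above inherits the same liveness pattern: record when it was last seen, refresh on activity, expire after an interval. A minimal self-contained version with an example sweep:

    import time

    class Alive(object):
        def __init__(self, interval):
            self.interval = interval
            self.last_seen = time.time()
        def refresh(self):
            self.last_seen = time.time()
        def expired(self):
            return time.time() > self.last_seen + self.interval

    table = {"02:00:00:00:00:01": Alive(interval=120)}
    # on every packet from that MAC: table[mac].refresh()
    stale = [mac for mac, entry in table.items() if entry.expired()]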
- return True - - def __ne__ (self, other): - return not self.__eq__(other) - - -class host_tracker (EventMixin): - """ - Host tracking component - """ - _eventMixin_events = set([HostEvent]) - - def __init__ (self, ping_src_mac = None, install_flow = True, - eat_packets = True): - - if ping_src_mac is None: - ping_src_mac = DEFAULT_ARP_PING_SRC_MAC - - self.ping_src_mac = EthAddr(ping_src_mac) - self.install_flow = install_flow - self.eat_packets = eat_packets - - # The following tables should go to Topology later - self.entryByMAC = {} - self._t = Timer(timeoutSec['timerInterval'], - self._check_timeouts, recurring=True) - - # Listen to openflow with high priority if we want to eat our ARP replies - listen_args = {} - if eat_packets: - listen_args={'openflow':{'priority':0}} - core.listen_to_dependencies(self, listen_args=listen_args) - - def _all_dependencies_met (self): - log.info("host_tracker ready") - - # The following two functions should go to Topology also - def getMacEntry (self, macaddr): - try: - result = self.entryByMAC[macaddr] - except KeyError as e: - result = None - return result - - def sendPing (self, macEntry, ipAddr): - """ - Builds an ETH/IP any-to-any ARP packet (an "ARP ping") - """ - r = arp() - r.opcode = arp.REQUEST - r.hwdst = macEntry.macaddr - r.hwsrc = self.ping_src_mac - r.protodst = ipAddr - # src is IP_ANY - e = ethernet(type=ethernet.ARP_TYPE, src=r.hwsrc, dst=r.hwdst) - e.payload = r - log.debug("%i %i sending ARP REQ to %s %s", - macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst)) - msg = of.ofp_packet_out(data = e.pack(), - action = of.ofp_action_output(port=macEntry.port)) - if core.openflow.sendToDPID(macEntry.dpid, msg.pack()): - ipEntry = macEntry.ipAddrs[ipAddr] - ipEntry.pings.sent() - else: - # macEntry is stale, remove it. - log.debug("%i %i ERROR sending ARP REQ to %s %s", - macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst)) - del macEntry.ipAddrs[ipAddr] - return - - def getSrcIPandARP (self, packet): - """ - Gets source IPv4 address for packets that have one (IPv4 and ARP) - - Returns (ip_address, has_arp). If no IP, returns (None, False). - """ - if isinstance(packet, ipv4): - log.debug("IP %s => %s",str(packet.srcip),str(packet.dstip)) - return ( packet.srcip, False ) - elif isinstance(packet, arp): - log.debug("ARP %s %s => %s", - {arp.REQUEST:"request",arp.REPLY:"reply"}.get(packet.opcode, - 'op:%i' % (packet.opcode,)), - str(packet.protosrc), str(packet.protodst)) - if (packet.hwtype == arp.HW_TYPE_ETHERNET and - packet.prototype == arp.PROTO_TYPE_IP and - packet.protosrc != 0): - return ( packet.protosrc, True ) - - return ( None, False ) - - def updateIPInfo (self, pckt_srcip, macEntry, hasARP): - """ - Update given MacEntry - - If there is IP info in the incoming packet, update the macEntry - accordingly. 
In the past we assumed a 1:1 mapping between MAC and IP - addresses, but removed that restriction later to accomodate cases - like virtual interfaces (1:n) and distributed packet rewriting (n:1) - """ - if pckt_srcip in macEntry.ipAddrs: - # that entry already has that IP - ipEntry = macEntry.ipAddrs[pckt_srcip] - ipEntry.refresh() - log.debug("%s already has IP %s, refreshing", - str(macEntry), str(pckt_srcip) ) - else: - # new mapping - ipEntry = IpEntry(hasARP) - macEntry.ipAddrs[pckt_srcip] = ipEntry - log.info("Learned %s got IP %s", str(macEntry), str(pckt_srcip) ) - if hasARP: - ipEntry.pings.received() - - def _handle_openflow_ConnectionUp (self, event): - if not self.install_flow: return - - log.debug("Installing flow for ARP ping responses") - - m = of.ofp_flow_mod() - m.priority += 1 # Higher than normal - m.match.dl_type = ethernet.ARP_TYPE - m.match.dl_dst = self.ping_src_mac - - m.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER)) - event.connection.send(m) - - def _handle_openflow_PacketIn (self, event): - """ - Populate MAC and IP tables based on incoming packets. - - Handles only packets from ports identified as not switch-only. - If a MAC was not seen before, insert it in the MAC table; - otherwise, update table and enry. - If packet has a source IP, update that info for the macEntry (may require - removing the info from antoher entry previously with that IP address). - It does not forward any packets, just extract info from them. - """ - dpid = event.connection.dpid - inport = event.port - packet = event.parsed - if not packet.parsed: - log.warning("%i %i ignoring unparsed packet", dpid, inport) - return - - if packet.type == ethernet.LLDP_TYPE: # Ignore LLDP packets - return - # This should use Topology later - if not core.openflow_discovery.is_edge_port(dpid, inport): - # No host should be right behind a switch-only port - log.debug("%i %i ignoring packetIn at switch-only port", dpid, inport) - return - - log.debug("PacketIn: %i %i ETH %s => %s", - dpid, inport, str(packet.src), str(packet.dst)) - - # Learn or update dpid/port/MAC info - macEntry = self.getMacEntry(packet.src) - if macEntry is None: - # there is no known host by that MAC - # should we raise a NewHostFound event (at the end)? - macEntry = MacEntry(dpid,inport,packet.src) - self.entryByMAC[packet.src] = macEntry - log.info("Learned %s", str(macEntry)) - self.raiseEventNoErrors(HostEvent, macEntry, join=True) - elif macEntry != (dpid, inport, packet.src): - # there is already an entry of host with that MAC, but host has moved - # should we raise a HostMoved event (at the end)? - log.info("Learned %s moved to %i %i", str(macEntry), dpid, inport) - # if there has not been long since heard from it... - if time.time() - macEntry.lastTimeSeen < timeoutSec['entryMove']: - log.warning("Possible duplicate: %s at time %i, now (%i %i), time %i", - str(macEntry), macEntry.lastTimeSeen, - dpid, inport, time.time()) - # should we create a whole new entry, or keep the previous host info? - # for now, we keep it: IP info, answers pings, etc. 
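The PacketIn handler above learns a (dpid, port) location per MAC and treats a move that happens faster than entryMove seconds as a possible duplicate. A compact standalone sketch of that logic (names are assumptions):

    import time

    ENTRY_MOVE_SECS = 60
    table = {}   # mac -> dict(dpid=..., port=..., last_seen=...)

    def learn(mac, dpid, port, now=None):
        now = time.time() if now is None else now
        entry = table.get(mac)
        if entry is None:
            table[mac] = dict(dpid=dpid, port=port, last_seen=now)
            return "join"
        moved = (entry["dpid"], entry["port"]) != (dpid, port)
        if moved and now - entry["last_seen"] < ENTRY_MOVE_SECS:
            pass  # possible duplicate MAC: moved faster than a host plausibly could
        entry.update(dpid=dpid, port=port, last_seen=now)
        return "move" if moved else "refresh"

    assert learn("aa", 1, 3) == "join"
    assert learn("aa", 1, 3) == "refresh"
    assert learn("aa", 2, 1) == "move"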
- e = HostEvent(macEntry, move=True, new_dpid = dpid, new_port = inport) - self.raiseEventNoErrors(e) - macEntry.dpid = e._new_dpid - macEntry.inport = e._new_port - - macEntry.refresh() - - (pckt_srcip, hasARP) = self.getSrcIPandARP(packet.__next__) - if pckt_srcip is not None: - self.updateIPInfo(pckt_srcip,macEntry,hasARP) - - if self.eat_packets and packet.dst == self.ping_src_mac: - return EventHalt - - def _check_timeouts (self): - """ - Checks for timed out entries - """ - for macEntry in list(self.entryByMAC.values()): - entryPinged = False - for ip_addr, ipEntry in list(macEntry.ipAddrs.items()): - if ipEntry.expired(): - if ipEntry.pings.failed(): - del macEntry.ipAddrs[ip_addr] - log.info("Entry %s: IP address %s expired", - str(macEntry), str(ip_addr) ) - else: - self.sendPing(macEntry,ip_addr) - ipEntry.pings.sent() - entryPinged = True - if macEntry.expired() and not entryPinged: - log.info("Entry %s expired", str(macEntry)) - # sanity check: there should be no IP addresses left - if len(macEntry.ipAddrs) > 0: - for ip in list(macEntry.ipAddrs.keys()): - log.warning("Entry %s expired but still had IP address %s", - str(macEntry), str(ip_addr) ) - del macEntry.ipAddrs[ip_addr] - self.raiseEventNoErrors(HostEvent, macEntry, leave=True) - del self.entryByMAC[macEntry.macaddr] diff --git a/pox/info/debug_deadlock.py b/pox/info/debug_deadlock.py @@ -1,55 +0,0 @@ -# Copyright 2012 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Primitive help for debugging deadlocks. -Prints stack info for all threads. -(Might be more useful if it only printed stack frames that -were not changing, sort of like recoco_spy.) - -This was initially factored out from a pox.py modification by -Colin or Andi. -""" - -import sys -import time -import inspect -import traceback -import threading -from pox.core import core - -def _trace_thread_proc (): - try: - while core.running: - frames = sys._current_frames() - for key in frames: - frame = frames[key] - print(inspect.getframeinfo(frame)) - outer_frames = inspect.getouterframes(frame) - for i in range(0, len(outer_frames)): - print(" " + str(inspect.getframeinfo(outer_frames[i][0]))) - - time.sleep(5) - except: - traceback.print_exc() - - -def launch (): - - _trace_thread = threading.Thread(target=_trace_thread_proc) - _trace_thread.daemon = True - - # Start it up a bit in the future so that it doesn't print all over - # init messages. - core.callDelayed(3, _trace_thread.start) diff --git a/pox/info/packet_dump.py b/pox/info/packet_dump.py @@ -1,108 +0,0 @@ -# Copyright 2012 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
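debug_deadlock above simply prints the stack of every thread. The same technique with just the standard library, callable from a watchdog timer:

    import sys, threading, traceback

    def dump_all_stacks():
        # One traceback per live thread, straight from the interpreter's frame map
        for thread_id, frame in sys._current_frames().items():
            print("Thread %s:" % (thread_id,))
            print("".join(traceback.format_stack(frame)))

    # e.g. fire it a few seconds after startup, as the component above does
    threading.Timer(5.0, dump_all_stacks).start()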
-# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A simple component that dumps packet_in info to the log. - -Use --verbose for really verbose dumps. -Use --show to show all packets. -""" - -from pox.core import core -import pox.openflow.libopenflow_01 as of -import pox.lib.packet as pkt -from pox.lib.util import dpidToStr - -log = core.getLogger() - -_verbose = None -_max_length = None -_types = None -_show_by_default = None - -def _handle_PacketIn (event): - packet = event.parsed - - show = _show_by_default - p = packet - while p: - if p.__class__.__name__.lower() in _types: - if _show_by_default: - # This packet is hidden - return - else: - # This packet should be shown - show = True - break - return - if not hasattr(p, 'next'): break - p = p.__next__ - - if not show: return - - msg = dpidToStr(event.dpid) + ": " - msg = "" - if _verbose: - msg += packet.dump() - else: - p = packet - while p: - if isinstance(p, str): - msg += "[%s bytes]" % (len(p),) - break - msg += "[%s]" % (p.__class__.__name__,) - p = p.__next__ - - if _max_length: - if len(msg) > _max_length: - msg = msg[:_max_length-3] - msg += "..." - core.getLogger("dump:" + dpidToStr(event.dpid)).debug(msg) - - -def launch (verbose = False, max_length = 110, full_packets = True, - hide = False, show = False): - global _verbose, _max_length, _types, _show_by_default - _verbose = verbose - _max_length = max_length - force_show = (show is True) or (hide is False and show is False) - if isinstance(hide, str): - hide = hide.replace(',', ' ').replace('|', ' ') - hide = set([p.lower() for p in hide.split()]) - else: - hide = set() - if isinstance(show, str): - show = show.replace(',', ' ').replace('|', ' ') - show = set([p.lower() for p in show.split()]) - else: - show = set() - - if hide and show: - raise RuntimeError("Can't both show and hide packet types") - - if show: - _types = show - else: - _types = hide - _show_by_default = not not hide - if force_show: - _show_by_default = force_show - - if full_packets: - # Send full packets to controller - core.openflow.miss_send_len = 0xffff - - core.openflow.addListenerByName("PacketIn", _handle_PacketIn) - - log.info("Packet dumper running") diff --git a/pox/info/recoco_spy.py b/pox/info/recoco_spy.py @@ -1,108 +0,0 @@ -# Copyright 2012 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This is an extremely primitive start at some debugging. -At the moment, it is really just for recoco (maybe it belongs in there?). 
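packet_dump's launch above accepts comma- or pipe-separated packet-type names for --hide/--show. The parsing step as a standalone sketch:

    def parse_type_list(value):
        # Non-string values (e.g. the default False) mean "no types listed"
        if not isinstance(value, str):
            return set()
        return set(p.lower() for p in
                   value.replace(',', ' ').replace('|', ' ').split())

    assert parse_type_list("ARP|ipv4,icmp") == {"arp", "ipv4", "icmp"}
    assert parse_type_list(False) == set()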
-""" - -from pox.core import core -log = core.getLogger() -import time -import traceback -import pox.lib.recoco - -_frames = [] - -def _tf (frame, event, arg): - if _frames is None: return _tf - #print " " * len(_frames) + event - if event == 'call': - _frames.append(frame) - return _tf - elif event == 'line': - return _tf - elif event == 'exception': - #_frames.pop() - return _tf - elif event == 'return': - _frames.pop() - elif event == 'c_call': - print("c_call") - _frames.append((frame,arg)) - elif event == 'c_exception': - _frames.pop() - elif event == 'c_return': - _frames.pop() - - -def _trace_thread_proc (): - last = None - last_time = None - warned = None - while True: - try: - time.sleep(1) - c = len(_frames) - if c == 0: continue - f = _frames[-1] - stopAt = None - count = 0 - sf = f - while sf is not None: - if sf.f_code == pox.lib.recoco.Scheduler.cycle.__func__.__code__: - stopAt = sf - break - count += 1 - sf = sf.f_back - #if stopAt == None: continue - - f = "\n".join([s.strip() for s in - traceback.format_stack(f,count)]) - #f = " / ".join([s.strip() for s in - # traceback.format_stack(f,1)[0].strip().split("\n")]) - #f = "\n".join([s.strip() for s in - # traceback.format_stack(f)]) - - if f != last: - if warned: - log.warning("Running again") - warned = None - last = f - last_time = time.time() - elif f != warned: - if time.time() - last_time > 3: - if stopAt is not None: - warned = f - log.warning("Stuck at:\n" + f) - - #from pox.core import core - #core.f = f - - except: - traceback.print_exc() - pass - - - -def launch (): - def f (): - import sys - sys.settrace(_tf) - core.callLater(f) - - import threading - _trace_thread = threading.Thread(target=_trace_thread_proc) - _trace_thread.daemon = True - _trace_thread.start() diff --git a/pox/info/switch_info.py b/pox/info/switch_info.py @@ -1,85 +0,0 @@ -# Copyright 2013 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Dumps info about switches when they first connect -""" - -from pox.core import core -import pox.openflow.libopenflow_01 as of -from pox.lib.util import dpid_to_str - -log = core.getLogger() - -# Formatted switch descriptions we've logged -# (We rememeber them so that we only print them once) -_switches = set() - -# .. 
unless always is True in which case we always print them -_always = False - -def _format_entry (desc): - def fmt (v): - if not v: return "<Empty>" - return str(v) - dpid = dpid_to_str(desc.connection.dpid) - ofp = desc.ofp.body - s = [] - ports = [(p.port_no,p.name) for p in list(desc.connection.ports.values())] - ports.sort() - ports = " ".join(p[1] for p in ports) - #if len(ports) > len(dpid)+12: - # ports = "%s ports" % (len(desc.connection.ports),) - - s.append("New Switch: " + dpid) - s.append("Hardware: " + fmt(ofp.hw_desc)) - s.append("Software: " + fmt(ofp.sw_desc)) - s.append("SerialNum: " + fmt(ofp.serial_num)) - s.append("Desc: " + fmt(ofp.dp_desc)) - s.append("Ports: " + fmt(ports)) - - # Let's get fancy - width = max(len(line) for line in s) - s.insert(0, "=" * width) - s.insert(2, "-" * width) - s.append( "=" * width) - - return "\n".join(s) - -def _handle_ConnectionUp (event): - msg = of.ofp_stats_request(body=of.ofp_desc_stats_request()) - msg.type = 0 # For betta bug, can be removed - event.connection.send(msg) - -def _handle_SwitchDescReceived (event): - s = _format_entry(event) - if not _always and s in _switches: - # We've already logged it. - return - _switches.add(s) - ss = s.split("\n") - - logger = core.getLogger("info." + dpid_to_str(event.connection.dpid)) - for s in ss: - logger.info(s) - - -def launch (always = False): - global _always - _always = always - - core.openflow.addListenerByName("ConnectionUp", - _handle_ConnectionUp) - core.openflow.addListenerByName("SwitchDescReceived", - _handle_SwitchDescReceived) diff --git a/pox/lib/addresses.py b/pox/lib/addresses.py @@ -1,845 +0,0 @@ -# Copyright 2011,2012,2013,2014 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Classes and utilities for addresses of various types. -""" - - -import struct -import socket - -# Slightly tested attempt at Python 3 friendliness -import sys -if 'long' not in sys.modules['builtins'].__dict__: - long = int - - - -_eth_oui_to_name = {} # OUI (3 bytes) -> name - -def _load_oui_names (): - """ - Load OUI names from textfile - - Assumes the textfile is adjacent to this source file. - """ - import inspect - import os.path - filename = os.path.join(os.path.dirname(inspect.stack()[0][1]), 'oui.txt') - f = None - try: - f = open(filename, encoding='latin-1') - for line in f.readlines(): - if len(line) < 1: - continue - if line[0].isspace(): - continue - split = line.split(' ') - if not '-' in split[0]: - continue - # grab 3-byte OUI - oui = b''.join(int(x,16).to_bytes(1, 'little') for x in split[0].split('-')) - # strip off (hex) identifer and keep rest of name - end = ' '.join(split[1:]).strip() - end = end.split('\t') - end.remove('(hex)') - oui_name = ' '.join(end) - _eth_oui_to_name[oui] = oui_name.strip() - except: - raise - import logging - logging.getLogger().warn("Could not load OUI list") - if f: f.close() -_load_oui_names() - - - -class EthAddr (object): - """ - An Ethernet (MAC) address type. - - Internal storage is six raw bytes. 
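_load_oui_names above maps 3-byte OUIs to vendor names by scanning oui.txt. A sketch of that parsing on a literal sample line (the real file's format is assumed to match):

    def parse_oui_lines(lines):
        table = {}
        for line in lines:
            parts = line.split()
            if len(parts) < 3 or parts[1] != "(hex)":
                continue
            prefix = bytes(int(x, 16) for x in parts[0].split("-"))
            table[prefix] = " ".join(parts[2:])
        return table

    sample = ["00-00-0C   (hex)\t\tCisco Systems, Inc"]
    table = parse_oui_lines(sample)
    assert table[b"\x00\x00\x0c"].startswith("Cisco")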
- """ - def __init__ (self, addr): - """ - Constructor - - Understands Ethernet address is various forms. Hex strings, raw byte - strings, etc. - """ - if isinstance(addr, bytes) or isinstance(addr, str): - if len(addr) == 6: - # raw - pass - elif len(addr) == 17 or len(addr) == 12 or addr.count(':') == 5: - # hex - if len(addr) == 17: - if addr[2::3] != ':::::' and addr[2::3] != '-----': - raise RuntimeError("Bad format for ethernet address") - # Address of form xx:xx:xx:xx:xx:xx - # Pick out the hex digits only - addr = ''.join((addr[x*3:x*3+2] for x in range(0,6))) - elif len(addr) == 12: - pass - else: - # Assume it's hex digits but they may not all be in two-digit - # groupings (e.g., xx:x:x:xx:x:x). This actually comes up. - addr = ''.join(["%02x" % (int(x,16),) for x in addr.split(":")]) - # We should now have 12 hex digits (xxxxxxxxxxxx). - # Convert to 6 raw bytes. - addr = b''.join((int(addr[x*2:x*2+2], 16).to_bytes(1, "little") for x in range(0,6))) - else: - raise RuntimeError("Expected ethernet address string to be 6 raw " - "bytes or some hex") - self._value = addr - elif isinstance(addr, EthAddr): - self._value = addr.toRaw() - elif isinstance(addr, (list,tuple,bytearray)): - self._value = b''.join( (chr(x) for x in addr) ) - elif (hasattr(addr, '__len__') and len(addr) == 6 - and hasattr(addr, '__iter__')): - # Pretty much same as above case, but for sequences we don't know. - self._value = b''.join( (chr(x) for x in addr) ) - elif addr is None: - self._value = b'\x00' * 6 - else: - raise RuntimeError("Expected ethernet address to be a string of 6 raw " - "bytes or some hex") - - def isBridgeFiltered (self): - """ - Checks if address is an IEEE 802.1D MAC Bridge Filtered MAC Group Address - - This range is 01-80-C2-00-00-00 to 01-80-C2-00-00-0F. MAC frames that - have a destination MAC address within this range are not relayed by - bridges conforming to IEEE 802.1D - """ - return ((ord(self._value[0]) == 0x01) - and (ord(self._value[1]) == 0x80) - and (ord(self._value[2]) == 0xC2) - and (ord(self._value[3]) == 0x00) - and (ord(self._value[4]) == 0x00) - and (ord(self._value[5]) <= 0x0F)) - - @property - def is_bridge_filtered (self): - return self.isBridgeFiltered() - - def isGlobal (self): - """ - Returns True if this is a globally unique (OUI enforced) address. - """ - return not self.isLocal() - - def isLocal (self): - """ - Returns True if this is a locally-administered (non-global) address. - """ - return True if (ord(self._value[0]) & 2) else False - - @property - def is_local (self): - return self.isLocal() - - @property - def is_global (self): - return self.isGlobal() - - def isMulticast (self): - """ - Returns True if this is a multicast address. - """ - return True if (ord(self._value[0]) & 1) else False - - @property - def is_multicast (self): - return self.isMulticast() - - def toRaw (self): - return self.raw - - @property - def raw (self): - """ - Returns the address as a 6-long bytes object. - """ - return self._value - - def toTuple (self): - return self.to_tuple() - - def to_tuple (self): - """ - Returns a 6-entry long tuple where each entry is the numeric value - of the corresponding byte of the address. - """ - return tuple((ord(x) for x in self._value)) - - def toStr (self, separator = ':', resolveNames = False): - return self.to_str(separator, resolveNames) - - def to_str (self, separator = ':', resolve_names = False): - """ - Returns string representation of address - - Usually this is six two-digit hex numbers separated by colons. 
- If resolve_names is True, it the first three bytes may be replaced by a - string corresponding to the OUI. - """ - if resolve_names and self.is_global: - # Don't even bother for local (though it should never match and OUI!) - name = _eth_oui_to_name.get(self._value[:3]) - if name: - rest = separator.join('%02x' % (ord(x),) for x in self._value[3:]) - return name + separator + rest - - return separator.join(('%02x' % (ord(x),) for x in self._value)) - - def __str__ (self): - return self.toStr() - - def __cmp__ (self, other): - #TODO: Revisit this and other __cmp__ in Python 3.4 - try: - if type(other) == EthAddr: - other = other._value - elif type(other) == bytes: - pass - else: - other = EthAddr(other)._value - return cmp(self._value, other) - except: - return -cmp(other, self) - - def __hash__ (self): - return self._value.__hash__() - - def __repr__ (self): - return type(self).__name__ + "('" + self.to_str() + "')" - - def __len__ (self): - return 6 - - def __setattr__ (self, a, v): - if hasattr(self, '_value'): - raise TypeError("This object is immutable") - object.__setattr__(self, a, v) - - - -class IPAddr (object): - """ - Represents an IPv4 address. - - Internal storage is a signed int in network byte order. - """ - def __init__ (self, addr, networkOrder = False): - """ - Initialize using several possible formats - - If addr is an int/long, then it is assumed to be in host byte order - unless networkOrder = True - - We only handle dotted-quad textual representations. That is, three dots - and four numbers. Oddball representations ("10.1") maybe not so much. - """ - - # Always stores as a signed network-order int - if isinstance(addr, (str, bytes, bytearray)): - if len(addr) != 4: - # dotted quad - self._value = struct.unpack('i', socket.inet_aton(addr))[0] - else: - self._value = struct.unpack('i', addr)[0] - elif isinstance(addr, IPAddr): - self._value = addr._value - elif isinstance(addr, int) or isinstance(addr, int): - addr = addr & 0xffFFffFF # unsigned long - self._value = struct.unpack("!i", - struct.pack(('!' if networkOrder else '') + "I", addr))[0] - else: - raise RuntimeError("Unexpected IP address format") - - def toSignedN (self): - """ A shortcut """ - return self.toSigned(networkOrder = True) - - def toUnsignedN (self): - """ A shortcut """ - return self.toUnsigned(networkOrder = True) - - def toSigned (self, networkOrder = False): - """ Return the address as a signed int """ - if networkOrder: - return self._value - v = socket.htonl(self._value & 0xffFFffFF) - return struct.unpack("i", struct.pack("I", v))[0] - - def toRaw (self): - return self.raw - - @property - def raw (self): - """ - Returns the address as a four-character byte string. - """ - return struct.pack("i", self._value) - - def toUnsigned (self, networkOrder = False): - """ - Returns the address as an integer in either network or host (the - default) byte order. - """ - if not networkOrder: - return socket.htonl(self._value & 0xffFFffFF) - return self._value & 0xffFFffFF - - def toStr (self): - """ Return dotted quad representation """ - return socket.inet_ntoa(self.toRaw()) - - def in_network (self, *args, **kw): - return self.inNetwork(*args, **kw) - - def inNetwork (self, network, netmask = None): - """ - Returns True if this network is in the specified network. - network is a dotted quad (with or without a CIDR or normal style - netmask, which can also be specified separately via the netmask - parameter), or it can be a tuple of (address,network-bits) like that - returned by parse_cidr(). 
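IPAddr above round-trips between dotted-quad text and a 32-bit integer and tests network membership with a mask. The same operations as a standalone sketch (helper names are assumptions):

    import socket, struct

    def ip_to_int(dotted):
        return struct.unpack("!I", socket.inet_aton(dotted))[0]

    def in_network(addr, network, bits):
        mask = ~((1 << (32 - bits)) - 1) & 0xFFFFFFFF
        return (ip_to_int(addr) & mask) == (ip_to_int(network) & mask)

    assert ip_to_int("10.0.0.1") == 0x0A000001
    assert in_network("10.1.2.3", "10.1.0.0", 16)
    assert not in_network("10.2.0.1", "10.1.0.0", 16)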
- """ - if type(network) is not tuple: - if netmask is not None: - network = str(network) - network += "/" + str(netmask) - n,b = parse_cidr(network) - else: - n,b = network - if type(n) is not IPAddr: - n = IPAddr(n) - - return (self.toUnsigned() & ~((1 << (32-b))-1)) == n.toUnsigned() - - @property - def is_multicast (self): - return ((self.toSigned(networkOrder = False) >> 24) & 0xe0) == 0xe0 - - @property - def multicast_ethernet_address (self): - """ - Returns corresponding multicast EthAddr - - Assumes this is, in fact, a multicast IP address! - """ - if not self.is_multicast: - raise RuntimeError("No multicast EthAddr for non-multicast IPAddr!") - n = self.toUnsigned(networkOrder = False) & 0x7fffff - return EthAddr("01005e" + ("%06x" % (n))) - - def __str__ (self): - return self.toStr() - - def __cmp__ (self, other): - if other is None: return 1 - try: - if not isinstance(other, IPAddr): - other = IPAddr(other) - return cmp(self.toUnsigned(), other.toUnsigned()) - except: - return -other.__cmp__(self) - - def __hash__ (self): - return self._value.__hash__() - - def __repr__ (self): - return self.__class__.__name__ + "('" + self.toStr() + "')" - - def __len__ (self): - return 4 - - def __setattr__ (self, a, v): - if hasattr(self, '_value'): - raise TypeError("This object is immutable") - object.__setattr__(self, a, v) - - -IP_ANY = IPAddr("0.0.0.0") -IP_BROADCAST = IPAddr("255.255.255.255") - - - -class IPAddr6 (object): - """ - Represents an IPv6 address. - - Internally stored as 16 raw bytes. - """ - @classmethod - def from_raw (cls, raw): - """ - Factory that creates an IPAddr6 from six raw bytes - """ - return cls(raw, raw=True) - - @classmethod - def from_num (cls, num): - """ - Factory that creates an IPAddr6 from a large integer - """ - o = b'' - for i in range(16): - o = chr(num & 0xff) + o - num >>= 8 - return cls.from_raw(o) - - def __init__ (self, addr = None, raw = False, network_order = False): - """ - Construct IPv6 address - - We accept the following as inputs: - Textual IPv6 representations as a str or unicode (including mixed notation - with an IPv4-like component) - Raw IPv6 addresses (128 bits worth of bytearray or, if raw=True, bytes) - IPAddr (converted to IPv4-mapped IPv6 addresses) - IPAddr6 (just copied) - None (creates an "undefined" IPv6 address) - """ - # When we move to Python 3, we can use bytes to infer raw. For now, we - # have the 'raw' argument, which we'll take as either a boolean indicating - # that addr is raw, or we'll take it as the raw address itself. - if addr is None and isinstance(raw, (bytes,bytearray)): - # Allow passing in raw value using either addr=address + raw=True or - # addr=None + raw=address - addr = raw - raw = True - - if addr is None: - # Should we even allow this? It's a weird case. - self._value = self.UNDEFINED._value - elif isinstance(addr, str) or (isinstance(addr, bytes) and not raw): - # A textual IPv6 representation - ip4part = None - if '.' in addr: - # It contains a dot, so it is in "mixed notation" - addr,ip4part = addr.rsplit(':',1) - if '.' in addr: - # We don't implement this, which is probably fine because they are - # deprecated. 
- raise RuntimeError('IPv4-compatible representation unimplemented') - if ':' in ip4part: - raise RuntimeError('Bad address format') - addr += ':0:0' - - segs = addr.split(':') - if addr.count('::') > 1: - raise RuntimeError("Bad address format " + str(addr)) - if len(segs) < 3 or len(segs) > 8: - raise RuntimeError("Bad address format " + str(addr)) - - # Parse the two "sides" of the address (left and right of the optional - # dropped section) - p = ([],[]) - side = 0 - for i,s in enumerate(segs): - if len(s) == 0: - #if side != 0: - #if i != len(segs)-1: - # raise RuntimeError("Bad address format " + str(addr)) - side = 1 - continue - s = int(s,16) - if s < 0 or s > 0xffff: - # Each chunk must be at most 16 bits! - raise RuntimeError("Bad address format " + str(addr)) - p[side].append(s) - - # Add the zeroes (if any) between the sides - o = p[0] + ([0] * (8-len(p[0])-len(p[1]))) + p[1] - - # Pack into raw format - v = b'' - for b in o: - v += struct.pack('!H', b) - - # Append IPv4 part which we chopped off earlier - if ip4part is not None: - v = v[:-4] + IPAddr(ip4part).toRaw() - - self._value = v - elif isinstance(addr, type(self)): - # Copy constructor - self._value = addr._value - elif isinstance(addr, IPAddr): - # IPv4-mapped - self._value = IPAddr6("::ffff:0:0:" + str(addr))._value - elif isinstance(addr, bytearray): - # Raw value - if len(addr) != 16: raise ValueError("Raw IPv6 addresses are 16 bytes") - self._value = bytes(addr) - elif isinstance(addr, bytes): - # Raw value - if len(addr) != 16: raise ValueError("Raw IPv6 addresses are 16 bytes") - self._value = addr - else: - raise RuntimeError("Unexpected IP address format") - - @property - def raw (self): - return self._value - - @property - def ipv4 (self): - return self.to_ipv4(check_ipv4=False) - - def to_ipv4 (self, check_ipv4 = True): - """ - Convert to an IPAddr - - This only makes sense if this address is ipv4 mapped/compatible. By - default we check that this is the case. - """ - if check_ipv4: - if not self.is_ipv4: - raise RuntimeError('Not an IPv4ish IPv6 address') - return IPAddr(self._value[-4:]) - - @property - def num (self): - o = 0 - for b in self._value: - o = (o << 8) | b - return o - - @property - def is_multicast (self): - return self.in_network('ff00::/8') - - @property - def is_global_unicast (self): - return self.in_network('2000::/3') - - @property - def is_unique_local_unicast (self): - return self.in_network('fc00::/7') - - @property - def is_link_unicast (self): - return self.in_network('fe80::/10') - - @property - def is_ipv4 (self): - return self.in_network('::/80') - - @property - def is_ipv4_compatible (self): - return self.in_network('::/96') - - @property - def is_ipv4_mapped (self): - return self.in_network('::ffff:0:0/96') - - @property - def is_reserved (self): - #TODO - raise RuntimeError("Not implemented") - - @staticmethod - def netmask_to_cidr (dq): - """ - Takes a netmask as either an IPAddr or a string, and returns the number - of network bits. e.g., 255.255.255.0 -> 24 - Raise exception if subnet mask is not CIDR-compatible. - """ - if isinstance(dq, str): - dq = IPAddr6(dq) - v = dq.num - c = 0 - while v & (1<<127): - c += 1 - v <<= 1 - v = v & ((1<<128)-1) - if v != 0: - raise RuntimeError("Netmask %s is not CIDR-compatible" % (dq,)) - return c - - @staticmethod - def cidr_to_netmask (bits): - """ - Takes a number of network bits, and returns the corresponding netmask - as an IPAddr6. 
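The IPv6 parser above splits on ':' and pads the optional '::' gap with zero hextets. A simplified sketch that handles just that case (it ignores the IPv4 mixed notation handled above):

    def expand_ipv6(text):
        if "::" in text:
            left, right = text.split("::", 1)
            l = [int(s, 16) for s in left.split(":") if s]
            r = [int(s, 16) for s in right.split(":") if s]
            return l + [0] * (8 - len(l) - len(r)) + r
        return [int(s, 16) for s in text.split(":")]

    assert expand_ipv6("2001:db8::1") == [0x2001, 0xdb8, 0, 0, 0, 0, 0, 1]
    assert expand_ipv6("::") == [0] * 8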
- """ - v = (1 << bits) - 1 - v = v << (128-bits) - return IPAddr6.from_num(v) - - @staticmethod - def parse_cidr (addr_and_net, allow_host = False): - """ - Parses addr/netbits or addr/netmask - - Returns (IPAddr6,netbits) - """ - addr = addr_and_net - def check (r0, r1): - a = r0.num - b = r1 - if (not allow_host) and (a & ((1<<b)-1)): - raise RuntimeError("Host part of CIDR address is not zero (%s)" - % (addr,)) - return (r0,128-r1) - addr = addr.split('/', 2) - if len(addr) == 1: - return check(IPAddr6(addr[0]), 0) - try: - wild = 128-int(addr[1]) - except: - # Maybe they passed a netmask - m = IPAddr6(addr[1]).num - b = 0 - while m & (1<<127): - b += 1 - m <<= 1 - if m & ((1<<127)-1) != 0: - raise RuntimeError("Netmask " + str(addr[1]) - + " is not CIDR-compatible") - wild = 128-b - assert wild >= 0 and wild <= 128 - return check(IPAddr6(addr[0]), wild) - assert wild >= 0 and wild <= 128 - return check(IPAddr6(addr[0]), wild) - - def in_network (self, network, netmask = None): - """ - Returns True if this address is in the specified network. - - network can be specified as: - IPAddr6 with numeric netbits or netmask in netmask parameter - textual network with numeric netbits or netmask in netmask parameter - textual network with netbits or netmask separated by a slash - tuple of textual address and numeric netbits - tuple of IPAddr6 and numeric netbits - """ - if type(network) is not tuple: - if netmask is not None: - network = str(network) + "/" + str(netmask) - n,b = self.parse_cidr(network) - else: - n,b = network - if type(n) is not IPAddr6: - n = IPAddr6(n) - - return (self.num & ~((1 << (128-b))-1)) == n.num - - def to_str (self, zero_drop = True, section_drop = True, ipv4 = None): - """ - Creates string representation of address - - There are many ways to represent IPv6 addresses. You get some options. - zero_drop and section_drop allow for creating minimized representations. - ipv4 controls whether we print a "mixed notation" representation. By - default, we do this only for IPv4-mapped addresses. You can stop this by - passing ipv4=False. You can also force mixed notation representation - by passing ipv4=True; this probably only makes sense if .is_ipv4_compatible - (or .is_ipv4_mapped, of course). - """ - o = [lo | (hi<<8) for hi,lo in - (self._value[i:i+2] for i in range(0,16,2))] - - if (ipv4 is None and self.is_ipv4_mapped) or ipv4: - ip4part = o[-2:] - o[-2:] = [1,1] - def finalize (s): - s = s.rsplit(':',2)[0] - return s + ":" + str(IPAddr(self.raw[-4:])) - else: - def finalize (s): - return s - - if zero_drop: - def fmt (n): - return ':'.join('%x' % (b,) for b in n) - else: - def fmt (n): - return ':'.join('%04x' % (b,) for b in n) - - if section_drop: - z = [] # [length,pos] of zero run - run = None - for i,b in enumerate(o): - if b == 0: - if run is None: - run = [1,i] - z.append(run) - else: - run[0] += 1 - else: - run = None - - if len(z): - # Sloppy! 
- max_len = max([length for length,pos in z]) - if max_len > 1: - z = [pos for length,pos in z if length == max_len] - z.sort() - pos = z[0] - return finalize('::'.join((fmt(o[:pos]),fmt(o[pos+max_len:])))) - - return finalize(fmt(o)) - - def __str__ (self): - return self.to_str() - - def __cmp__ (self, other): - if other is None: return 1 - try: - if not isinstance(other, type(self)): - other = type(self)(other) - return cmp(self._value, other._value) - except: - return -cmp(other,self) - - def __hash__ (self): - return self._value.__hash__() - - def __repr__ (self): - return type(self).__name__ + "('" + self.to_str() + "')" - - def __len__ (self): - return 16 - - def __setattr__ (self, a, v): - if hasattr(self, '_value'): - raise TypeError("This object is immutable") - object.__setattr__(self, a, v) - - def set_mac (self, eth): - e = list(EthAddr(eth).toTuple()) - e[0] ^= 2 - e[3:3] = [0xff,0xfe] - e = ''.join(chr(b) for b in e) - return IPAddr6.from_raw(self._value[:8]+e) - - -IPAddr6.UNDEFINED = IPAddr6('::') -IPAddr6.ALL_NODES_LINK_LOCAL = IPAddr6('ff02::1') -IPAddr6.ALL_ROUTERS_LINK_LOCAL = IPAddr6('ff02::2') -IPAddr6.ALL_NODES_INTERFACE_LOCAL = IPAddr6('ff01::1') -IPAddr6.ALL_ROUTERS_INTERFACE_LOCAL = IPAddr6('ff01::2') -#ff02::1:3 link local multicast name resolution -#ff02::1:ff00:0/104 solicited-node -#ff02::2:ff00:0/104 node information query - - - -def netmask_to_cidr (dq): - """ - Takes a netmask as either an IPAddr or a string, and returns the number - of network bits. e.g., 255.255.255.0 -> 24 - Raise exception if subnet mask is not CIDR-compatible. - """ - if isinstance(dq, str): - dq = IPAddr(dq) - v = dq.toUnsigned(networkOrder=False) - c = 0 - while v & 0x80000000: - c += 1 - v <<= 1 - v = v & 0xffFFffFF - if v != 0: - raise RuntimeError("Netmask %s is not CIDR-compatible" % (dq,)) - return c - - -def cidr_to_netmask (bits): - """ - Takes a number of network bits, and returns the corresponding netmask - as an IPAddr. e.g., 24 -> 255.255.255.0 - """ - v = (1 << bits) - 1 - v = v << (32-bits) - return IPAddr(v, networkOrder = False) - - -def parse_cidr (addr, infer=True, allow_host=False): - """ - Takes a CIDR address or plain dotted-quad, and returns a tuple of address - and count-of-network-bits. - Can infer the network bits based on network classes if infer=True. - Can also take a string in the form 'address/netmask', as long as the - netmask is representable in CIDR. - - FIXME: This function is badly named. 
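The section_drop printing above looks for the longest run of zero hextets and collapses it to "::". A standalone sketch of that selection and formatting:

    def compress_hextets(hextets):
        best_pos, best_len, run_pos, run_len = -1, 0, -1, 0
        for i, h in enumerate(hextets + [None]):       # sentinel ends a final run
            if h == 0:
                if run_len == 0:
                    run_pos = i
                run_len += 1
                if run_len > best_len:
                    best_pos, best_len = run_pos, run_len
            else:
                run_len = 0
        if best_len < 2:                               # only compress runs of 2+
            return ":".join("%x" % h for h in hextets)
        left = ":".join("%x" % h for h in hextets[:best_pos])
        right = ":".join("%x" % h for h in hextets[best_pos + best_len:])
        return left + "::" + right

    assert compress_hextets([0x2001, 0xdb8, 0, 0, 0, 0, 0, 1]) == "2001:db8::1"
    assert compress_hextets([0xfe80, 0, 0, 0, 1, 2, 3, 4]) == "fe80::1:2:3:4"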
- """ - def check (r0, r1): - a = r0.toUnsigned() - b = r1 - if (not allow_host) and (a & ((1<<b)-1)): - raise RuntimeError("Host part of CIDR address is not zero (%s)" - % (addr,)) - return (r0,32-r1) - addr = addr.split('/', 2) - if len(addr) == 1: - if infer is False: - return check(IPAddr(addr[0]), 0) - addr = IPAddr(addr[0]) - b = 32-infer_netmask(addr) - m = (1<<b)-1 - if (addr.toUnsigned() & m) == 0: - # All bits in wildcarded part are 0, so we'll use the wildcard - return check(addr, b) - else: - # Some bits in the wildcarded part are set, so we'll assume it's a host - return check(addr, 0) - try: - wild = 32-int(addr[1]) - except: - # Maybe they passed a netmask - m = IPAddr(addr[1]).toUnsigned() - b = 0 - while m & (1<<31): - b += 1 - m <<= 1 - if m & 0x7fffffff != 0: - raise RuntimeError("Netmask " + str(addr[1]) + " is not CIDR-compatible") - wild = 32-b - assert wild >= 0 and wild <= 32 - return check(IPAddr(addr[0]), wild) - assert wild >= 0 and wild <= 32 - return check(IPAddr(addr[0]), wild) - - -def infer_netmask (addr): - """ - Uses network classes to guess the number of network bits - """ - addr = addr.toUnsigned() - if addr == 0: - # Special case -- default network - return 32-32 # all bits wildcarded - if (addr & (1 << 31)) == 0: - # Class A - return 32-24 - if (addr & (3 << 30)) == 2 << 30: - # Class B - return 32-16 - if (addr & (7 << 29)) == 6 << 29: - # Class C - return 32-8 - if (addr & (15 << 28)) == 14 << 28: - # Class D (Multicast) - return 32-0 # exact match - # Must be a Class E (Experimental) - return 32-0 diff --git a/pox/lib/graph/graph.py b/pox/lib/graph/graph.py @@ -1,709 +0,0 @@ -# Copyright 2011 James McCauley -# Copyright 2012 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#import networkx as nx -import pox.lib.graph.minigraph as nx -from collections import defaultdict -from copy import copy - -LINK = 'link' - -class Link (object): - - def reorder (self, l): - """ - Flips a list of Links so that this node is first in each - """ - return Link.order(l, self) - - @staticmethod - def order (links, n): - """ - Give a list of Links that each contain node n, flips any links so - that n is always the first element of the link. - """ - r = [] - for l in links: - assert n in l - if l._n[0] == n: - r.append(l) - else: - r.append(l.flip()) - return r - - def __init__ (self, np1, np2): - self._n = [np1[0],np2[0]] - self._p = [np1[1],np2[1]] - - def _index (self, i): - if i in self._n: - i = self._n.index(i) - assert i == 0 or i == 1 - return i - - def flip (self): - """ - Returns the same link, but flipped (a,b) becomes (b,a) - """ - return Link(self[1], self[0]) - - def port (self, n): - return self._p[_index(n)] - - def other_port (self, n): - """ - Returns the other end's port. - See other(). - """ - return self.other(n)[1] - - def other (self, n): - """ - Returns the other end of a link. - Given a node or (node,port) that is part of this link, it returns - the opposite end's (node,port). 
- """ - if type(n) is tuple: - if self[0] == n: - return self[1] - assert self[1] == n - return self[0] - - if self[0][0] == n: - return self[1] - assert self[1][0] == n - return self[0] - - def __contains__ (self, n): - """ - Does this link contain (node,port) or node? - """ - if type(n) is tuple: - return n in [self[0], self[1]] - else: - return n in [self._n] - - def __len__ (self): - return 2 - - def __getitem__ (self, i): - """ - Gets (node,port) based on index - """ - i = self._index(i) - return (self._n[i], self._p[i]) - - def __repr__ (self): - return "Link(%s, %s)" % (self[0], self[1]) - - -class Node (object): - pass - #TODO: Add back in some convenience methods that call real methods - # on the parent graph? Or just remove? - - -def _void (): - return None - -class LeaveException (RuntimeError): - pass - -class Operator (object): - def __repr__ (self): - return "<%s>" % (self.__class__.__name__) - -class Literal (Operator): - def __init__ (self, v): - self._v = v - def __call__ (self, n, li=None): - return self._v - def __repr__ (self): - return repr(self._v) - -class Anything (Operator): - def __call__ (self, n, li): - return True - - def __repr__ (self): - return "Anything" - -class Self (Operator): - def __call__ (self, n, li=None): - return n - def __repr__ (self): - return "Self" - -class Port (Operator): - def __call__ (self, n, li): - if li is None: - raise RuntimeError("You can only use Port for link queries") - return li[0][1] - - def __repr__ (self): - return "Port" - -class OtherPort (Operator): - def __call__ (self, n, li): - if li is None: - raise RuntimeError("You can only use OtherPort for link queries") - return li[1][1] - - def __repr__ (self): - return "OtherPort" - -class Other (Operator): - def __call__ (self, n, li): - if li is None: - raise RuntimeError("You can only use Other for link queries") - return li[1][0] - - def __repr__ (self): - return "Other" - -class Call (Operator): - def __init__ (_self, *arg, **kw): - _self._arg = [] - for v in arg: - ao = None - if isinstance(v, Operator): - ao = v - else: - ao = Literal(v) - _self._arg.append(ao) - _self._kw = {} - for k,v in kw.items(): - ao = None - if isinstance(v, Operator): - ao = v - else: - ao = Literal(v) - _self._kw[k].append(ao) - - def __call__ (self, n, li): - arglist = [] - for arg in self._arg: - arglist.append(arg(n,li)) - kws = {} - for k,v in self._kw.items(): - kws[k] = v(n) - func = arglist.pop(0) - return func(*arglist, **kws) - - def __repr__ (self): - r = str(self._arg[0]) - args = [str(s) for s in self._arg[1:]] - args.append(["%s=%s" % (k,str(v)) for k,v in self._kw]) - return "%s(%s)" % (self._arg[0], ', '.join(args)) - -class UnaryOp (Operator): - def __init__ (self, operand): - if isinstance(operand, Operator): - self._operand = operand - else: - self._operand = Literal(operand) - - def __call__ (self, n, li): - a = self._operand(n, li) - return self._apply(a) - - def _apply (self, attr): - raise RuntimeError("Unimplemented") - -class BinaryOp (Operator): - def __init__ (self, left, right): - if isinstance(left, Operator): - self._left = left - else: - self._left = Literal(left) - if isinstance(right, Operator): - self._right = right - else: - self._right = Literal(right) - - def __call__ (self, n, li): - l = self._left(n, li) - r = self._right(n, li) - return self._apply(l, r) - - def _apply (self, l, r): - raise RuntimeError("Unimplemented") - - def __repr__ (self): - if hasattr(self, '_symbol'): - return "%s %s %s" % (self._left, self._symbol, self._right) - else: - return 
"%s(%s, %s)" % (self.__class__.__name__, self._left, self._right) - -class Or (BinaryOp): - _symbol = "or" - def _apply (self, l, r): - return l or r - -class And (BinaryOp): - _symbol = "and" - def _apply (self, l, r): - return l and r - -class LessThan (BinaryOp): - _symbol = "<" - def _apply (self, value): - return value < self._value - -class GreaterThan (BinaryOp): - _symbol = ">" - def _apply (self, l, r): - return value > self._value - -class LessThanEqualTo (BinaryOp): - _symbol = "<=" - def _apply (self, l, r): - return value <= self._value - -class GreaterThanEqualTo (BinaryOp): - _symbol = "=>" - def _apply (self, l, r): - return value > self._value - -class Not (UnaryOp): - def _apply (self, v): - return not v - - def __repr__ (self): - return "(Not %s)" % (self._operand,) - -class Length (UnaryOp): - def _apply (self, v): - return len(v) - - def __repr__ (self): - return "len(%s)" % (self._operand,) - -class Index (BinaryOp): - def _apply (self, l, r): - return l[r] - - def __repr__ (self): - return "%s[%s]" % (self._left, self._right) - -_dummy = object() -class NodeOp (Operator): - """ - Can be a binary operator, or if only one argument supplied, the - left one defaults to the node. - """ - def __init__ (self, left, right=_dummy): - if right is _dummy: - right = left - left = Self() - - if isinstance(left, Operator): - self._left = left - else: - self._left = Literal(left) - if isinstance(right, Operator): - self._right = right - else: - self._right = Literal(right) - - def __call__ (self, n, li): - l = self._left(n, li) - r = self._right(n, li) - return self._apply(l, r) - - def _apply (self, l, r): - raise RuntimeError("Unimplemented") - - def __repr__ (self): - if hasattr(self, '_symbol'): - return "%s %s %s" % (self._left, self._symbol, self._right) - else: - return "%s(%s, %s)" % (self.__class__.__name__, self._left, self._right) - -class Equal (NodeOp): - _symbol = "==" - def _apply (self, l, r): - #print "???", repr(l), repr(r), l == r - return l == r - -class Is (NodeOp): - _symbol = "is" - def _apply (self, l, r): - return l is r - -class Field (NodeOp): - def __init__ (self, left, right=_dummy, optional=True): - NodeOp.__init__(self, left, right) - self._optional = optional - - def _apply (self, l, r): - #print ">>",self._attr_name,hasattr(n, self._attr_name) - do_call = r.endswith("()") - if do_call: r = r[:-2] - if not hasattr(l, r) and self._optional: - raise LeaveException - a = getattr(l, r) - if do_call: a = a() - #print ">>>",a - return a -F = Field # Short alias - -class IsInstance (NodeOp): - def _apply (self, l, r): - return isinstance(l, r) - def __repr__ (self): - return "isinstance(%s, %s)" % (self._left, self._right) - -class IsType (NodeOp): - def _apply (self, l, r): - if isinstance(r, str): - return type(l).__name__ == r - return type(l) is r - def __repr__ (self): - return "type(%s) == %s" % (self._left, self._right) - -class ConnectedTo (NodeOp): - def _apply (self, l, r): - return l.connected_to(r) - def __repr__ (self): - return "%s.connected_to(%s)" % (self._left, self._right) - -class InValues (BinaryOp): - def __init__ (self, left, right): - super(Member, self).__init__(left, right) - self._optional = optional - - def _apply (self, l, r): - return l in list(r.values()) - -class In (BinaryOp): - def _apply (self, l, r): - return l in r - -class Member (BinaryOp): - _symbol = "." 
- def __init__ (self, left, right, optional = True): - super(Member, self).__init__(left, right) - self._optional = optional - - def _apply (self, l, r): - if not hasattr(l, r) and self._optional: - raise LeaveException - return getattr(l, r) - - -class Graph (object): - def __init__ (self): - self._g = nx.MultiGraph() - self.node_port = {} - - def __contains__ (self, n): - return n in self._g - - def add (self, node): - self._g.add_node(node) - self.node_port[node] = {} - - def remove (self, node): - self._g.remove_node(node) - - def neighbors (self, n): - return self._g.neighbors(n) - - def find_port (self, node1, node2): - for n1, n2, k, d in self._g.edges([node1, node2], data=True, keys=True): - return (d[LINK][node1][1], d[LINK][node2][1]) - return None - - def connected(self, node1, node2): - return (self.find_port(node1, node2) != None) - - def disconnect_port (self, np): - """ - Disconnects the given (node,port) - """ - assert type(np) is tuple - remove = [] - if self.port_for_node(np[0], np[1]) is None: - return 0 - for n1,n2,k,d in self._g.edges([np[0], self.node_port[np[0]][np[1]][0]], data=True, keys=True): - if np in d[LINK]: - remove.append((n1,n2,k)) - del self.node_port[n1][d[LINK][n1][1]] - del self.node_port[n2][d[LINK][n2][1]] - for e in remove: - #print "remove",e - self._g.remove_edge(*e) - return len(remove) - - def unlink (self, np1, np2): - count = 0 - if isinstance(np1, tuple): - count = disconnect_port(np1) - elif isinstance(np2, tuple): - count = disconnect_port(np2) - else: - for n1, n2, k, d in self._g.edges([np1, np2], data=True, keys=True): - self._g.remove_edge(n1,n2,k) - del self.node_port[n1][d[LINK][n1][1]] - del self.node_port[n2][d[LINK][n2][1]] - count = count + 1 - return count - - def link (self, np1, np2): - """ - Links two nodes on given ports - np1 is (node1, port1) - np2 is (node2, port2) - """ - #FIXME: the portless variation doesn't really make sense with - # allow_multiples yet. - try: - _ = np1[0] - except: - # portless (hacky) - for free in range(1000): - if free not in np1.ports: - np1 = (np1,free) - break - try: - _ = np2[0] - except: - # portless (hacky) - for free in range(1000): - if free not in np2.ports: - np2 = (np2,free) - break - self._g.add_node(np1[0]) - self._g.add_node(np2[0]) - self.disconnect_port(np1) - self.disconnect_port(np2) - self._g.add_edge(np1[0],np2[0],link=Link(np1,np2)) - self.node_port[np1[0]][np1[1]] = np2 - self.node_port[np2[0]][np2[1]] = np1 - - def find_links (self, query1=None, query2=()): - # No idea if new link query stuff works. 
- if query2 is None: query2 = query1 - if query1 == (): query1 = None - if query2 == (): query2 = None - o = set() - for n1,n2,k,d in self._g.edges(data=True, keys=True): - l = d[LINK] - ok = False - if query1 is None or self._test_node(l[0][0], args=(query1,), link=l): - if query2 is None or self._test_node(l[1][0], args=(query2,), link=l): - ok = True - if not ok and (query1 != query2): - if query2 is None or self._test_node(l[0][0], args=(query2,), link=l): - if query1 is None or self._test_node(l[1][0], args=(query1,), link=l): - ok = True - l = l.flip() - if ok: - o.add(l) - return list(o) - - def ports_for_node (self, node): - """ - Map of local port -> (other, other_port) - """ - ports = defaultdict(_void) - for n1, n2, k, d in self._g.edges([node], data=True, keys=True): - p = d[LINK] - assert n1 is node - assert ports.get(p[node]) is None - ports[p[node][1]] = p.other(node) - return ports - - def port_for_node(self, node, port): - assert node in self.node_port - return self.node_port[node].get(port) - - def disconnect_nodes(self, node1, node2): - """ Disconnect node1 from node2. Either of node1 or node2 - can be a node, or a (node, port) pair - Returns number of nodes disconnected - """ - self.unlink(node1, node2) - - def disconnect_node(self, node1): - """ Disconnecte node from all neighbours """ - for neighbor in self.neighbors(node1): - self.disconnect_nodes(node1, neighbor) - - def get_one_link (self, query1=None, query2=(), **kw): - return self.get_link(query1, query2, one=True, **kw) - - def get_link (self, query1=None, query2=(), **kw): - """ - Keyword argument "default" lets you set a default value if - no node is found. Note that this means you must use - Equal(F("default"), <value>) to actually check a field called - "default" on a node. - """ - if 'default' in kw: - has_default = True - default = kw['default'] - del kw['default'] - else: - has_default = False - one = False - if 'one' in kw: - one = kw['one'] - del kw['one'] - assert len(kw) == 0 - r = self.find_links(query1, query2) - if len(r) > 1 and one: - raise RuntimeError("More than one match") - elif len(r) == 0: - if has_default: - return default - raise RuntimeError("Could not get element") - return r[0] - - def has_link (self, query1=None, query2=()): - # Really bad implementation. We can easily scape early. - return len(self.find_links(query1, query2)) > 0 - - def _test_node (self, n, args=(), kw={}, link=None): - #TODO: Should use a special value for unspecified n2 - for k,v in kw.items(): - if k == "is_a": - if not isinstance(n,v): return False - elif k == "type": - if type(n) is not v: return False - else: - if not hasattr(n, k): return False - if getattr(n, k) != v: return False - for a in args: - try: - if not a(n, link): - return False - except LeaveException: - return False - return True - - def find (self, *args, **kw): - r = [] - def test (n): - return self._test_node(n, args, kw) - for n in self._g.nodes(): - if test(n): - r.append(n) - return r - - def get_one (self, *args, **kw): - kw['one'] = True - return self.get(*args, **kw) - - def get (self, *args, **kw): - """ - Keyword argument "default" lets you set a default value if - no node is found. Note that this means you must use - Equal(F("default"), <value>) to actually check a field called - "default" on a node. 
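# Editor's note: an illustrative sketch (not part of the original file) of the
# query operators used with Graph.find()/get_one()/find_links(); sw1/sw2, their
# Switch class, and the 'dpid' attribute are hypothetical placeholders.
g = Graph()
g.add(sw1); g.add(sw2)
g.link((sw1, 1), (sw2, 1))                      # connect port 1 of each node
g.find(is_a=Switch)                             # keyword filter: isinstance() test
g.find(Equal(F('dpid'), 1))                     # operator filter: node.dpid == 1
g.get_one(Equal(F('dpid'), 99), default=None)   # 'default' suppresses the RuntimeError
g.find_links(Equal(F('dpid'), 1))               # links with at least one matching end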
- """ - if 'default' in kw: - has_default = True - default = kw['default'] - del kw['default'] - else: - has_default = False - one = False - if 'one' in kw: - del kw['one'] - one = True - r = self.find(*args,**kw) - if len(r) > 1 and one: - raise RuntimeError("More than one match") - elif len(r) == 0: - if has_default: - return default - raise RuntimeError("Could not get element") - return r[0] - - def has (self, *args, **kw): - # Really bad implementation. We can easily scape early. - return len(self.find(*args,**kw)) > 0 - - def __len__ (self): - return len(self._g) - -def test(): - class Node1 (object): - _next_num = 0 - def __init__ (self): - self._num = self.__class__._next_num - self.__class__._next_num += 1 - - def __repr__ (self): - return "Node1 #" + str(self._num) - - class Node2 (object): - _next_num = 0 - def __init__ (self): - self._num = self.__class__._next_num - self.__class__._next_num += 1 - - def __repr__ (self): - return "Node2 #" + str(self._num) - - class Node3 (Node1): - _next_num = 0 - def __init__ (self): - self._num = self.__class__._next_num - self.__class__._next_num += 1 - - def __repr__ (self): - return "Node3 #" + str(self._num) - g = Graph() - n1 = Node1();n1.label=1 - n2 = Node2();n2.label=2 - n3 = Node3();n3.label=3 - - g.add(n1) - g.add(n2) - g.add(n3) - g.link((n1,0),(n2,0)) - g.link((n1,1),(n3,0)) - - print(g.find(is_a=Node1)) - print(g.find(is_a=Node2)) - print(g.find(type=Node1)) - print(g.find(type=Node3)) - print(g.find_links()) - print("=== NEIGHBORS ===") - print(g.neighbors(n1)) - print(g.find_port(n1, n2)) - print(g.connected(n1, n3)) - print(g.ports_for_node(n3)) - - print([(n, x[0], x[1][0], x[1][1]) for n in g.find(is_a=Node1) for x in g.ports_for_node(n).items() ]) - - g.disconnect_nodes(n1, n3) - - print(g.find_links()) - g.link((n2, 1), (n3, 1)) - g.link((n1,1), (n3, 0)) - g.link((n1,0), (n2, 0)) - print(g.find_links()) - g.disconnect_node(n3) - print(g.find_links()) - import code - code.interact(local=locals()) - - -if __name__ == "__main__": - test() diff --git a/pox/lib/graph/nom.py b/pox/lib/graph/nom.py @@ -1,152 +0,0 @@ -# Copyright 2012 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -""" - -from pox.lib.revent import * -from pox.core import core -from pox.lib.addresses import * -from pox.lib.graph.graph import * - -class EntityEvent (Event): - def __init__ (self, entity): - self.entity = entity - -class EntityJoin (EntityEvent): - """ - An entity has been added. - - Note that if there is a more specific join event defined for a particular - entity, (e.g., SwitchJoin), this event will not be fired. - - TODO: or we could always raise EntityJoins along with SwitchJoins, which - seems more intuitive to me. - """ - pass - -class EntityLeave (EntityEvent): - """ - An entity has been removed - - Note that if there is a more specific leave event defined for a particular - entity, (e.g., SwitchLeave), this event will not be fired. 
- - TODO: or we could always raise EntityLeaves along with SwitchLeaves, which - seems more intuitive to me. - """ - pass - -class Update (Event): - """ - Fired by Topology whenever anything has changed - """ - def __init__ (self, event): - self.event = event - -class Entity (Node): - """ - Note that the Entity class is intentionally simple; It only serves as a - convenient SuperClass type. - - It's up to subclasses to implement specific functionality (e.g. - OpenFlow1.0 switch functionality). The purpose of this design decision - is to prevent protocol specific details from being leaked into this - module... but this design decision does /not/ imply that pox.toplogy - serves to define a generic interface to abstract entity types. - """ - -class Host (Entity): - """ - A generic Host entity. - """ - def __init__(self): - Entity.__init__(self) - -class Switch (Entity): - """ - Subclassed by protocol-specific switch classes, - e.g. pox.openflow.topology.OpenFlowSwitch - """ - -""" -class Port (Entity): - def __init__ (self, num, hwAddr, name): - Entity.__init__(self) - self.number = num - self.hwAddr = EthAddr(hwAddr) - self.name = name -""" - -class NOM (Graph, EventMixin): - __eventMixin_events = [ - EntityJoin, - EntityLeave, - - Update - ] - - def __init__ (self): - Graph.__init__(self) - EventMixin.__init__(self) - self._eventMixin_addEvents(self.__eventMixin_events) - self._entities = {} - self.log = core.getLogger(self.__class__.__name__) - - def getEntityByID (self, ID, fail=False): - """ - Raises an exception if fail is True and the entity doesn't exist - See also: The 'entity' property. - """ - r = self.find(Or(Equal('DPID', ID),Equal(F('ID'), ID))) - if len(r) == 0: - if fail: - raise RuntimeError("No entity with ID " + str(ID)) - else: - return None - assert len(r) == 1 - return r[0] - - def removeEntity (self, entity): - if entity in self: - self.remove(entity) - self.log.info(str(entity) + " left") - self.raiseEvent(EntityLeave, entity) - - def addEntity (self, entity): - """ Will raise an exception if entity.id already exists """ - if entity in self: - raise RuntimeError("Entity exists") - self.add(entity) - self.log.info(str(entity) + " joined") - self.raiseEvent(EntityJoin, entity) - - def getEntitiesOfType (self, t=Entity, subtypes=True): - if subtypes is False: - return self.find(is_a=t) - else: - return self.find(type=t) - - def raiseEvent (self, event, *args, **kw): - """ - Whenever we raise any event, we also raise an Update, so we extend - the implementation in EventMixin. - """ - rv = EventMixin.raiseEvent(self, event, *args, **kw) - if type(event) is not Update: - EventMixin.raiseEvent(self, Update(event)) - return rv - - def __str__(self): - return "<%s len:%i>" % (self.__class__.__name__, len(self)) diff --git a/pox/lib/ioworker/__init__.py b/pox/lib/ioworker/__init__.py @@ -1,467 +0,0 @@ -# Copyright 2012 Colin Scott -# Copyright 2012 Andreas Wundsam -# Copyright 2012 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
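# Editor's note: an illustrative sketch (not part of the original file) for the
# NOM class above; it assumes a running POX core, since NOM pulls its logger
# from pox.core, and the ID value is a placeholder.
nom = NOM()
h = Host()
h.ID = 42                       # getEntityByID() matches on an 'ID' attribute
nom.addEntity(h)                # logs the join and raises EntityJoin plus Update
nom.getEntityByID(42)           # -> h
nom.getEntitiesOfType(Host)     # -> [h]
nom.removeEntity(h)             # raises EntityLeave plus Update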
- -""" -IOWorkers provide a convenient IO abstraction. Sends are fire-and-forget, -and read data is buffered and you can get notifications when data is -available. -""" - -import sys -import errno -from collections import deque -import socket - -from pox.lib.util import assert_type, makePinger -from pox.lib.recoco import Select, Task - -from pox.core import core -log = core.getLogger() - -_dummy_handler = lambda worker : None - -def _call_safe (f, socket=None): - try: - f() - except Exception as e: - if socket: - log.error("Exception on socket %s..." % (socket)) - log.exception(e) - - -class IOWorker (object): - """ - Generic IOWorker class. - Fire and forget semantics for send. - Received data is queued until read. - """ - def __init__(self): - super(IOWorker,self).__init__() - self.send_buf = b"" - self.receive_buf = b"" - self.closed = False - - self._custom_rx_handler = None - self._custom_close_handler = None - self._custom_connect_handler = None - - self._connecting = False - self._shutdown_send = False - - self.rx_handler = None - self.close_handler = None - self.connect_handler = None - - def _handle_rx (self): - """ Can be overridden OR you can just use rx_handler """ - self._custom_rx_handler(self) - - def _handle_close (self): - """ Can be overridden OR you can just use close_handler """ - self._custom_close_handler(self) - - def _handle_connect (self): - """ Can be overridden OR you can just use connect_handler """ - self._custom_connect_handler(self) - - def _do_exception (self, loop): - self.close() - loop._workers.discard(self) - - def _try_connect (self, loop): - if not self._connecting: return False - self._connecting = False - try: - self.socket.recv(0) - except socket.error as xxx_todo_changeme1: - (s_errno, strerror) = xxx_todo_changeme1.args - if s_errno == errno.EAGAIN or s_errno == 10035: # 10035=WSAEWOULDBLOCK - # On Linux, this seems to mean we're connected. - # I think this is right for the Windows case too. - # If we want to stay in the connecting state until - # we actually get data, re-set _connecting to True, - # and return. 
- pass - #self._connecting = True - #return True - else: - self.close() - loop._workers.discard(self) - return True - _call_safe(self._handle_connect) - return False - - def _do_recv (self, loop): - if self._connecting and self._try_connect(loop): return - try: - data = self.socket.recv(loop._BUF_SIZE) - if len(data) == 0: - self.close() - loop._workers.discard(self) - else: - self._push_receive_data(data) - except socket.error as xxx_todo_changeme2: - (s_errno, strerror) = xxx_todo_changeme2.args - if s_errno == errno.ENOENT: - # SSL library does this sometimes - log.error("Socket %s: ENOENT", str(self)) - return - log.error("Socket %s error %i during recv: %s", str(self), - s_errno, strerror) - self.close() - loop._workers.discard(self) - - def _do_send (self, loop): - if self._connecting and self._try_connect(loop): return - try: - if len(self.send_buf): - l = self.socket.send(self.send_buf) - if l > 0: - self._consume_send_buf(l) - if self._shutdown_send and len(self.send_buf) == 0: - self.socket.shutdown(socket.SHUT_WR) - except socket.error as xxx_todo_changeme3: - (s_errno, strerror) = xxx_todo_changeme3.args - if s_errno != errno.EAGAIN: - log.error("Socket %s error %i during send: %s", str(self), - s_errno, strerror) - self.close() - loop._workers.discard(self) - - @property - def available (self): - """ - Number of available bytes to read() - """ - return len(self.receive_buf) - - @property - def connect_handler (self): - if self._custom_connect_handler is _dummy_handler: - return None - return self._custom_connect_handler - - @connect_handler.setter - def connect_handler (self, callback): - """ - Handler to call when connected - """ - # Not sure if this is a good idea, but it might be... - if self.connect_handler is not None or callback is not None: - log.debug("Resetting connect_handler on %s?", self) - if callback is None: callback = _dummy_handler - self._custom_connect_handler = callback - - @property - def close_handler (self): - if self._custom_close_handler is _dummy_handler: - return None - return self._custom_close_handler - - @close_handler.setter - def close_handler (self, callback): - """ - Handler to call when closing - """ - # Not sure if this is a good idea, but it might be... - if self.close_handler is not None or callback is not None: - log.debug("Resetting close_handler on %s?", self) - if callback is None: callback = _dummy_handler - self._custom_close_handler = callback - - @property - def rx_handler (self): - if self._custom_rx_handler is _dummy_handler: - return None - return self._custom_rx_handler - - @rx_handler.setter - def rx_handler (self, callback): - """ - Handler to call when data is available to read - """ - # Not sure if this is a good idea, but it might be... - if self.rx_handler is not None or callback is not None: - log.debug("Resetting rx_handler on %s?", self) - if callback is None: callback = _dummy_handler - self._custom_rx_handler = callback - - def send_fast (self, data): - return self.send(data) - - def send (self, data): - """ Send data. Fire and forget. """ - assert assert_type("data", data, [bytes], none_ok=False) - self.send_buf += data - - def _push_receive_data (self, new_data): - # notify client of new received data. called by a Select loop - self.receive_buf += new_data - self._handle_rx() - - def peek (self, length = None): - """ Peek up to length bytes from receive buffer. 
""" - if length is None: - return self.receive_buf - else: - return self.receive_buf[:length] - - def consume_receive_buf (self, l): - """ Consume receive buffer """ - # called from the client - if len(self.receive_buf) < l: - raise RuntimeError("Receive buffer underrun") - self.receive_buf = self.receive_buf[l:] - - def read (self, length = None): - """ - Read up to length bytes from receive buffer - (defaults to all) - """ - if length is None: - length = len(self.receive_buf) - r = self.receive_buf[:length] - self.receive_buf = self.receive_buf[length:] - return r - - @property - def _ready_to_send (self): - # called by Select loop - return len(self.send_buf) > 0 or self._connecting - - def _consume_send_buf (self, l): - # Throw out the first l bytes of the send buffer - # Called by Select loop - assert(len(self.send_buf)>=l) - self.send_buf = self.send_buf[l:] - - def close (self): - """ Close this socket """ - if self.closed: return - self.closed = True - _call_safe(self._handle_close) - - def shutdown (self, send = True, recv = True): - """ - Shut down socket - """ - self._shutdown_send |= send - #TODO: recv - - def __repr__ (self): - return "<" + self.__class__.__name__ + ">" - - -class RecocoIOWorker (IOWorker): - """ - An IOWorker that works with our RecocoIOLoop. - """ - - # Set by register - on_close = None - pinger = None - - def __init__ (self, socket): - """ - pinger is a pinger that will wake the RecocoIOLoop - on_close is a factory that hides details of Select loop - """ - super(RecocoIOWorker,self).__init__() - self.socket = socket - - def fileno (self): - """ Return the wrapped sockets' fileno """ - return self.socket.fileno() - - def send_fast (self, data): - """ - send data from the client side. fire and forget. - Must only be called from the same cooperative context as the - IOWorker. - """ - if len(self.send_buf)==0 and not self._connecting and not self.closed: - try: - l = self.socket.send(data, socket.MSG_DONTWAIT) - if l == len(self.send_buf): - return - data = data[l] - except socket.error as xxx_todo_changeme: - (s_errno, strerror) = xxx_todo_changeme.args - if s_errno != errno.EAGAIN: - log.error("Socket error: " + strerror) - self.close() - return - - IOWorker.send(self, data) - self.pinger.ping() - - def send (self, data): - IOWorker.send(self, data) - self.pinger.ping() - - def close (self): - """ Register this socket to be closed. fire and forget """ - # (don't close until Select loop is ready) - if self.closed: return - IOWorker.close(self) - # on_close is a function not a method - try: - self.socket.shutdown(socket.SHUT_RD) - except Exception: - pass - self.on_close(self) - -if not hasattr(socket, "MSG_DONTWAIT"): - # Don't have this feature. 
- RecocoIOWorker.send_fast = RecocoIOWorker.send - log.debug("RecocoIOWorker.send_fast() not available") -else: - pass - - -def _format_lists (rlist, wlist, elist): - everything = set() - everything.update(rlist) - everything.update(wlist) - everything.update(elist) - if len(everything) == 0: return "None" - everything = list(everything) - everything.sort() - msg = "" - for fd in everything: - msg += str(fd).strip("<>").replace(" ", "-") + "|" - if fd in rlist: msg += "R" - if fd in wlist: msg += "W" - if fd in elist: msg += "X" - msg += " " - msg = msg.strip() - return msg - - -class RecocoIOLoop (Task): - """ - recoco task that handles the actual IO for our IO workers - """ - _select_timeout = 5 - _BUF_SIZE = 8192 - more_debugging = False - - def __init__ (self, worker_type = RecocoIOWorker): - super(RecocoIOLoop,self).__init__() - self._worker_type = worker_type - self._workers = set() - self.pinger = makePinger() - # socket.open() and socket.close() are performed by this Select task - # other threads register open() and close() requests by adding lambdas - # to this thread-safe queue. - self._pending_commands = deque() - - def new_worker (self, *args, **kw): - ''' - Return an IOWorker wrapping the given socket. - - You can create a specific worker type by specifying - _worker_type. - ''' - # Called from external threads. - # Does not register the IOWorker immediately with the select loop -- - # rather, adds a command to the pending queue - - _worker_type = kw.pop("_worker_type", None) - - if _worker_type is None: - _worker_type = self._worker_type - assert issubclass(_worker_type, RecocoIOWorker) - worker = _worker_type(*args, **kw) - - self.register_worker(worker) - - return worker - - def register_worker (self, worker): - """ - Register a worker with this ioloop - """ - - # Our callback for io_worker.close(): - def on_close (worker): - def close_worker (worker): - # Actually close the worker (called by Select loop) - worker.socket.close() - self._workers.discard(worker) - # schedule close_worker to be called by Select loop - self._pending_commands.append(lambda: close_worker(worker)) - self.pinger.ping() - - worker.on_close = on_close - worker.pinger = self.pinger - - # Don't add immediately, since we may be in the wrong thread - self._pending_commands.append(lambda: self._workers.add(worker)) - self.pinger.ping() - - def stop (self): - self.running = False - self.pinger.ping() - - def run (self): - self.running = True - - while self.running and core.running: - try: - # First, execute pending commands - while len(self._pending_commands) > 0: - self._pending_commands.popleft()() - - # Now grab workers - read_sockets = list(self._workers) + [ self.pinger ] - write_sockets = [ worker for worker in self._workers - if worker._ready_to_send ] - exception_sockets = list(self._workers) - - if self.more_debugging: - log.debug("Select In : " + _format_lists(read_sockets, - write_sockets, exception_sockets)) - - rlist, wlist, elist = yield Select(read_sockets, write_sockets, - exception_sockets, self._select_timeout) - - if self.more_debugging: - log.debug("Select Out: " + _format_lists(rlist, wlist, elist)) - - if self.pinger in rlist: - self.pinger.pongAll() - rlist.remove(self.pinger) - - for worker in elist: - worker._do_exception(self) - if worker in rlist: - rlist.remove(worker) - if worker in wlist: - wlist.remove(worker) - - for worker in rlist: - worker._do_recv(self) - - for worker in wlist: - worker._do_send(self) - - except GeneratorExit: - # Must be shutting down - break - except 
BaseException as e: - log.exception(e) - break diff --git a/pox/lib/ioworker/notify_demo.py b/pox/lib/ioworker/notify_demo.py @@ -1,168 +0,0 @@ -# Copyright 2013 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A demo of working with IOWorker clients and servers - -Run the server as: - lib.ioworker.notify_demo:server - -Clients can be run in several ways... - -To just listen for notifications and show them as log messages: - lib.ioworker.notify_demo:client --server=127.0.0.1 --name=SirSpam - -To send a notification and quit, append --msg="Spam eggs spam". - -Run with the Python interpreter (the 'py' component), and you get a -notify("<message>") command: - POX> notify("Grilled tomatoes") - -Run with Tk (the 'tk' component) to get a GUI. -""" - -from pox.lib.ioworker import * -from pox.lib.ioworker.workers import * -from pox.core import core - -log = core.getLogger() - - -# --------------------------------------------------------------------------- -# Client Stuff -# --------------------------------------------------------------------------- - -client_worker = None -username = None -single_message = None - -def notify (msg): - if msg is None: return - if client_worker is None: - log.error("Can't send notification -- not connected") - msg = msg.split("\n") - for m in msg: - client_worker.send("N %s %s\n" % (username, m)) - -class ClientWorker (PersistentIOWorker): - def __init__ (self, *args, **kw): - self.data = b'' - super(ClientWorker,self).__init__(*args,**kw) - - def _handle_close (self): - global client_worker - if client_worker is self: - client_worker = None - log.info("Disconnect") - super(ClientWorker, self)._handle_close() - if single_message: - core.quit() - - def _handle_connect (self): - global client_worker - if client_worker is not None: - client_worker.close() - log.info("Connect") - super(ClientWorker, self)._handle_connect() - client_worker = self - if single_message: - notify(single_message) - self.shutdown() - - def _handle_rx (self): - self.data += self.read() - while '\n' in self.data: - msg,self.data = self.data.split('\n',1) - if msg.startswith("N "): - _,name,content = msg.split(None,2) - log.warn("** %s: %s **", name, content) - if core.hasComponent('tk'): - # If Tk is running, pop up the message. 
- core.tk.dialog.showinfo("Message from " + name, content) - - -def setup_input (): - def cb (msg): - if msg is None: core.quit() - setup_input() # Pop box back up - notify(msg) - if not core.running: return - core.tk.dialog.askstring_cb(cb, "Notification", - "What notification would you like to send?") - - -def client (server, name = "Unknown", port = 8111, msg = None): - - global loop, username, single_message - username = str(name).replace(" ", "_") - single_message = msg - - core.Interactive.variables['notify'] = notify - - loop = RecocoIOLoop() - #loop.more_debugging = True - loop.start() - - w = ClientWorker(loop=loop, addr=server, port=int(port)) - - if not msg: - # If we have Tk running, pop up an entry box - core.call_when_ready(setup_input, ['tk']) - - -# --------------------------------------------------------------------------- -# Server Stuff -# --------------------------------------------------------------------------- - -class ServerWorker (TCPServerWorker, RecocoIOWorker): - pass - -clients = set() - -class NotifyWorker (RecocoIOWorker): - def __init__ (self, *args, **kw): - super(NotifyWorker, self).__init__(*args, **kw) - self._connecting = True - self.data = b'' - - def _handle_close (self): - log.info("Client disconnect") - super(NotifyWorker, self)._handle_close() - clients.discard(self) - - def _handle_connect (self): - log.info("Client connect") - super(NotifyWorker, self)._handle_connect() - clients.add(self) - - def _handle_rx (self): - self.data += self.read() - while '\n' in self.data: - msg,self.data = self.data.split('\n',1) - if msg.startswith("N "): - _,name,content = msg.split(None,2) - log.warn("** %s: %s **", name, content) - for c in clients: - if c is not self: - c.send(msg + "\n") - - -def server (port = 8111): - global loop - loop = RecocoIOLoop() - #loop.more_debugging = True - loop.start() - - w = ServerWorker(child_worker_type=NotifyWorker, port = int(port)) - loop.register_worker(w) diff --git a/pox/lib/ioworker/workers.py b/pox/lib/ioworker/workers.py @@ -1,224 +0,0 @@ -# Copyright 2012-2013 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -A collection of some useful IOWorkers - -These were quickly adapted from another project. The versions of the -server ones here haven't been tested. The persistent ones at least -sort of have. The backoff one is new. 
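# Editor's note: an illustrative sketch (not part of the original file) tying the
# workers below to the RecocoIOLoop above; the address, port, and handlers are
# placeholders, and it assumes POX's recoco scheduler is running.
loop = RecocoIOLoop()
loop.start()                                   # runs the select loop as a recoco Task

def on_rx (worker):
  print("received %r" % (worker.read(),))      # received data is buffered until read

w = PersistentIOWorker.begin(loop=loop, addr='127.0.0.1', port=8111,
                             connect_callback=lambda w: w.send(b'hello\n'))
w.rx_handler = on_rx                           # sends and reads are fire-and-forget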
-""" - -import errno -import socket -from pox.lib.addresses import IP_ANY, IPAddr -from pox.lib.ioworker import * -from pox.core import core - -log = core.getLogger() - - -class LoggerBase (object): - def _error (self, *args, **kw): - log.error(type(self).__name__ + ": " + str(args[0]), *args[1:], **kw) - def _warn (self, *args, **kw): - log.warn(type(self).__name__ + ": " + str(args[0]), *args[1:], **kw) - def _info (self, *args, **kw): - log.info(type(self).__name__ + ": " + str(args[0]), *args[1:], **kw) - def _debug (self, *args, **kw): - log.debug(type(self).__name__ + ": " + str(args[0]), *args[1:], **kw) - - -class TCPServerWorkerBase (IOWorker, LoggerBase): - def __init__ (self, ip = IP_ANY, port = None, - backlog = 5, *args, **kw): - """ - Listens on ip/port and fires _do_accept when there's a connection - """ - #super(TCPServerWorkerBase,self).__init__(*args, **kw) - IOWorker.__init__(self) - - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.socket = s - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - #s.setblocking(0) - if port is None: port = 0 - s.bind((str(IPAddr(ip)), port)) - s.listen(backlog) - - @property - def local_ip (self): - return IPAddr(s.getsockname()[0]) - @property - def local_port (self): - return s.getsockname()[1] - - def _do_accept (self, loop, socket): - """ - Override me - """ - pass - - def _do_recv (self, loop): - s,addr = self.socket.accept() - s.setblocking(0) - - self._do_accept(loop, s) - - def _handle_close (self): - # Just here to kill log message - pass - - -class TCPServerWorker (TCPServerWorkerBase): - def __init__ (self, child_worker_type, ip = IP_ANY, port = None, - child_args = {}, *args, **kw): - """ - Listens on ip/port and creates a child_worker_type for each connnection - """ - super(TCPServerWorker,self).__init__(ip=ip,port=port,*args, **kw) - - self.child_worker_type = child_worker_type - self.child_args = child_args - - def _do_accept (self, loop, socket): - addr = socket.getpeername() - self._debug("accepting %s:%i" % addr) - out = loop.new_worker(socket = socket, - _worker_type = self.child_worker_type, - **self.child_args) - return out - - -class RecocoServerWorker (TCPServerWorker, RecocoIOWorker): - """ - Recoco TCP server worker - """ - pass - - -class PersistentIOWorker (RecocoIOWorker, LoggerBase): - """ - An IOWorker which opens a duplicate of itself when it closes - - Subclasses can add keyword parameters for constructor - """ - - _default_retry_delay = 2 - - def __repr__ (self): - return object.__repr__(self) - - def __init__ (self, **kw): - """ - Initialize - - See _make_connection for arg list. - - callbacks take a single arg -- the worker in question - If the disconnect callback returns False, a new connection will NOT - be opened. - """ - #IOWorker.__init__(self) - - # We pass None in as the socket, because we set it up in a moment in - # _make_connection(). This probably means that it shouldn't be - # a required argument for RecocoIOWorker... 
- super(PersistentIOWorker,self).__init__(None) - - self.kw = kw - - self._connecting = True - - self._make_connection(**kw) - - def _make_connection (self, loop, addr, port, - reconnect_delay = _default_retry_delay, - connect_callback = None, disconnect_callback = None, **kw): - - self.loop = loop - self.addr = addr #IPAddr(addr) - self.port = port - self.reconnect_delay = reconnect_delay - self.connect_callback = connect_callback - self.disconnect_callback = disconnect_callback - - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.socket = s - s.setblocking(0) - self._debug("Attempting to connect to %s:%s", self.addr, self.port) - r = s.connect_ex((str(self.addr), self.port)) - if r in (0, errno.EINPROGRESS, errno.EAGAIN, 10035): # 10035=WSAEWOULDBLOCK - # We either connected or connection is in progress - pass - else: - #self._error("Couldn't connect to %s:%s", self.addr, self.port) - #raise RuntimeError("Couldn't connect") - core.callLater(self._handle_close) - return - - self.loop.register_worker(self) - - @classmethod - def begin (cls, **kw): - #if len(args) >= 4: - # reconnect_delay = args[3] - #else: - reconnect_delay = kw.get('reconnect_delay', - cls._default_retry_delay) - - try: - w = cls(**kw) - return w - except: - raise - core.callDelayed(reconnect_delay, cls.begin, **kw) - return None - - def open_later (self): - core.callDelayed(self.reconnect_delay, self.begin, **self.kw) - - def _handle_close (self): - self._debug("Disconnected") - super(PersistentIOWorker, self)._handle_close() - if self.disconnect_callback: - if self.disconnect_callback(self) is False: - return - self.open_later() - - def _handle_connect (self): - super(PersistentIOWorker, self)._handle_connect() - if self.connect_callback: - self.connect_callback(self) - - -class BackoffWorker (PersistentIOWorker): - def __init__ (self, **kw): - kw.setdefault('reconnect_delay', 0.5) - self.max_retry_delay = kw.get('max_retry_delay',16) - super(BackoffWorker,self).__init__(**kw) - - def _handle_connect (self): - self.reconnect_delay = 0.5 - super(BackoffWorker, self)._handle_connect() - - def open_later (self): - self.reconnect_delay *= 2 - self.reconnect_delay = int(self.reconnect_delay) - if self.reconnect_delay > self.max_retry_delay: - self.reconnect_delay = self.max_retry_delay - self.kw['reconnect_delay'] = self.reconnect_delay - self._debug("Try again in %s seconds", self.reconnect_delay) - from pox.core import core - core.callDelayed(self.reconnect_delay, self.begin, **self.kw) diff --git a/pox/lib/packet/__init__.py b/pox/lib/packet/__init__.py @@ -1,96 +0,0 @@ -# Copyright 2011,2013 James McCauley -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -The POX packet library for packet parsing and creation. - -This is based heavily on NOX's packet library, though it has undergone -some signficant change, particularly with regard to making packet -assembly easier. - -Could still use more work. 
-""" - -# None of this is probably that big, and almost all of it gets loaded -# under most circumstances anyway. Let's just load all of it. -from . import arp as ARP -from . import dhcp as DHCP -from . import dns as DNS -from . import eap as EAP -from . import eapol as EAPOL -from . import ethernet as ETHERNET -from . import ipv4 as IPV4 -from . import ipv6 as IPV6 -from . import icmp as ICMP -from . import icmpv6 as ICMPV6 -from . import lldp as LLDP -from . import tcp as TCP -from . import udp as UDP -from . import vlan as VLAN -from . import mpls as MPLS -from . import llc as LLC - -from .arp import * -from .dhcp import * -from .dns import * -from .eap import * -from .eapol import * -from .ethernet import * -from .ipv6 import * -from .ipv4 import * -from .icmpv6 import * -from .icmp import * -from .lldp import * -from .tcp import * -from .udp import * -from .vlan import * -from .mpls import * -from .llc import * - -__all__ = [ - 'arp', - 'dhcp', - 'dns', - 'eap', - 'eapol', - 'ethernet', - 'ipv4', - 'ipv6', - 'icmp', - 'icmpv6', - 'lldp', - 'tcp', - 'tcp_opt', - 'udp', - 'vlan', - 'mpls', - 'llc', - - 'ARP', - 'DHCP', - 'DNS', - 'EAP', - 'EAPOL', - 'ETHERNET', - 'IPV4', - 'IPV6', - 'ICMP', - 'ICMPV6', - 'LLDP', - 'TCP', - 'UDP', - 'VLAN', - 'MPLS', - 'LLC', -] diff --git a/pox/lib/packet/arp.py b/pox/lib/packet/arp.py @@ -1,177 +0,0 @@ -# Copyright 2020 Cody Lewis -# Copyright 2011 James McCauley -# Copyright 2008 (C) Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is derived from the packet library in NOX, which was -# developed by Nicira, Inc. 
- -#===================================================================== -# -# 0 1 2 3 -# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 -# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -# | Hardware type | Protocol type | -# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -# | Source hardware address ::: | -# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -# | Source protocol address ::: | -# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -# | Destination hardware address ::: | -# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -# | Destination protocol address ::: | -# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -# | Data ::: | -# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -# -#===================================================================== -import struct - -from .packet_base import packet_base -from .ipv4 import ipv4 - -from .ethernet import ethernet -from .ethernet import ETHER_ANY -from .ethernet import ETHER_BROADCAST - -from .ipv4 import IP_ANY -from .ipv4 import IP_BROADCAST - -from pox.lib.addresses import IPAddr, EthAddr - -from .packet_utils import * - -class arp (packet_base): - "ARP/RARP packet struct" - - MIN_LEN = 28 - - HW_TYPE_ETHERNET = 1 - PROTO_TYPE_IP = 0x0800 - - # OPCODES - REQUEST = 1 # ARP - REPLY = 2 # ARP - REV_REQUEST = 3 # RARP - REV_REPLY = 4 # RARP - - def __init__(self, raw=None, prev=None, **kw): - packet_base.__init__(self) - - self.prev = prev - - self.hwtype = arp.HW_TYPE_ETHERNET - self.prototype = arp.PROTO_TYPE_IP - self.hwsrc = ETHER_ANY - self.hwdst = ETHER_ANY - self.hwlen = 6 - self.opcode = 0 - self.protolen = 4 - self.protosrc = IP_ANY - self.protodst = IP_ANY - self.next = b'' - - if raw is not None: - self.parse(raw) - - self._init(kw) - - def parse (self, raw): - assert isinstance(raw, bytes) - self.next = None # In case of unfinished parsing - self.raw = raw - dlen = len(raw) - if dlen < arp.MIN_LEN: - self.msg('(arp parse) warning IP packet data too short to parse header: data len %u' % dlen) - return - - (self.hwtype, self.prototype, self.hwlen, self.protolen,self.opcode) =\ - struct.unpack('!HHBBH', raw[:8]) - - if self.hwtype != arp.HW_TYPE_ETHERNET: - self.msg('(arp parse) hw type unknown %u' % self.hwtype) - return - if self.hwlen != 6: - self.msg('(arp parse) unknown hw len %u' % self.hwlen) - return - else: - self.hwsrc = EthAddr(raw[8:14]) - self.hwdst = EthAddr(raw[18:24]) - if self.prototype != arp.PROTO_TYPE_IP: - self.msg('(arp parse) proto type unknown %u' % self.prototype) - return - if self.protolen != 4: - self.msg('(arp parse) unknown proto len %u' % self.protolen) - return - else: - self.protosrc = IPAddr(struct.unpack('!I',raw[14:18])[0]) - self.protodst = IPAddr(struct.unpack('!I',raw[24:28])[0]) - - self.next = raw[28:] - self.parsed = True - - def hdr(self, payload): - buf = struct.pack('!HHBBH', self.hwtype, self.prototype, - self.hwlen, self.protolen,self.opcode) - if type(self.hwsrc) == bytes: - buf += self.hwsrc - else: - buf += self.hwsrc.toRaw() - if type(self.protosrc) is IPAddr: - buf += struct.pack('!I',self.protosrc.toUnsigned()) - else: - buf += struct.pack('!I',self.protosrc) - if type(self.hwdst) == bytes: - buf += self.hwdst - else: - buf += self.hwdst.toRaw() - if type(self.protodst) is IPAddr: - buf += struct.pack('!I',self.protodst.toUnsigned()) - else: - buf += struct.pack('!I',self.protodst) - return buf - - def _to_str(self): - op = 
str(self.opcode) - - eth_type = None - # Ethernet - if hasattr(self.prev, 'type'): - eth_type = self.prev.type - # Vlan - elif hasattr(self.prev, 'eth_type'): - eth_type = self.prev.eth_type - else: - self.err('(arp) unknown datalink type') - eth_type = ethernet.ARP_TYPE - - if eth_type == ethernet.ARP_TYPE: - if self.opcode == arp.REQUEST: - op = "REQUEST" - elif self.opcode == arp.REPLY: - op = "REPLY" - elif eth_type == ethernet.RARP_TYPE: - if self.opcode == arp.REV_REQUEST: - op = "REV_REQUEST" - elif self.opcode == arp.REV_REPLY: - op = "REV_REPLY" - - s = "[ARP {0} hw:{1} p:{2} {3}>{4} {5}>{6}]".format(op, - self.hwtype, - self.prototype, - EthAddr(self.hwsrc), - EthAddr(self.hwdst), - IPAddr(self.protosrc), - IPAddr(self.protodst)) - return s diff --git a/pox/lib/packet/dhcp.py b/pox/lib/packet/dhcp.py @@ -1,599 +0,0 @@ -# Copyright 2020 Cody Lewis -# Copyright 2011,2013 James McCauley -# Copyright 2008 (C) Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is derived from the packet library in NOX, which was -# developed by Nicira, Inc. - -#====================================================================== -# -# DHCP Message Format -# -# 0 1 2 3 -# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 -# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -# | op (1) | htype (1) | hlen (1) | hops (1) | -# +---------------+---------------+---------------+---------------+ -# | xid (4) | -# +-------------------------------+-------------------------------+ -# | secs (2) | flags (2) | -# +-------------------------------+-------------------------------+ -# | ciaddr (4) | -# +---------------------------------------------------------------+ -# | yiaddr (4) | -# +---------------------------------------------------------------+ -# | siaddr (4) | -# +---------------------------------------------------------------+ -# | giaddr (4) | -# +---------------------------------------------------------------+ -# | | -# | chaddr (16) | -# | | -# | | -# +---------------------------------------------------------------+ -# | | -# | sname (64) | -# +---------------------------------------------------------------+ -# | | -# | file (128) | -# +---------------------------------------------------------------+ -# | | -# | options (variable) | -# +---------------------------------------------------------------+ -# -#====================================================================== -import struct -import string -from .packet_utils import * - -from .packet_base import packet_base -import pox.lib.util as util -from pox.lib.util import is_subclass -from pox.lib.addresses import * - -_dhcp_option_unpackers = {} - - -class dhcp(packet_base): - "DHCP Packet struct" - - STRUCT_BOUNDARY = 28 - MIN_LEN = 240 - - SERVER_PORT = 67 - CLIENT_PORT = 68 - - BROADCAST_FLAG = 0x8000 - - BOOTREQUEST = 1 - BOOTREPLY = 2 - - MSG_TYPE_OPT = 53 - NUM_MSG_TYPES = 8 - DISCOVER_MSG = 1 - OFFER_MSG = 2 - REQUEST_MSG = 3 - DECLINE_MSG = 4 - ACK_MSG = 5 - NAK_MSG = 6 - RELEASE_MSG = 7 - 
INFORM_MSG = 8 - - SUBNET_MASK_OPT = 1 - GATEWAY_OPT = 3 - ROUTERS_OPT = 3 # Synonym for above - TIME_SERVERS_OPT = 4 - DNS_SERVER_OPT = 6 - HOST_NAME_OPT = 12 - DOMAIN_NAME_OPT = 15 - MTU_OPT = 26 - BCAST_ADDR_OPT = 28 - - VENDOR_OPT = 43 - - REQUEST_IP_OPT = 50 - REQUEST_LEASE_OPT = 51 - OVERLOAD_OPT = 52 - SERVER_ID_OPT = 54 - PARAM_REQ_OPT = 55 - ERROR_MSG_OPT = 56 - T1_OPT = 58 - T2_OPT = 59 - CLIENT_ID_OPT = 61 - PAD_OPT = 0 - END_OPT = 255 - - MAGIC = b'\x63\x82\x53\x63' - - def __init__(self, raw=None, prev=None, **kw): - packet_base.__init__(self) - - self.prev = prev - - self.op = 0 - self.htype = 0 - self.hlen = 0 - self.hops = 0 - self.xid = 0 - self.secs = 0 - self.flags = 0 - self.ciaddr = IP_ANY - self.yiaddr = IP_ANY - self.siaddr = IP_ANY - self.giaddr = IP_ANY - self.chaddr = None - self.sname = b'' - self.file = b'' - self.magic = self.MAGIC - self._raw_options = b'' - - if raw is not None: - self.parse(raw) - else: - self.options = util.DirtyDict() - - self._init(kw) - - def _to_str(self): - s = '[DHCP op:'+str(self.op) - s += ' htype:'+str(self.htype) - s += ' hlen:'+str(self.hlen) - s += ' hops:'+str(self.hops) - s += ' xid:'+str(self.xid) - s += ' secs:'+str(self.secs) - s += ' flags:'+str(self.flags) - s += ' ciaddr:'+str(self.ciaddr) - s += ' yiaddr:'+str(self.yiaddr) - s += ' siaddr:'+str(self.siaddr) - s += ' giaddr:'+str(self.giaddr) - s += ' chaddr:' - if isinstance(self.chaddr, EthAddr): - s += str(self.chaddr) - elif self.chaddr is not None: - s += ' '.join(["{0:02x}".format(x) for x in self.chaddr]) - s += ' magic:'+' '.join( - ["{0:02x}".format(ord(x)) for x in self.magic]) - #s += ' options:'+' '.join(["{0:02x}".format(ord(x)) for x in - # self._raw_options]) - if len(self.options): - s += ' options:' - s += ','.join(repr(x) for x in list(self.options.values())) - s += ']' - return s - - def parse(self, raw): - assert isinstance(raw, bytes) - self.raw = raw - dlen = len(raw) - if dlen < dhcp.MIN_LEN: - self.msg('(dhcp parse) warning DHCP packet data too short ' + - 'to parse header: data len %u' % (dlen,)) - return None - - (self.op, self.htype, self.hlen, self.hops, self.xid,self.secs, - self.flags, self.ciaddr, self.yiaddr, self.siaddr, - self.giaddr) = struct.unpack('!BBBBIHHIIII', raw[:28]) - - self.ciaddr = IPAddr(self.ciaddr) - self.yiaddr = IPAddr(self.yiaddr) - self.siaddr = IPAddr(self.siaddr) - self.giaddr = IPAddr(self.giaddr) - - self.chaddr = raw[28:44] - if self.hlen == 6: - # Assume chaddr is ethernet - self.chaddr = EthAddr(self.chaddr[:6]) - self.sname = raw[44:108] - self.file = raw[102:236] - self.magic = raw[236:240] - - self.hdr_len = dlen - self.parsed = True - - if self.hlen > 16: - self.warn('(dhcp parse) DHCP hlen %u too long' % (self.hlen),) - return - - for i in range(4): - if dhcp.MAGIC[i] != self.magic[i]: - self.warn('(dhcp parse) bad DHCP magic value %s' % - str(self.magic)) - return - - self._raw_options = raw[240:] - self.parseOptions() - self.unpackOptions() - self.parsed = True - - def unpackOptions(self): - for k,v in list(self.options.items()): - unpack = _dhcp_option_unpackers.get(k, DHCPRawOption.unpack) - try: - self.options[k] = unpack(v,k) - except Exception as e: - self.warn("(dhcp parse) bad option %s: %s" % (k,e)) - #import traceback - #traceback.print_exc() - self.options[k] = DHCPRawOption.unpack(v,k,True) - - def parseOptions(self): - self.options = util.DirtyDict() - self.parseOptionSegment(self._raw_options) - if dhcp.OVERLOAD_OPT in self.options: - opt_val = self.options[dhcp.OVERLOAD_OPT] - if 
len(opt_val) != 1: - self.warn('DHCP overload option has bad len %u' % - (len(opt_val),)) - return - if opt_val == 1 or opt_val == 3: - self.parseOptionSegment(self.file) - if opt_val == 2 or opt_val == 3: - self.parseOptionSegment(self.sname) - - def parseOptionSegment(self, barr): - ofs = 0; - l = len(barr) - while ofs < l: - opt = ord(barr[ofs]) - if opt == dhcp.END_OPT: - return - ofs += 1 - if opt == dhcp.PAD_OPT: - continue - if ofs >= l: - self.warn('DHCP option ofs extends past segment') - return - opt_len = ord(barr[ofs]) - ofs += 1 # Account for the length octet - if ofs + opt_len > l: - return False - if opt in self.options: - # Append option, per RFC 3396 - self.options[opt] += barr[ofs:ofs+opt_len] - else: - self.options[opt] = barr[ofs:ofs+opt_len] - ofs += opt_len - self.warn('DHCP end of option segment before END option') - - def packOptions (self): - o = b'' - def addPart (k, v): - o = b'' - o += chr(k) - o += chr(len(v)) - o += bytes(v) - if len(o) & 1: # Length is not even - o += chr(dhcp.PAD_OPT) - return o - - for k,v in self.options.items(): - if k == dhcp.END_OPT: continue - if k == dhcp.PAD_OPT: continue - if isinstance(v, DHCPOption): - v = v.pack() - if isinstance(v, bytes) and (len(v) > 255): - # Long option, per RFC 3396 - v = [v[i:i+255] for i in range(0, len(v), 255)] - if isinstance(v, list): # Better way to tell? - for part in v: - o += addPart(k, part) - else: - o += addPart(k, v) - o += chr(dhcp.END_OPT) - self._raw_options = o - - if isinstance(self.options, util.DirtyDict): - self.options.dirty = False - - def add_option(self, option, code=None): - if code is None: - code = option.CODE - self.options[code] = option - - def hdr(self, payload): - if isinstance(self.options, util.DirtyDict): - if self.options.dirty: - self.packOptions() - else: - self.packOptions() - - if isinstance(self.chaddr, EthAddr): - chaddr = self.chaddr.toRaw() + (b'\x00' * 10) - fmt = '!BBBBIHHiiii16s64s128s4s' - return struct.pack(fmt, self.op, self.htype, self.hlen, - self.hops, self.xid, self.secs, self.flags, - IPAddr(self.ciaddr).toSigned(), - IPAddr(self.yiaddr).toSigned(), - IPAddr(self.siaddr).toSigned(), - IPAddr(self.giaddr).toSigned(), - chaddr, self.sname, self.file, - self.magic) + self._raw_options - - def appendRawOption (self, code, val = None, length = None): - """ - In general, a much better way to add options should just be - to add them to the .options dictionary. 
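# Editor's note: an illustrative sketch (not part of the original file) of the
# recommended .options path mentioned above; the field values are placeholders.
from pox.lib.packet.dhcp import (dhcp, DHCPMsgTypeOption,
                                 DHCPParameterRequestOption)
from pox.lib.addresses import EthAddr

d = dhcp()
d.op, d.htype, d.hlen = dhcp.BOOTREQUEST, 1, 6
d.chaddr = EthAddr("00:11:22:33:44:55")
d.add_option(DHCPMsgTypeOption(dhcp.DISCOVER_MSG))     # stored under its CODE (53)
d.options[dhcp.PARAM_REQ_OPT] = DHCPParameterRequestOption(
    [dhcp.SUBNET_MASK_OPT, dhcp.ROUTERS_OPT, dhcp.DNS_SERVER_OPT])
# Serialization then goes through packOptions()/hdr(); note that this deleted copy
# still mixes bytes and str (chr()/ord() on byte strings), i.e. Python 2 era code.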
- """ - - self._raw_options += chr(code) - if length is None: - if val is None: - return - length = len(val) - self._raw_options += chr(length) - self._raw_options += val - - -def dhcp_option_def (msg_type): - """ - DPCP Option decorator - """ - def f (cls): - _dhcp_option_unpackers[msg_type] = cls.unpack - cls.CODE = msg_type - return cls - return f - -class DHCPOption (object): - CODE = None - - @classmethod - def unpack (cls, data, code = None): - pass - - def pack (self): - return b'' - - @property - def _name (self): - n = type(self).__name__ - if n.startswith("DHCP"): n = n[4:] - if n.endswith("Option"): n = n[:-6] - if n == "": return "Option" - return n - -class DHCPRawOption (DHCPOption): - def __init__ (self, data = b'', bad = False): - self.data = data - self.bad = bad # True if option wasn't parsed right - - @classmethod - def unpack (cls, data, code = None, bad = False): - self = cls() - self.data = data - self.bad = bad - self.CODE = code - return self - - def pack (self): - return self.data - - def __repr__ (self): - data = self.data - if not all(ord(c)<127 and c in string.printable for c in data): - data = " ".join("%02x" % (ord(x),) for x in data) - else: - data = "".join(x if ord(x) >= 32 else "." for x in data) - if len(data) > 30: - data = data[:30] + "..." - n = self._name - if n == 'Raw': n += str(self.CODE) - return "%s(%s)" % (n, data) - -class DHCPIPOptionBase (DHCPOption): - """ - Superclass for options which are an IP address - """ - def __init__ (self, addr = None): - self.addr = IPAddr(0) if addr is None else IPAddr(addr) - - @classmethod - def unpack (cls, data, code = None): - self = cls() - if len(data) != 4: raise RuntimeError("Bad option length") - self.addr = IPAddr(data) - return self - - def pack (self): - return self.addr.toRaw() - - def __repr__ (self): - return "%s(%s)" % (self._name, self.addr) - -class DHCPIPsOptionBase (DHCPOption): - """ - Superclass for options which are a list of IP addresses - """ - def __init__ (self, addrs=[]): - if isinstance(addrs, (str,IPAddr)): - self.addrs = [IPAddr(addrs)] - else: - self.addrs = [IPAddr(a) for a in addrs] - - @classmethod - def unpack (cls, data, code = None): - self = cls() - if (len(data) % 4) != 0: raise RuntimeError("Bad option length") - while len(data): - self.addrs.append(IPAddr(data[:4])) - data = data[4:] - return self - - def pack (self): - r = b'' - for addr in self.addrs: - r += addr.toRaw() - return r - - @property - def addr (self): - if len(self.addrs) == 0: return None - return self.addrs[0] - - def __repr__ (self): - return "%s(%s)" % (self._name, self.addrs) - -class DHCPSecondsOptionBase (DHCPOption): - """ - Superclass for options which are a number of seconds as 4 bytes - """ - def __init__ (self, seconds = None): - self.seconds = seconds - - @classmethod - def unpack (cls, data, code = None): - self = cls() - if len(data) != 4: raise RuntimeError("Bad option length") - self.seconds, = struct.unpack('!I', data) - return self - - def pack (self): - return struct.pack('!I', self.seconds) - - def __repr__ (self): - return "%s(%s)" % (self._name, self.seconds) - -@dhcp_option_def(dhcp.MSG_TYPE_OPT) -class DHCPMsgTypeOption (DHCPOption): - def __init__ (self, type=None): - self.type = type - - @classmethod - def unpack (cls, data, code = None): - self = cls() - if len(data) != 1: raise RuntimeError("Bad option length") - self.type = ord(data[0]) - return self - - def pack (self): - return chr(self.type) - - def __repr__ (self): - t = { - 1:'DISCOVER', - 2:'OFFER', - 3:'REQUEST', - 
4:'DECLINE', - 5:'ACK', - 6:'NAK', - 7:'RELEASE', - 8:'INFORM', - }.get(self.type, "TYPE"+str(self.type)) - return "%s(%s)" % (self._name, t) - -@dhcp_option_def(dhcp.SUBNET_MASK_OPT) -class DHCPSubnetMaskOption (DHCPIPOptionBase): - pass - -@dhcp_option_def(dhcp.ROUTERS_OPT) -class DHCPRoutersOption (DHCPIPsOptionBase): - pass - -@dhcp_option_def(dhcp.TIME_SERVERS_OPT) -class DHCPTimeServersOption (DHCPIPsOptionBase): - pass - -@dhcp_option_def(dhcp.DNS_SERVER_OPT) -class DHCPDNSServersOption (DHCPIPsOptionBase): - pass - -@dhcp_option_def(dhcp.HOST_NAME_OPT) -class DHCPHostNameOption (DHCPRawOption): - pass - -@dhcp_option_def(dhcp.DOMAIN_NAME_OPT) -class DHCPDomainNameOption (DHCPRawOption): - pass - -@dhcp_option_def(dhcp.BCAST_ADDR_OPT) -class DHCPBroadcastAddressOption (DHCPIPOptionBase): - pass - -@dhcp_option_def(dhcp.VENDOR_OPT) -class DHCPVendorOption (DHCPRawOption): - pass - -@dhcp_option_def(dhcp.REQUEST_IP_OPT) -class DHCPRequestIPOption (DHCPIPOptionBase): - pass - -@dhcp_option_def(dhcp.REQUEST_LEASE_OPT) -class DHCPIPAddressLeaseTimeOption (DHCPSecondsOptionBase): - pass - -@dhcp_option_def(dhcp.OVERLOAD_OPT) -class DHCPOptionOverloadOption (DHCPOption): - def __init__ (self, value = None): - self.value = value - - @classmethod - def unpack (cls, data, code = None): - self = cls() - if len(data) != 1: raise RuntimeError("Bad option length") - self.value = ord(data[0]) - return self - - def pack (self): - return chr(self.value) - - def __repr__ (self): - return "%s(%s)" % (self._name, self.value) - -@dhcp_option_def(dhcp.SERVER_ID_OPT) -class DHCPServerIdentifierOption (DHCPIPOptionBase): - pass - -@dhcp_option_def(dhcp.ERROR_MSG_OPT) -class DHCPErrorMessageOption (DHCPRawOption): - pass - -@dhcp_option_def(dhcp.T1_OPT) -class DHCPRenewalTimeOption (DHCPSecondsOptionBase): - pass - -@dhcp_option_def(dhcp.T2_OPT) -class DHCPRebindingTimeOption (DHCPSecondsOptionBase): - pass - -@dhcp_option_def(dhcp.PARAM_REQ_OPT) -class DHCPParameterRequestOption (DHCPOption): - def __init__ (self, options = []): - self.options = options - - @classmethod - def unpack (cls, data, code = None): - self = cls() - self.options = [ord(x) for x in data] - return self - - def pack (self): - opt = ((o.CODE if is_subclass(o, DHCPOption) else o) for o in self.options) - return b''.join(chr(x) for x in opt) - - def __repr__ (self): - names = [] - for o in sorted(self.options): - n = _dhcp_option_unpackers.get(o) - if n is None or not hasattr(n, 'im_self'): - n = "Opt/" + str(o) - else: - n = n.__self__.__name__ - if n.startswith("DHCP"): n = n[4:] - if n.endswith("Option"): n = n[:-6] - if n == "": n = "Opt" - n += '/' + str(o) - names.append(n) - - return "%s(%s)" % (self._name, " ".join(names)) diff --git a/pox/lib/packet/dns.py b/pox/lib/packet/dns.py @@ -1,530 +0,0 @@ -# Copyright 2011,2012 James McCauley -# Copyright 2008 (C) Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is derived from the packet library in NOX, which was -# developed by Nicira, Inc. 
- -#====================================================================== -# -# DNS Message Format -# -# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | ID | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# |QR| Opcode |AA|TC|RD|RA|Z |AD|CD| RCODE | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | Total Questions | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | Total Answerrs | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | Total Authority RRs | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | Total Additional RRs | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | Questions ... | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | Answer RRs ... | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | Authority RRs.. | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | Additional RRs. | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# -# Question format: -# -# 1 1 1 1 1 1 -# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | | -# / QNAME / -# / / -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | QTYPE | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | QCLASS | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# -# -# -# All RRs have the following format: -# 1 1 1 1 1 1 -# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | | -# / / -# / NAME / -# | | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | TYPE | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | CLASS | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | TTL | -# | | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | RDLENGTH | -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--| -# / RDATA / -# / / -# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# -# -#====================================================================== - -# TODO: -# SOA data -# General cleaup/rewrite (code is/has gotten pretty bad) - -import struct -from .packet_utils import * -from .packet_utils import TruncatedException as Trunc - -from .packet_base import packet_base - -from pox.lib.addresses import IPAddr,IPAddr6,EthAddr - -rrtype_to_str = { - 1: "A", # host address - 2: "NS", #an authoritative name server - 3: "MD", # a mail destination (Obsolete - use MX) - 4: "MF", # a mail forwarder (Obsolete - use MX) - 5: "CNAME", # the canonical name for an alias - 6: "SOA", # marks the start of a zone of authority - 7: "MB" , # a mailbox domain name (EXPERIMENTAL) - 8: "MG" , # a mail group member (EXPERIMENTAL) - 9: "MR" , # a mail rename domain name (EXPERIMENTAL) - 10: "NULL" , # a null RR (EXPERIMENTAL) - 11: "WKS" , # a well known service description - 12: "PTR" , # a domain name pointer - 13: "HINFO", # host information - 14: "MINFO", # mailbox or mail list information - 15: "MX" , # mail exchange - 16: "TXT", # text strings - 28: "AAAA" # IPV6 address request -} - -rrclass_to_str = { - 1 :"IN", # 1 the Internet - 2 :"CS", # 2 the CSNET class (Obsolete) - 3 :"CH", # 3 the CHAOS class - 4 :"HS", # 4 Hesiod [Dyer 87] - 255 :"* " # 255 any class -} - - -class dns(packet_base): - "DNS Packet struct" - - MDNS_ADDRESS = IPAddr('224.0.0.251') - MDNS6_ADDRESS = IPAddr6('ff02::fb') - MDNS_ETH = EthAddr('01:00:5E:00:00:fb') - MDNS6_ETH = EthAddr('33:33:00:00:00:fb') - - SERVER_PORT = 53 - MDNS_PORT = 5353 - MIN_LEN = 12 - - def __init__(self, raw=None, prev=None, **kw): - packet_base.__init__(self) - - self.prev = 
prev - - self.questions = [] - self.answers = [] - self.authorities = [] - self.additional = [] - - self.id = 0 - self.qr = False # Is Query - self.opcode = 0 - self.aa = False # Authoritative Answer - self.tc = False # Truncated - self.rd = False # Recursion Desired - self.ra = False # Recursion Available - self.z = False - self.ad = False - self.cd = False - self.rcode = 0 - # TODO: everything else here - - if raw is not None: - self.parse(raw) - - self._init(kw) - - def _exc (self, e, part = None): - """ - Turn exception into log message - """ - msg = "(dns)" - if part is not None: - msg += " " + part - msg += ": " - msg += str(e) - if isinstance(e, Trunc): - self.msg(msg) - else: - self.err(msg) - - def hdr (self, payload): - bits0 = 0 - if self.qr: bits0 |= 0x80 - bits0 |= (self.opcode & 0x7) << 4 - if self.rd: bits0 |= 1 - if self.tc: bits0 |= 2 - if self.aa: bits0 |= 4 - bits1 = 0 - if self.ra: bits1 |= 0x80 - if self.z: bits1 |= 0x40 - if self.ad: bits1 |= 0x20 - if self.cd: bits1 |= 0x10 - bits1 |= (self.rcode & 0xf) - - s = struct.pack("!HBBHHHH", self.id, bits0, bits1, - len(self.questions), len(self.answers), - len(self.authorities), len(self.additional)) - - def makeName (labels, term): - o = '' #TODO: unicode - for l in labels.split('.'): - o += chr(len(l)) - o += l - if term: o += '\x00' - return o - - name_map = {} - - def putName (s, name): - pre = '' - post = name - while True: - at = s.find(makeName(post, True)) - if at == -1: - if post in name_map: - at = name_map[post] - if at == -1: - post = post.split('.', 1) - if pre: pre += '.' - pre += post[0] - if len(post) == 1: - if len(pre) == 0: - s += '\x00' - else: - name_map[name] = len(s) - s += makeName(pre, True) - break - post = post[1] - else: - if len(pre) > 0: - name_map[name] = len(s) - s += makeName(pre, False) - s += struct.pack("!H", at | 0xc000) - break - return s - - def putData (s, r): - if r.qtype in (2,12,5,15): # NS, PTR, CNAME, MX - return putName(s, r.rddata) - elif r.qtype == 1: # A - assert isinstance(r.rddata, IPAddr) - return s + r.rddata.raw - elif r.qtype == 28: # AAAA - assert isinstance(r.rddata, IPAddr6) - return s + r.rddata.raw - else: - return s + r.rddata - - for r in self.questions: - s = putName(s, r.name) - s += struct.pack("!HH", r.qtype, r.qclass) - - rest = self.answers + self.authorities + self.additional - for r in rest: - s = putName(s, r.name) - s += struct.pack("!HHIH", r.qtype, r.qclass, r.ttl, 0) - fixup = len(s) - 2 - s = putData(s, r) - fixlen = len(s) - fixup - 2 - s = s[:fixup] + struct.pack('!H', fixlen) + s[fixup+2:] - - return s - - def parse(self, raw): - assert isinstance(raw, bytes) - self.raw = raw - dlen = len(raw) - if dlen < dns.MIN_LEN: - self.msg('(dns) packet data too short to ' - + 'parse header: data len %u' % (dlen,)) - return None - - bits0 = 0 - bits1 = 0 - total_questions = 0 - total_answers = 0 - total_auth_rr = 0 - total_add_rr = 0 - (self.id, bits0,bits1, total_questions, total_answers, - total_auth_rr, total_add_rr)\ - = struct.unpack('!HBBHHHH', raw[:12]) - - self.qr = True if (bits0 & 0x80) else False - self.opcode = (bits0 >> 4) & (0x07) - self.aa = True if (bits0 & (0x04)) else False - self.tc = True if (bits0 & (0x02)) else False - self.rd = True if (bits0 & (0x01)) else False - self.ra = True if (bits1 & 0x80) else False - self.z = True if (bits1 & 0x40) else False - self.ad = True if (bits1 & 0x20) else False - self.cd = True if (bits1 & 0x10) else False - self.rcode = bits1 & 0x0f - - query_head = 12 - - # questions - for i in 
range(0,total_questions): - try: - query_head = self.next_question(raw, query_head) - except Exception as e: - self._exc(e, 'parsing questions') - return None - - # answers - for i in range(0,total_answers): - try: - query_head = self.next_rr(raw, query_head, self.answers) - except Exception as e: - self._exc(e, 'parsing answers') - return None - - # authoritative name servers - for i in range(0,total_auth_rr): - try: - query_head = self.next_rr(raw, query_head, self.authorities) - except Exception as e: - self._exc(e, 'parsing authoritative name servers') - return None - - # additional resource records - for i in range(0,total_add_rr): - try: - query_head = self.next_rr(raw, query_head, self.additional) - except Exception as e: - self._exc(e, 'parsing additional resource records') - return None - - self.parsed = True - - def _to_str(self): - flags = "|" - - if self.qr != 0: - flags += "QR " - if self.tc != 0: - flags += "TR " - if self.rd != 0: - flags += "RD " - if self.ra != 0: - flags += "RA " - if self.z != 0: - flags += "Z " - - flags += "|" - - s = "(id:%x fl:%s op:%d nq:%d na:%d nath:%d nadd:%d)" % (self.id, - flags, self.opcode, len(self.questions), len(self.answers), - len(self.authorities), len(self.additional)) - - if len(self.questions) > 0: - for q in self.questions: - s += "(q? "+str(q)+")" - - if len(self.answers) > 0: - for a in self.answers: - s += "(answ: "+str(a)+")" - - if len(self.authorities) > 0: - for a in self.authorities: - s += "(auth: "+str(a)+")" - - if len(self.additional) > 0: - for a in self.additional: - s += "(add: "+str(a)+")" - - return s - - # Utility methods for parsing. Generally these would be pulled out - # into a separate class. However, because the lengths are not known - # until the fields have been parsed, it is more convenient to keep - # them in the DNS class - - @classmethod - def _read_dns_name_from_index(cls, l, index, retlist): - try: - while True: - chunk_size = ord(l[index]) - - # check whether we have an internal pointer - if (chunk_size & 0xc0) == 0xc0: - # pull out offset from last 14 bits - offset = ((ord(l[index]) & 0x3) << 8 ) | ord(l[index+1]) - cls._read_dns_name_from_index(l, offset, retlist) - index += 1 - break - if chunk_size == 0: - break - index += 1 - retlist.append(l[index : index + chunk_size]) - index += chunk_size - return index - except IndexError: - raise Trunc("incomplete name") - - @classmethod - def read_dns_name_from_index(cls, l, index): - retlist = [] - next = cls._read_dns_name_from_index(l, index, retlist) - return (next + 1, ".".join(retlist)) - - def next_rr(self, l, index, rr_list): - array_len = len(l) - - # verify whether name is offset within packet - if index > array_len: - raise Trunc("next_rr: name truncated") - - index,name = self.read_dns_name_from_index(l, index) - - if index + 10 > array_len: - raise Trunc("next_rr: truncated") - - (qtype,qclass,ttl,rdlen) = struct.unpack('!HHIH', l[index:index+10]) - if index+10+rdlen > array_len: - raise Trunc("next_rr: data truncated") - - rddata = self.get_rddata(l, qtype, rdlen, index + 10) - rr_list.append(dns.rr(name, qtype, qclass,ttl,rdlen,rddata)) - - return index + 10 + rdlen - - def get_rddata(self, l, type, dlen, beg_index): - if beg_index + dlen > len(l): - raise Trunc('(dns) truncated rdata') - # A - if type == 1: - if dlen != 4: - raise Exception('(dns) invalid a data size',system='packet') - return IPAddr(l[beg_index : beg_index + 4]) - # AAAA - elif type == 28: - if dlen != 16: - raise Exception('(dns) invalid a data size',system='packet') - 
return IPAddr6.from_raw(l[beg_index : beg_index + dlen]) - # NS - elif type == 2: - return self.read_dns_name_from_index(l, beg_index)[1] - # PTR - elif type == 12: - return self.read_dns_name_from_index(l, beg_index)[1] - # CNAME - elif type == 5: - return self.read_dns_name_from_index(l, beg_index)[1] - # MX - elif type == 15: - #TODO: Save priority (don't just jump past it) - return self.read_dns_name_from_index(l, beg_index + 2)[1] - else: - return l[beg_index : beg_index + dlen] - - def next_question(self, l, index): - array_len = len(l) - - index,name = self.read_dns_name_from_index(l, index) - - if index + 4 > array_len: - raise Trunc("next_question: truncated") - - (qtype,qclass) = struct.unpack('!HH', l[index:index+4]) - self.questions.append(dns.question(name, qtype, qclass)) - return index + 4 - - # Utility classes for questions and RRs - - class question: - - def __init__(self, name, qtype, qclass): - self.name = name - self.qtype = qtype - self.qclass = qclass - - def __str__(self): - s = self.name - if self.qtype in rrtype_to_str: - s += " " + rrtype_to_str[self.qtype] - else: - s += " ??? " - if self.qclass in rrclass_to_str: - s += " " + rrclass_to_str[self.qclass] - else: - s += " ??? " - - return s - - class rr (object): - A_TYPE = 1 - NS_TYPE = 2 - MD_TYPE = 3 - MF_TYPE = 4 - CNAME_TYPE = 5 - SOA_TYPE = 6 - MB_TYPE = 7 - MG_TYPE = 8 - MR_TYPE = 9 - NULL_TYPE = 10 - WKS_TYPE = 11 - PTR_TYPE = 12 - HINFO_TYPE = 13 - MINFO_TYPE = 14 - MX_TYPE = 15 - TXT_TYPE = 16 - AAAA_TYPE = 28 - - def __init__ (self, _name, _qtype, _qclass, _ttl, _rdlen, _rddata): - self.name = _name - self.qtype = _qtype - self.qclass = _qclass - self.ttl = _ttl - self.rdlen = _rdlen - self.rddata = _rddata - - def __str__ (self): - s = self.name - if self.qtype in rrtype_to_str: - s += " " + rrtype_to_str[self.qtype] - else: - s += " ??? " - if self.qclass in rrclass_to_str: - s += " " + rrclass_to_str[self.qclass] - else: - s += " ??? " - s += " ttl:"+str(self.ttl) - s += " rdlen:"+str(self.rdlen) - s += " datalen:" + str(len(self.rddata)) - if len(self.rddata) == 4: - #FIXME: can be smarter about whether this is an IP - s+= " data:" + str(IPAddr(self.rddata)) - - return s diff --git a/pox/lib/packet/ethernet.py b/pox/lib/packet/ethernet.py @@ -1,179 +0,0 @@ -# Copyright 2011,2012,2013 James McCauley -# Copyright 2008 (C) Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is derived from the packet library in NOX, which was -# developed by Nicira, Inc. 
- -#====================================================================== -# Ethernet header -# -#====================================================================== - -import struct - -from .packet_base import packet_base -from .packet_utils import ethtype_to_str - -from pox.lib.addresses import * - -ETHER_ANY = EthAddr(b"\x00\x00\x00\x00\x00\x00") -ETHER_BROADCAST = EthAddr(b"\xff\xff\xff\xff\xff\xff") -BRIDGE_GROUP_ADDRESS = EthAddr(b"\x01\x80\xC2\x00\x00\x00") -LLDP_MULTICAST = EthAddr(b"\x01\x80\xc2\x00\x00\x0e") -PAE_MULTICAST = EthAddr(b'\x01\x80\xc2\x00\x00\x03') # 802.1x Port - # Access Entity -NDP_MULTICAST = EthAddr(b'\x01\x23\x20\x00\x00\x01') # Nicira discovery - # multicast - -class ethernet(packet_base): - "Ethernet packet struct" - - resolve_names = False - - MIN_LEN = 14 - - IP_TYPE = 0x0800 - ARP_TYPE = 0x0806 - RARP_TYPE = 0x8035 - VLAN_TYPE = 0x8100 - LLDP_TYPE = 0x88cc - PAE_TYPE = 0x888e # 802.1x Port Access Entity - #MPLS_UNICAST_TYPE = 0x8847 - #MPLS_MULTICAST_TYPE = 0x8848 - MPLS_TYPE = 0x8847 - MPLS_MC_TYPE = 0x8848 # Multicast - IPV6_TYPE = 0x86dd - PPP_TYPE = 0x880b - LWAPP_TYPE = 0x88bb - GSMP_TYPE = 0x880c - IPX_TYPE = 0x8137 - IPX_TYPE = 0x8137 - WOL_TYPE = 0x0842 - TRILL_TYPE = 0x22f3 - JUMBO_TYPE = 0x8870 - SCSI_TYPE = 0x889a - ATA_TYPE = 0x88a2 - QINQ_TYPE = 0x9100 - - INVALID_TYPE = 0xffff - - type_parsers = {} - - def __init__(self, raw=None, prev=None, **kw): - packet_base.__init__(self) - - if len(ethernet.type_parsers) == 0: - from .vlan import vlan - ethernet.type_parsers[ethernet.VLAN_TYPE] = vlan - from .arp import arp - ethernet.type_parsers[ethernet.ARP_TYPE] = arp - ethernet.type_parsers[ethernet.RARP_TYPE] = arp - from .ipv4 import ipv4 - ethernet.type_parsers[ethernet.IP_TYPE] = ipv4 - from .ipv6 import ipv6 - ethernet.type_parsers[ethernet.IPV6_TYPE] = ipv6 - from .lldp import lldp - ethernet.type_parsers[ethernet.LLDP_TYPE] = lldp - from .eapol import eapol - ethernet.type_parsers[ethernet.PAE_TYPE] = eapol - from .mpls import mpls - ethernet.type_parsers[ethernet.MPLS_TYPE] = mpls - ethernet.type_parsers[ethernet.MPLS_MC_TYPE] = mpls - from .llc import llc - ethernet._llc = llc - - self.prev = prev - - self.dst = ETHER_ANY - self.src = ETHER_ANY - - self.type = 0 - self.next = b'' - - if raw is not None: - self.parse(raw) - - self._init(kw) - - def parse (self, raw): - assert isinstance(raw, bytes) - self.next = None # In case of unfinished parsing - self.raw = raw - alen = len(raw) - if alen < ethernet.MIN_LEN: - self.msg('warning eth packet data too short to parse header: data len %u' - % (alen,)) - return - - self.dst = EthAddr(raw[:6]) - self.src = EthAddr(raw[6:12]) - self.type = struct.unpack('!H', raw[12:ethernet.MIN_LEN])[0] - - self.hdr_len = ethernet.MIN_LEN - self.payload_len = alen - self.hdr_len - - self.next = ethernet.parse_next(self, self.type, raw, ethernet.MIN_LEN) - self.parsed = True - - @staticmethod - def parse_next (prev, typelen, raw, offset=0, allow_llc=True): - parser = ethernet.type_parsers.get(typelen) - if parser is not None: - return parser(raw[offset:], prev) - elif typelen < 1536 and allow_llc: - return ethernet._llc(raw[offset:], prev) - else: - return raw[offset:] - - @staticmethod - def getNameForType (ethertype): - """ Returns a string name for a numeric ethertype """ - return ethtype_to_str(ethertype) - - @property - def effective_ethertype (self): - return self._get_effective_ethertype(self) - - @staticmethod - def _get_effective_ethertype (self): - """ - Get the "effective" ethertype of a packet. 
-
-    This means that if the payload is something like a VLAN or SNAP header,
-    we want the type from that deeper header. This is kind of ugly here in
-    the packet library, but it should make user code somewhat simpler.
-    """
-    if not self.parsed:
-      return ethernet.INVALID_TYPE
-    if self.type == ethernet.VLAN_TYPE or type(self.payload) == ethernet._llc:
-      try:
-        return self.payload.effective_ethertype
-      except:
-        return ethernet.INVALID_TYPE
-    return self.type
-
-  def _to_str(self):
-    s = ''.join(('[',str(EthAddr(self.src)),'>',str(EthAddr(self.dst)),' ',
-        ethernet.getNameForType(self.type),']'))
-    return s
-
-  def hdr(self, payload):
-    dst = self.dst
-    src = self.src
-    if type(dst) is EthAddr:
-      dst = dst.toRaw()
-    if type(src) is EthAddr:
-      src = src.toRaw()
-    return struct.pack('!6s6sH', dst, src, self.type)
diff --git a/pox/lib/packet/icmpv6.py b/pox/lib/packet/icmpv6.py
@@ -1,1015 +0,0 @@
-# Copyright 2011-2013 James McCauley
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#======================================================================
-#
-#                      ICMPv6 Header Format
-#
-#   0                   1                   2                   3
-#   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-#  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-#  |      Type     |      Code     |           Checksum            |
-#  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-#  |                             Data                              |
-#  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-#
-#
-#======================================================================
-
-"""
-This file parses ICMPv6 as well as NDP
-
-See RFCs 4443 and 4861 in particular.
-"""
-
-#TODO: Move NDP into its own file?
-#TODO: Clean this up in general -#TODO: Write tests (at least pack/unpack) - -import struct -import random -# import new -from .packet_utils import * -from .packet_base import packet_base - -from pox.lib.addresses import IPAddr6,EthAddr -from pox.lib.util import hexdump, init_helper - -# Errors -TYPE_DEST_UNREACH = 1 -TYPE_PACKET_TOO_BIG = 2 -TYPE_TIME_EXCEED = 3 -TYPE_PARAM_PROB = 4 - -# Informational -TYPE_ECHO_REQUEST = 128 -TYPE_ECHO_REPLY = 129 -TYPE_MC_LISTENER_QUERY = 130 -TYPE_MC_LISTENER_REPORT = 131 -TYPE_MC_LISTENER_DONE = 132 -TYPE_ROUTER_SOLICITATION = 133 # NDP -TYPE_ROUTER_ADVERTISEMENT = 134 # NDP -TYPE_NEIGHBOR_SOLICITATION = 135 # NDP -TYPE_NEIGHBOR_ADVERTISEMENT = 136 # NDP -TYPE_REDIRECT = 137 # NDP -TYPE_ROUTER_RENUMBER = 138 -TYPE_MC_LISTENER_REPORT_V2 = 143 -TYPE_MRD_ADVERTISEMENT = 151 -TYPE_MRD_SOLICITATION = 152 -TYPE_MRD_TERMINATION = 153 - -CODE_UNREACH_NO_ROUTE = 0 -CODE_UNREACH_ADMIN_PROHIBIT = 1 -CODE_UNREACH_BEYOND_SRC_SCOPE = 2 -CODE_UNREACH_ADDR_UNREACHABLE = 3 -CODE_UNREACH_PORT_UNREACHABLE = 4 -CODE_UNREACH_SRC_POLICY_FAIL = 5 -CODE_UNREACH_DST_ROUTE_REJECT = 6 -CODE_UNREACH_SRC_ROUTE_ERROR = 7 - -CODE_TIME_HOP_EXCEEDED = 0 -CODE_TIME_FRAG_TIME_EXCEEDED = 1 - -CODE_PARAM_BAD_HEADER = 0 -CODE_PARAM_BAD_NEXT_HEADER = 1 -CODE_PARAM_BAD_OPTION = 2 - -#TODO: Use a class registry for this -_type_to_name = { - 1 : "TYPE_DEST_UNREACH", - 2 : "TYPE_PACKET_TOO_BIG", - 3 : "TYPE_TIME_EXCEED", - 4 : "TYPE_PARAM_PROB", - 128 : "TYPE_ECHO_REQUEST", - 129 : "TYPE_ECHO_REPLY", - 130 : "TYPE_MC_LISTENER_QUERY", - 131 : "TYPE_MC_LISTENER_REPORT", - 132 : "TYPE_MC_LISTENER_DONE", - 133 : "TYPE_ROUTER_SOLICITATION", - 134 : "TYPE_ROUTER_ADVERTISEMENT", - 135 : "TYPE_NEIGHBOR_SOLICITATION", - 136 : "TYPE_NEIGHBOR_ADVERTISEMENT", - 137 : "TYPE_REDIRECT", - 138 : "TYPE_ROUTER_RENUMBER", - 143 : "TYPE_MC_LISTENER_REPORT_V2", - 151 : "TYPE_MRD_ADVERTISEMENT", - 152 : "TYPE_MRD_SOLICITATION", - 153 : "TYPE_MRD_TERMINATION", -} - - -_nd_options = {} - - -def nd_option_def (cls): - """ - Neighbor Discovery option decorator - """ - _nd_options[cls.TYPE] = cls - return cls - - -def _parse_ndp_options (raw, prev, offset = 0, buf_len = None): - """ - Parse ICMPv6 options and return (new_offset,[option_list]) - """ - # This is pretty bad at the moment - _offset = offset - if buf_len is None: buf_len = len(raw) - remaining = buf_len - offset - r = [] - - while offset < buf_len - 2: - if (buf_len - offset) % 8 != 0: - raise RuntimeError("Bad option data length") - offset,o = NDOptionBase.unpack_new(raw, offset, buf_len, prev=prev) - r.append(o) - - return offset,r - - -class NDOptionBase (packet_base): - "Neighbor Discovery option base class" - - #LENGTH = <fixed padded payload length in bytes or None> - #TYPE = <type> - - def __init__ (self, *args, **kw): - self.prev = kw.pop('prev', None) - self._init(*args, **kw) - init_helper(self, kw) - - def __repr__ (self): - s = type(self).__name__ - if s.startswith("NDOption"): - s = s[8:] - elif s.startswith("NDOpt"): - s = s[5:] - ss = self._fields() - if ss: - s += ' ' - s += " ".join(["%s:%s" % (k,v) for k,v in ss.items()]) - return "[" + s + "]" - - @property - def type (self): - return self.prev.type - @property - def code (self): - return self.prev.code - - def _fields (self): - """ - Override to add fields to stringizing - """ - return None - - def _init (self, *args, **kw): - """ - Called during initialization - - Override me - """ - pass - - def __len__ (self): - """ - Payload length in bytes - - Override if your option type has 
flexible length - """ - assert self.LENGTH is not None - return self.LENGTH - - @staticmethod - def unpack_new (raw, offset = 0, buf_len = None, prev = None): - """ - Unpacks a new instance of the appropriate subclass from a buffer - - returns (new_offset, object) - """ - if buf_len is None: buf_len = len(raw) - - if buf_len < 2: - raise TruncatedException() - t,l = struct.unpack_from("BB", raw, offset) - if l == 0: - raise RuntimeError("Zero-length NDP option") - - offset += 2 - length_bytes = l * 8 - 2 - if (buf_len - offset) < length_bytes: - raise TruncatedException() - - c = _nd_options.get(t) #FIXME: Ugh, *class registry* - if c is None: - c = NDOptionGeneric - if c.LENGTH is not None and c.LENGTH != length_bytes: - raise RuntimeError("Bad length for NDP option") - - new_off,o = c._unpack_new(raw, offset, t, length_bytes, prev=prev) - - assert new_off == offset+length_bytes - return new_off,o - - def pack (self): - d = self._pack_body() - while (len(d)+2) % 8: d += "\x00" # sloppy - return struct.pack("BB", self.TYPE, int((len(d)+2)/8)) + d - - @classmethod - def _unpack_new (cls, raw, offset, t, length, prev): - """ - Unpacks the body portion of this option type into a new object - - Override me. - """ - raise RuntimeError("Not implemented") - #o = new.instance(cls) - #o._init() - #return offset+length,o - - def _pack_body (self): - """ - Returns the body of this option packed into bytes - - Override me - """ - raise RuntimeError("Not implemented") - #return b'' - - -class NDOptionGeneric (NDOptionBase): - LENGTH = None - TYPE = None - - def __repr__ (self): - return "<NDP Option Type %s>" % (self.TYPE,) - - def _init (self, *args, **kw): - self.raw = b'' - - def __len__ (self): - return len(self.raw) - - def _pack_body (self): - return self.raw - - @classmethod - def _unpack_new (cls, raw, offset, t, length, prev): - """ - Unpacks the body portion of this option type into a new object - - Override me. - """ - #o = new.instance(cls) # Weird; this doesn't work despite the fact - # that it should be a new style class. 
- o = cls() - o._init() - o.TYPE = t - o.prev = prev - #o.LENGTH = length - o.raw = raw[offset:offset+length] - return offset+length,o - - -class NDOptLinkLayerAddress (NDOptionBase): - """ - Superclass for this source/target LL address options - - Assumes L2 is Ethernet - """ - LENGTH = 6 - - def _init (self, *args, **kw): - a = kw.pop('address',None) - if a is None: - self.address = None - else: - self.address = EthAddr(a) - - def _fields (self): - return {'addr':self.address} - - @classmethod - def _unpack_new (cls, raw, offset, t, length, prev): - return offset+length,cls(address = EthAddr(raw[offset:offset+length]), - prev=prev) - - def _pack_body (self): - return self.address.raw - - -@nd_option_def -class NDOptSourceLinkLayerAddress (NDOptLinkLayerAddress): - TYPE = 1 - -@nd_option_def -class NDOptTargetLinkLayerAddress (NDOptLinkLayerAddress): - TYPE = 2 - -@nd_option_def -class NDOptPrefixInformation (NDOptionBase): - LENGTH = 1 + 1 + 4 + 4 + 4 + 4 * 4 - TYPE = 3 - - ON_LINK_FLAG = 0x80 - AUTONOMOUS_FLAG = 0x40 - - def _init (self, *args, **kw): - self.prefix_length = 0 - self.on_link = False - self.is_autonomous = False - self.valid_lifetime = 0 - self.preferred_lifetime = 0 - self.prefix = IPAddr6.UNDEFINED - - def _fields (self): - r = {} - if self.on_link: r['on_link'] = True - if self.is_autonomous: r['autonomous'] = True - r['valid'] = self.valid_lifetime - r['preferred'] = self.preferred_lifetime - r['prefix'] = "%s/%s" % (self.prefix, self.prefix_length) - return r - - @classmethod - def _unpack_new (cls, raw, offset, t, length, prev): - o = cls() - o.prefix_length,flags,o.valid_lifetime,o.preferred_lifetime = \ - struct.unpack_from('!BBII', raw, offset) - offset += 1 + 1 + 4 + 4 - offset += 4 # Reserved - o.prefix = IPAddr6(raw=raw[offset:offset+16]) - offset += 16 - o.on_link = (flags & cls.ON_LINK_FLAG) != 0 - o.is_autonomous = (flags & cls.AUTONOMOUS_FLAG) != 0 - o.prev = prev - - return offset,o - - @property - def flags (self): - f = 0 - if self.on_link: f |= self.ON_LINK_FLAG - if self.is_autonomous: f |= self.AUTONOMOUS_FLAG - return f - - def pack (self): - s = struct.pack("!BBII", self.prefix_length, self.flags, - self.valid_lifetime,self.preferred_lifetime) - s += '\x00' * 4 - s += self.prefix.raw - return s - - -@nd_option_def -class NDOptMTU (NDOptionBase): - LENGTH = 6 - TYPE = 5 - - def _init (self, *args, **kw): - self.mtu = 0 - - def _fields (self): - return {'mtu':self.mtu} - - @classmethod - def _unpack_new (cls, raw, offset, t, length, prev): - o = cls() - o.prev = prev - _,o.mtu = struct.unpack_from('!HI', raw, offset) - offset += 2 + 4 - return offset,o - - def pack (self): - return struct.pack("!HI", 0, self.mtu) - - - -#NOTE: icmp_base sort of ignores the usual packet_base API. Hopefully -# the way it does so doesn't break too much. The API it supports -# is closer to the way a newer version of the API would work. - -class icmp_base (packet_base): - "ICMPv6 base class" - - def __str__ (self): - s = "[ICMPv6/" + self.__class__.__name__ - ss = self._fields() - if ss: - s += ' ' - s += " ".join(["%s:%s" % (k,v) for k,v in ss.items()]) - return s + "]" - - def _fields (self): - """ - Return map of fields used for string formatting. - - Override me to customize stringizing. - """ - return {} - - def _init_ (self): - """ - Called during initialization - - Override me - - In most other hierarchies that follow a similar pattern, this method - would be named "_init", but that name is already used in the - packet_base hierarchy. 
- """ - pass - - @property - def type (self): - return self.prev.type - @property - def code (self): - return self.prev.code - - def __init__ (self, prev=None, **kw): - packet_base.__init__(self) - self.prev = prev - self.next = None - - self._init_() - - self._init(kw) - self.parsed = True - - @classmethod - def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None): - """ - Unpacks a new instance of this class from a buffer - - returns (new_offset, object) - """ - raise RuntimeError("Unimplemented on class %s" % (cls.__name__,)) - #.parsed = True - - def pack (self): - raise RuntimeError("Unimplemented on class %s" % (type(self).__name__,)) - - -class ICMPGeneric (icmp_base): - def _fields (self): - return {'bytes':len(self.raw)} - - def _init_ (self): - self.raw = b'' - - @classmethod - def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None): - o = cls() - o.raw = raw[offset:offset+buf_len] - o.prev = prev - o.parsed = True - return offset+buf_len,o - - def pack (self): - return self.raw - - -class NDRouterSolicitation (icmp_base): - "Router Solicitation" - def _init_ (self): - self.options = [] - - def _fields (self): - return {"num_opts":len(self.options)} - - @classmethod - def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None): - o = cls() - - _offset = offset - if buf_len is None: buf_len = len(raw) - - try: - offset += 4 # Skip reserved - offset,o.options = _parse_ndp_options(raw, prev, offset, buf_len) - - o.parsed = True - except TruncatedException: - pass - - o.prev = prev - return offset,o - - def pack (self): - o = b'\x00' * 4 # _PAD4 - for opt in self.options: - o += opt.pack() - return o - - -class NDRouterAdvertisement (icmp_base): - "Router Advertisement" - MANAGED_FLAG = 0x80 - OTHER_FLAG = 0x40 - - def __init__ (self, raw=None, prev=None, **kw): - icmp_base.__init__(self) - self.prev = prev - - self.hop_limit = 0 - self.is_managed = False - self.is_other = False - self.lifetime = 0 # seconds - self.reachable = 0 # milliseconds - self.retrans_timer = 0 # milliseconds - self.options = [] - - if raw is not None: self.parse(raw) - self._init(kw) - - def _fields (self): - f = ['hop_limit','lifetime','reachable', - 'retrans_timer'] - r = {} - #if len(self.options): r['num_opts'] = len(self.options) - if len(self.options): r["opts"] = self.options - if self.is_managed: r['managed'] = True - if self.is_other: r['other'] = True - for ff in f: - r[ff] = getattr(self, ff) - return r - - @classmethod - def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None): - o = cls() - - _offset = offset - if buf_len is None: buf_len = len(raw) - - try: - o.hop_limit,flags,o.lifetime,o.reachable,o.retrans_time = \ - struct.unpack_from("!BBHII", raw, offset) - offset += 1 + 1 + 2 + 4 + 4 - offset,o.options = _parse_ndp_options(raw, prev, offset, buf_len) - o.is_managed = flags & cls.MANAGED_FLAG - o.is_other = flags & cls.OTHER_FLAG - - o.parsed = True - except TruncatedException: - pass - - o.raw = raw[_offset:offset] - o.prev = prev - return offset,o - - @property - def flags (self): - f = 0 - if self.is_managed: f |= self.MANAGED_FLAG - if self.is_other: f |= self.OTHER_FLAG - return f - - def pack (self): - o = '\x00' * 4 # _PAD4 - - o += struct.pack("!BBHII", self.hop_limit, self.flags, self.lifetime, - self.reachable, self.retrans_time) - - for opt in self.options: - o += opt.pack() - return o - - -class NDNeighborSolicitation (icmp_base): - "Neighbor Solicitation" - def __init__ (self, raw=None, prev=None, **kw): - icmp_base.__init__(self) - 
self.prev = prev - - self.target = IPAddr6.UNDEFINED - self.options = [] - - if raw is not None: self.parse(raw) - self._init(kw) - - def _fields (self): - f = ['target'] - r = {'num_opts':len(self.options)} - r["opts"]=self.options - for ff in f: - r[ff] = getattr(self, ff) - return r - - @classmethod - def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None): - o = cls() - - _offset = offset - if buf_len is None: buf_len = len(raw) - - try: - offset += 4 # Skip reserved - o.target = IPAddr6(raw=raw[offset:offset+16]) - offset += 16 - offset,o.options = _parse_ndp_options(raw, prev, offset, buf_len) - - o.parsed = True - except TruncatedException: - pass - - o.raw = raw[_offset:offset] - o.prev = prev - return offset,o - - def pack (self): - o = b'\x00' * 4 # _PAD4 - o += self.target.raw - for opt in self.options: - o += opt.pack() - return o - - -class NDNeighborAdvertisement (icmp_base): - "Neighbor Advertisement" - - ROUTER_FLAG = 0x80 - SOLICITED_FLAG = 0x40 - OVERRIDE_FLAG = 0x20 - - def __init__ (self, raw=None, prev=None, **kw): - icmp_base.__init__(self) - self.prev = prev - - self.target = IPAddr6.UNDEFINED - self.options = [] - self.is_router = False - self.is_solicited = False - self.is_override = False - - if raw is not None: self.parse(raw) - self._init(kw) - - def _fields (self): - f = ['target'] - r = {} - #if len(self.options): r['num_opts'] = len(self.options) - if len(self.options): r["opts"] = self.options - if self.is_router: r['router'] = True - if self.is_solicited: r['solicited'] = True - if self.is_override: r['override'] = True - for ff in f: - r[ff] = getattr(self, ff) - return r - - @classmethod - def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None): - o = cls() - - _offset = offset - if buf_len is None: buf_len = len(raw) - - try: - flags = ord(raw[offset]) - o.is_router = (flags & cls.ROUTER_FLAG) != 0 - o.is_solicited = (flags & cls.SOLICITED_FLAG) != 0 - o.is_override = (flags & cls.OVERRIDE_FLAG) != 0 - - offset += 4 # Skip reserved - o.target = IPAddr6(raw=raw[offset:offset+16]) - offset += 16 - offset,o.options = _parse_ndp_options(raw, prev, offset, buf_len) - - o.parsed = True - except TruncatedException: - pass - - o.raw = raw[_offset:offset] - o.prev = prev - return offset,o - - def pack (self): - o = 0 - if self.is_router: o |= self.ROUTER_FLAG - if self.is_solicited: o |= self.SOLICITED_FLAG - if self.is_override : o |= self.OVERRIDE_FLAG - o = chr(o) - o += '\x00' * 3 # _PAD3 - o += self.target.raw - for opt in self.options: - o += opt.pack() - return o - - -class TimeExceeded (icmp_base): - "Time Exceeded Big Message" - - def __init__ (self, raw=None, prev=None, **kw): - icmp_base.__init__(self) - self.prev = prev - self.next = None - - if raw is not None: self.parse(raw) - self._init(kw) - - def _fields (self): - f = ['mtu'] - r = {} - for ff in f: - r[ff] = getattr(self, ff) - return r - - @classmethod - def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None): - o = cls() - - _offset = offset - if buf_len is None: buf_len = len(raw) - - try: - offset += 4 # Unused - - o.next = raw[offset:buf_len] - offset = buf_len - - o.parsed = True - except TruncatedException: - pass - - o.raw = raw[_offset:offset] - o.prev = prev - return offset,o - - def hdr (self, payload): - return struct.pack('!I', 0) # Unused - - -class PacketTooBig (icmp_base): - "Packet Too Big Message" - - def __init__ (self, raw=None, prev=None, **kw): - icmp_base.__init__(self) - self.prev = prev - self.next = None - - self.mtu = 0 - - if raw is 
not None: self.parse(raw) - self._init(kw) - - def _fields (self): - f = ['mtu'] - r = {} - for ff in f: - r[ff] = getattr(self, ff) - return r - - @classmethod - def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None): - o = cls() - - _offset = offset - if buf_len is None: buf_len = len(raw) - - try: - o.mtu = struct.unpack_from("!I", raw, offset) - offset += 4 - - o.next = raw[offset:buf_len] - offset = buf_len - - o.parsed = True - except TruncatedException: - pass - - o.raw = raw[_offset:offset] - o.prev = prev - return offset,o - - def hdr (self, payload): - return struct.pack('!I', self.mtu) - - -class unpack_new_adapter (object): - """ - Mixin to support unpack_new on classes with old-style construction/parse() - """ - @classmethod - def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None): - raw = raw[offset:] - if buf_len is not None: - raw = raw[:buf_len] - o = cls(raw=raw,prev=prev) - #o.parse(raw) - return offset+len(o.raw),o - -#---------------------------------------------------------------------- -# -# Echo Request/Reply -# 0 1 2 3 -# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 -# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -# | Identifier | Sequence Number | -# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -# | Data | -# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -# -#---------------------------------------------------------------------- -class echo (packet_base, unpack_new_adapter): - "ICMP echo packet struct" - - MIN_LEN = 4 - - def __init__ (self, raw=None, prev=None, **kw): - packet_base.__init__(self) - - self.prev = prev - - self.id = random.randint(0, 65535) - self.seq = 0 - - if raw is not None: - self.parse(raw) - - self._init(kw) - - def __str__ (self): - return "[ICMP6 echo id:%i seq:%i]" % (self.id, self.seq) - - def parse (self, raw): - assert isinstance(raw, bytes) - self.raw = raw - - dlen = len(raw) - - if dlen < self.MIN_LEN: - self.msg('(echo parse) warning echo payload too short to ' - 'parse header: data len %u' % (dlen,)) - return - - (self.id, self.seq) = struct.unpack('!HH', raw[:self.MIN_LEN]) - - self.parsed = True - self.next = raw[echo.MIN_LEN:] - - def hdr (self, payload): - return struct.pack('!HH', self.id, self.seq) - - -#---------------------------------------------------------------------- -# -# Destination Unreachable -# 0 1 2 3 -# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 -# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -# | Unused | -# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -# | IP Header + 8 bytes of original datagram's data | -# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -# -#---------------------------------------------------------------------- -class unreach (packet_base, unpack_new_adapter): - "ICMP unreachable packet struct" - - MIN_LEN = 4 - - def __init__ (self, raw=None, prev=None, **kw): - - self.prev = prev - - self.unused = 0 - - if raw is not None: - self.parse(raw) - - self._init(kw) - - def __str__ (self): - s = ''.join(('[', 'm:', str(self.next_mtu), ']')) - - return _str_rest(s, self) - - def parse (self, raw): - assert isinstance(raw, bytes) - self.raw = raw - dlen = len(raw) - if dlen < self.MIN_LEN: - self.msg('(unreach parse) warning unreachable payload too ' - + 'short to parse header: data len %u' % (dlen,)) - return - - (self.unused,) = struct.unpack('!I', raw[:self.MIN_LEN]) - - self.parsed = True - - from . 
import ipv6 - # xxx We're assuming this is IPv6! - if dlen >= 8 + ipv6.MIN_LEN: - self.next = ipv6.ipv6(raw=raw[unreach.MIN_LEN:],prev=self) - else: - self.next = raw[unreach.MIN_LEN:] - - def hdr (self, payload): - return struct.pack('!I', self.unused) - - - - -class icmpv6 (packet_base): - "ICMP packet struct" - - MIN_LEN = 4 - - def __init__ (self, raw=None, prev=None, **kw): - super(icmpv6, self).__init__() - - self.prev = prev - - self.type = 0 - self.code = 0 - self.csum = 0 - - if raw is not None: - self.parse(raw) - - self._init(kw) - - def _calc_checksum (self): - ph = self.prev.srcip.raw + self.prev.dstip.raw - ph += struct.pack('!IHBB', len(self.raw), 0, 0, 58) # 58 == ICMPv6 - return checksum(ph + self.raw, skip_word=21) - - @property - def checksum_ok (self): - if not self.prev: return True - if getattr(self, 'raw', None) is None: return True - return self.csum == self._calc_checksum() - - def _to_str (self): - t = _type_to_name.get(self.type, str(self.type)) - cs = '' - if not self.checksum_ok: - cs = " BAD_CHECKSUM(%02x!=%02x)" % (self.csum, self._calc_checksum()) - s = '[ICMP+%s/%i%s]' % (t, self.code, cs) - return s - - def parse (self, raw, buf_len=None): - assert isinstance(raw, bytes) - if buf_len is None: - buf_len = len(raw) - self.raw = raw[:buf_len] - else: - self.raw = raw - dlen = len(self.raw) - if dlen < self.MIN_LEN: - self.msg('(icmp parse) warning ICMP packet data too short to ' - + 'parse header: data len %u' % (dlen,)) - return - - (self.type, self.code, self.csum) \ - = struct.unpack('!BBH', raw[:self.MIN_LEN]) - #self.parsed = True - - if not self.checksum_ok: - self.msg("Bad ICMPv6 checksum") - self.next = raw[self.MIN_LEN:] - return - else: - self.parsed = True - - #TODO: Use a class registry - cls = { - TYPE_ECHO_REQUEST:echo, - TYPE_ECHO_REPLY:echo, - TYPE_PACKET_TOO_BIG:PacketTooBig, - TYPE_TIME_EXCEED:TimeExceeded, - TYPE_DEST_UNREACH:unreach, - TYPE_ROUTER_SOLICITATION:NDRouterSolicitation, - TYPE_NEIGHBOR_SOLICITATION:NDNeighborSolicitation, - TYPE_ROUTER_ADVERTISEMENT:NDRouterAdvertisement, - TYPE_NEIGHBOR_ADVERTISEMENT:NDNeighborAdvertisement, - }.get(self.type) - if cls is None: - #cls = unknown - self.next = raw[self.MIN_LEN:] - return - - offset,self.next = cls.unpack_new(raw, offset=self.MIN_LEN, - buf_len=buf_len,prev=self) - - - def hdr (self, payload): - payload_len = len(payload) + 4 - ph = self.prev.srcip.raw + self.prev.dstip.raw - ph += struct.pack('!IHBBBBH', payload_len, 0, 0, 58, # 58 == ICMPv6 - self.type, self.code, 0) - self.csum = checksum(ph + payload, 0, 21) - return struct.pack('!BBH', self.type, self.code, self.csum) diff --git a/pox/lib/packet/igmp.py b/pox/lib/packet/igmp.py @@ -1,120 +0,0 @@ -# Copyright 2012 James McCauley -# Copyright 2008 (C) Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is derived from the packet library in NOX, which was -# developed by Nicira, Inc. 
- -#====================================================================== -# -# IGMP v1/v2 -# -# 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3 -# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 -# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -# | Ver * | Type | MRT/Unused ** | Checksum | -# +-------+-------+---------------+-------------------------------+ -# | Group Address | -# +-------------------------------+-------------------------------+ -# -# * In v2, there is no Version field, and Type is the whole 8 bits -# ** Max Response Time in v2 only -# -#====================================================================== - -#TODO: Support for IGMP v3 - -import struct -from .packet_utils import * -from .packet_base import packet_base -from pox.lib.addresses import * - -MEMBERSHIP_QUERY = 0x11 -MEMBERSHIP_REPORT = 0x12 -MEMBERSHIP_REPORT_V2 = 0x16 -LEAVE_GROUP_V2 = 0x17 - -# IGMP multicast address -IGMP_ADDRESS = IPAddr("224.0.0.22") - -# IGMP IP protocol -IGMP_PROTOCOL = 2 - -class igmp (packet_base): - """ - IGMP Message - """ - - MIN_LEN = 8 - IGMP_ADDRESS = IGMP_ADDRESS - IGMP_PROTOCOL = IGMP_PROTOCOL - - MEMBERSHIP_QUERY = MEMBERSHIP_QUERY - MEMBERSHIP_REPORT = MEMBERSHIP_REPORT - MEMBERSHIP_REPORT_V2 = MEMBERSHIP_REPORT_V2 - LEAVE_GROUP_V2 = LEAVE_GROUP_V2 - - def __init__(self, raw=None, prev=None, **kw): - packet_base.__init__(self) - - self.prev = prev - - self.ver_and_type = 0 - self.max_response_time = 0 - self.csum = 0 - self.address = None - self.extra = b'' - - if raw is not None: - self.parse(raw) - - self._init(kw) - - def hdr (self, payload): - s = struct.pack("!BBHi", self.ver_and_type, self.max_response_time, - 0, self.address.toSigned(networkOrder=False)) - s += self.extra - self.csum = checksum(s) - s = struct.pack("!BBHi", self.ver_and_type, self.max_response_time, - self.csum, self.address.toSigned(networkOrder=False)) - s += self.extra - return s - - def parse (self, raw): - assert isinstance(raw, bytes) - self.raw = raw - dlen = len(raw) - if dlen < self.MIN_LEN: - self.msg('packet data too short to parse') - return None - - self.ver_and_type, self.max_response_time, self.csum, ip = \ - struct.unpack("!BBHi", raw[:self.MIN_LEN]) - self.extra = raw[self.MIN_LEN:] - - self.address = IPAddr(ip, networkOrder = False) - - s = struct.pack("!BBHi", self.ver_and_type, self.max_response_time, - 0, self.address.toSigned(networkOrder=False)) - s += self.extra - csum = checksum(s) - if csum != self.csum: - self.err("IGMP hecksums don't match") - else: - self.parsed = True - - def __str__ (self): - s = "[IGMP " - s += "vt:%02x %s" % (self.ver_and_type, self.address) - return s + "]" diff --git a/pox/lib/packet/ipv4.py b/pox/lib/packet/ipv4.py @@ -1,182 +0,0 @@ -# Copyright 2011 James McCauley -# Copyright 2008 (C) Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file is derived from the packet library in NOX, which was -# developed by Nicira, Inc. 
-
-#======================================================================
-#
-#                        IPv4 Header Format
-#
-#    0                   1                   2                   3
-#    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-#   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-#   |Version|  IHL  |Type of Service|          Total Length         |
-#   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-#   |         Identification        |Flags|     Fragment Offset     |
-#   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-#   |  Time to Live |    Protocol   |        Header Checksum        |
-#   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-#   |                        Source Address                         |
-#   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-#   |                      Destination Address                      |
-#   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-#   |                    Options                    |    Padding    |
-#   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-#
-#======================================================================
-
-import struct
-import time
-from .packet_utils import *
-from .tcp import *
-from .udp import *
-from .icmp import *
-from .igmp import *
-
-from .packet_base import packet_base
-
-from pox.lib.addresses import IPAddr, IP_ANY, IP_BROADCAST
-
-class ipv4(packet_base):
-    "IP packet struct"
-
-    MIN_LEN = 20
-
-    IPv4 = 4
-    ICMP_PROTOCOL = 1
-    TCP_PROTOCOL = 6
-    UDP_PROTOCOL = 17
-    IGMP_PROTOCOL = 2
-
-    DF_FLAG = 0x02
-    MF_FLAG = 0x01
-
-    ip_id = int(time.time())
-
-    def __init__(self, raw=None, prev=None, **kw):
-        packet_base.__init__(self)
-
-        self.prev = prev
-
-        self.v = 4
-        self.hl = ipv4.MIN_LEN / 4
-        self.tos = 0
-        self.iplen = ipv4.MIN_LEN
-        ipv4.ip_id = (ipv4.ip_id + 1) & 0xffff
-        self.id = ipv4.ip_id
-        self.flags = 0
-        self.frag = 0
-        self.ttl = 64
-        self.protocol = 0
-        self.csum = 0
-        self.srcip = IP_ANY
-        self.dstip = IP_ANY
-        self.next = b''
-
-        if raw is not None:
-            self.parse(raw)
-
-        self._init(kw)
-
-    def __str__(self):
-        s = "[IP+%s %s>%s (cs:%02x v:%s hl:%s l:%s t:%s)]" % (
-            ipproto_to_str(self.protocol),
-            self.srcip, self.dstip,
-            self.csum,
-            self.v, self.hl, self.iplen, self.ttl)
-
-        return s
-
-    def parse(self, raw):
-        assert isinstance(raw, bytes)
-        self.next = None # In case of unfinished parsing
-        self.raw = raw
-        dlen = len(raw)
-        if dlen < ipv4.MIN_LEN:
-            self.msg('warning IP packet data too short to parse header: data len %u' % (dlen,))
-            return
-
-        (vhl, self.tos, self.iplen, self.id, self.frag, self.ttl,
-         self.protocol, self.csum, self.srcip, self.dstip) \
-            = struct.unpack('!BBHHHBBHII', raw[:ipv4.MIN_LEN])
-
-        self.v = vhl >> 4
-        self.hl = vhl & 0x0f
-
-        self.flags = self.frag >> 13
-        self.frag = self.frag & 0x1fff
-
-        self.dstip = IPAddr(self.dstip)
-        self.srcip = IPAddr(self.srcip)
-
-        if self.v != ipv4.IPv4:
-            self.msg('(ip parse) warning: IP version %u not IPv4' % self.v)
-            return
-        if self.hl < 5:
-            self.msg('(ip parse) warning: IP header length shorter than MIN_LEN (IHL=%u => header len=%u)' \
-                     % (self.hl, 4 * self.hl))
-            return
-        if self.iplen < ipv4.MIN_LEN:
-            self.msg('(ip parse) warning: Invalid IP len %u' % self.iplen)
-            return
-        if (self.hl * 4) > self.iplen:
-            self.msg('(ip parse) warning: IP header longer than IP length including payload (%u vs %u)' \
-                     % (self.hl, self.iplen))
-            return
-        if (self.hl * 4) > dlen:
-            self.msg('(ip parse) warning: IP header is truncated')
-            return
-
-        # At this point, we are reasonably certain that we have an IP
-        # packet
-        self.parsed = True
-
-        length = self.iplen
-        if length > dlen:
-            length = dlen # Clamp to what we've got
-        if self.protocol == ipv4.UDP_PROTOCOL:
-            self.next = udp(raw=raw[self.hl*4:length], prev=self)
-        elif self.protocol == ipv4.TCP_PROTOCOL:
-            self.next = tcp(raw=raw[self.hl*4:length], prev=self)
-        elif self.protocol == ipv4.ICMP_PROTOCOL:
-            self.next = icmp(raw=raw[self.hl*4:length], prev=self)
-        elif self.protocol == ipv4.IGMP_PROTOCOL:
-            self.next = igmp(raw=raw[self.hl*4:length], prev=self)
-        elif dlen < self.iplen:
-            self.msg('(ip parse) warning IP packet data shorter than IP len: %u < %u' % (dlen, self.iplen))
-        else:
-            self.next = raw[self.hl*4:length]
-
-        if isinstance(self.next, packet_base) and not self.next.parsed:
-            self.next = raw[self.hl*4:length]
-
-    def checksum(self):
-        data = struct.pack('!BBHHHBBHII', (self.v << 4) + self.hl, self.tos,
-                           self.iplen, self.id,
-                           (self.flags << 13) | self.frag, self.ttl,
-                           self.protocol, 0, self.srcip.toUnsigned(),
-                           self.dstip.toUnsigned())
-        return checksum(data, 0)
-
-
-    def hdr(self, payload):
-        self.iplen = self.hl * 4 + len(payload)
-        self.csum = self.checksum()
-        return struct.pack('!BBHHHBBHII', (self.v << 4) + self.hl, self.tos,
-                           self.iplen, self.id,
-                           (self.flags << 13) | self.frag, self.ttl,
-                           self.protocol, self.csum, self.srcip.toUnsigned(),
-                           self.dstip.toUnsigned())
diff --git a/pox/lib/packet/ipv6.py b/pox/lib/packet/ipv6.py
@@ -1,437 +0,0 @@
-# Copyright 2013,2013 James McCauley
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#======================================================================
-#
-#                        IPv6 Header Format
-#
-#    0                   1                   2                   3
-#    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-#   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-#   |Version| Traffic Class |              Flow Label               |
-#   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-#   |         Payload Length        |  Next Header  |   Hop Limit   |
-#   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-#   |                                                               |
-#   |                        Source Address                         |
-#   |                                                               |
-#   |                                                               |
-#   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-#   |                                                               |
-#   |                      Destination Address                      |
-#   |                                                               |
-#   |                                                               |
-#   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-#
-#======================================================================
-
-"""
-IPv6 packet classes
-
-This is still rough.  There are a number of things remaining to do
-(additional extension header types, payload inference), and there
-are probably places where the API isn't quite right yet.  But it's
-a start.
-"""
-
-import struct
-from .packet_utils import *
-from .tcp import *
-from .udp import *
-from .icmpv6 import *
-
-from .packet_base import packet_base
-
-from pox.lib.addresses import IPAddr6
-from pox.lib.util import init_helper
-
-
-_extension_headers = {}
-
-def extension_header_def (header_type):
-  """
-  Extension Header decorator
-  """
-  #TODO: Switch to using generic class registry
-  def f (cls):
-    _extension_headers[header_type] = cls
-    cls.TYPE = header_type
-    return cls
-  return f
-
-
-class ExtensionHeader (object):
-  next_header_type = None
-
-
-class NormalExtensionHeader (ExtensionHeader):
-  """
-  A superclass for many ExtensionHeaders
-
-  Many Extension Headers follow the same basic format, which is also suggested
-  for future Extension Headers in RFC 6564.
-  """
-
-  #TYPE = <type number>
-
-  def __init__ (self, *args, **kw):
-    self.payload_length = 0
-    self._init(*args, **kw)
-
-    init_helper(self, kw)
-
-  def __len__ (self):
-    """
-    Returns the packed length
-    """
-    l = self.payload_length + 2
-    return int(((l + 7) / 8) - 1)
-
-  @classmethod
-  def unpack_new (cls, raw, offset = 0, max_length = None):
-    """
-    Unpacks a new instance of this class from a buffer
-
-    returns (new_offset, object)
-    """
-    if max_length and max_length < 2:
-      raise TruncatedException()
-    nh,l = struct.unpack_from("!BB", raw, offset)
-    max_length -= 2
-    l = l * 8 + 6
-    if max_length is not None and max_length < l:
-      raise TruncatedException()
-    offset += 2
-    d = cls._unpack_body(raw, offset, nh, l)
-    offset += l
-    d['payload_length'] = l
-    d['next_header_type'] = nh
-    return offset, cls(**d)
-
-  def pack (self):
-    o = struct.pack("!BB", self.next_header_type, len(self))
-    return o + self._pack_body()
-
-  def _init (self, *args, **kw):
-    """
-    Called during initialization
-
-    Override me
-    """
-    pass
-
-  def _pack_body (self):
-    """
-    Returns the body of this Extension Header packed into bytes
-
-    Override me
-    """
-    return b''
-
-  @classmethod
-  def _unpack_body (cls, raw, offset, next_header_type, length):
-    """
-    Unpacks the body portion of an Extension Header
-
-    Override me.
-    """
-    return {}
-
-
-class FixedExtensionHeader (ExtensionHeader):
-  """
-  A superclass for fixed length Extension Headers
-  """
-
-  #TYPE = <type number>
-  #LENGTH = <total length in bytes>
-
-  def __init__ (self, *args, **kw):
-    self.next_header_type = None
-    self._init(*args, **kw)
-
-    init_helper(self, kw)
-
-  def __len__ (self):
-    """
-    Returns the packed length
-    """
-    return self.LENGTH
-
-  @classmethod
-  def unpack_new (cls, raw, offset = 0, max_length = None):
-    """
-    Unpacks a new instance of this class from a buffer
-    """
-    if max_length is not None and (max_length - offset) < cls.LENGTH:
-      raise TruncatedException()
-
-    nh = struct.unpack_from("!B", raw, offset)[0]
-    d = cls._unpack_body(raw, offset + 1, nh, cls.LENGTH - 1)
-    offset += cls.LENGTH
-    d['next_header_type'] = nh
-    return offset, cls(**d)
-
-  def pack (self):
-    o = struct.pack("!B", self.next_header_type) + self._pack_body()
-    assert len(o) == self.LENGTH, "Bad packed length"
-    return o
-
-  def _init (self, *args, **kw):
-    """
-    Called during initialization
-
-    Override me
-    """
-    pass
-
-  def _pack_body (self):
-    """
-    Returns the body of this Extension Header packed into bytes
-
-    Override me
-    """
-    return b''
-
-  @classmethod
-  def _unpack_body (self, raw, offset, next_header_type, length):
-    """
-    Unpacks the body portion of an Extension Header
-
-    Override me.
-    """
-    return {}
-
-
-class DummyExtensionHeader (NormalExtensionHeader):
-  """
-  Just saves the raw body data
-  """
-  def _init (self, *args, **kw):
-    self.raw_body = b''
-  def _pack_body (self):
-    return self.raw_body
-  @classmethod
-  def _unpack_body (self, raw, offset, next_header_type, length):
-    return {'raw_body':raw[offset:offset+length]}
-
-
-class DummyFixedExtensionHeader (FixedExtensionHeader):
-  """
-  Just saves the raw body data
-  """
-  def _init (self, *args, **kw):
-    self.raw_body = '\x00' * (self.LENGTH - 1)
-  def _pack_body (self):
-    return self.raw_body
-  @classmethod
-  def _unpack_body (self, raw, offset, next_header_type, length):
    return {'raw_body':raw[offset:offset+length]}
-
-
-#TODO: Implement Extension Headers for real (they're pretty much just
-#      placeholders at present)
-#TODO: Implement the IPSec options (Authentication and ESP)
-
-@extension_header_def(0)
-class HopByHopOptions (DummyExtensionHeader):
-  pass
-
-@extension_header_def(43)
-class Routing (DummyExtensionHeader):
-  pass
-
-@extension_header_def(44)
-class Fragment (DummyFixedExtensionHeader):
-  LENGTH = 8
-  pass
-
-@extension_header_def(60)
-class DestinationOptions (DummyExtensionHeader):
-  pass
-
-
-class ipv6 (packet_base):
-  """
-  IPv6 packet class
-  """
-
-  MIN_LEN = 40
-
-  ICMP6_PROTOCOL = 58
-  TCP_PROTOCOL = 6
-  UDP_PROTOCOL = 17
-  IGMP_PROTOCOL = 2
-  NO_NEXT_HEADER = 59
-
-  def __init__ (self, raw=None, prev=None, **kw):
-    packet_base.__init__(self)
-
-    self.prev = prev
-
-    self.v = 6