SDN Development in Practice (2) - Transparent HTTP Proxy [OpenFlow+Floodlight]

This tutorial continues SDN Development in Practice (1) - Transparent HTTP Proxy [OpenFlow+Floodlight], covering the remaining configuration and the experimental results.

3.2 Mininet Configuration and the Proxy Server Script

3.2.1 Proxy Server Script

The proxy host prox needs to run a program that forwards the packets it receives, so we write proxy.c as shown below. Once compiled, it runs on the prox host and sends every received packet back out through the same interface it arrived on.

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pcap/pcap.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/socket.h>

#define FILTER "icmp or tcp"
pcap_t *handle;
void got_packet(u_char *args, const struct pcap_pkthdr*, const u_char *pkt);

int main(int argc, char *argv[])
{
    char *dev;                         /* The device to sniff on */
    char errbuf[PCAP_ERRBUF_SIZE];     /* Error string */
    struct bpf_program fp;             /* The compiled filter */
    bpf_u_int32 mask;                  /* Our netmask */
    bpf_u_int32 net;                   /* Our IP */

    if (argc != 2) {
        printf("usage: proxy <dev>\n");
        return EXIT_FAILURE;
    } else {
        dev = argv[1];
    }

    /* Find the properties for the device */
    if (pcap_lookupnet(dev, &net, &mask, errbuf) == -1) {
        printf("warning: %s: could not get network: %s\n", dev, errbuf);
        net  = 0;
        mask = 0;
    }

    /* Open the session in promiscuous mode */
    handle = pcap_open_live(dev, BUFSIZ, 1, 1000, errbuf);
    if (handle == NULL) {
        printf("error: %s: could not open: %s\n", dev, errbuf);
        return EXIT_FAILURE;
    }

    if (pcap_compile(handle, &fp, FILTER, 0, mask) == -1) {
        printf("error: could not compile filter '%s': %s\n", FILTER, pcap_geterr(handle));
        return EXIT_FAILURE;
    }

    if (pcap_setfilter(handle, &fp) == -1) {
        printf("error: could not set filter '%s': %s\n", FILTER, pcap_geterr(handle));
        return EXIT_FAILURE;
    }

    /* Capture packets in a loop, handing each one to got_packet() */
    int r = pcap_loop(handle, -1, got_packet, NULL);
    printf("pcap_loop() quit with: %d\n", r);

    pcap_close(handle);
    return EXIT_SUCCESS;
}

void got_packet(
    u_char                     *args,
    const struct pcap_pkthdr   *header,
    const u_char               *packet)
{
    const struct ether_header  *ethernet;
    const struct ip            *ip;
    char src_ip_str[16];
    char dst_ip_str[16];

    ethernet = (struct ether_header*) packet;
    if (ethernet->ether_type != ntohs(ETHERTYPE_IP)) {
        printf("ignoring non-ip packet (0x%02X) of length %d\n",
            ntohs(ethernet->ether_type), header->len);
        fflush(stdout);
        return;
    }

    ip = (struct ip*) (ethernet+1);
    strcpy(src_ip_str, inet_ntoa(ip->ip_src));
    strcpy(dst_ip_str, inet_ntoa(ip->ip_dst));

    if (ip->ip_p == IPPROTO_ICMP)
        printf("%15s --> %15s  [ICMP]\n", src_ip_str, dst_ip_str);
    else if (ip->ip_p == IPPROTO_TCP)
        printf("%15s --> %15s  [TCP]\n", src_ip_str, dst_ip_str);
    else
        printf("%15s --> %15s  [%d]\n", src_ip_str, dst_ip_str, ip->ip_p);
    fflush(stdout);

    /* Send the received packet back out the same interface */
    if (pcap_inject(handle, packet, header->len) == -1) {
        printf("error: unable to proxy packet: %s\n", pcap_geterr(handle));
        fflush(stdout);
    }
}

3.2.2 Mininet Configuration

Starting the Mininet network and running proxy.c would require typing a lot of commands into a terminal, so we put them all into a Python script and run it automatically. The run.py script below does the following for us:

  1. Try to connect to the Floodlight controller over a socket (localhost, port=6653), waiting until the connection succeeds
  2. Start Mininet and create a virtual network with the specified topology, including the Open vSwitch instances and the hosts
  3. Compile proxy.c and run the resulting proxy binary on the prox host
#!/usr/bin/env python2.7

from __future__   import print_function
from argparse     import ArgumentParser
from subprocess   import Popen, STDOUT, PIPE
from socket       import socket, AF_INET, SOCK_STREAM
from time         import sleep
from sys          import stdout
from threading    import Thread
from mininet.net  import Mininet
from mininet.topo import Topo
from mininet.node import RemoteController
from mininet.node import OVSKernelSwitch

MAGIC_MAC = "00:11:00:11:00:11"
MAGIC_IP  = "10.111.111.111"

# Mininet topology
class MyTopo(Topo):

    def __init__(self):
        """Create custom topo."""

        Topo.__init__(self)

        switch1 = self.addSwitch('s1')
        switch2 = self.addSwitch('s2')
        switch3 = self.addSwitch('s3')

        h1 = self.addHost('h1')
        h2 = self.addHost('h2')
        h3 = self.addHost('h3')
        prox = self.addHost('prox')

        link1 = self.addLink(h1, switch1)
        link2 = self.addLink(h2, switch1)
        link3 = self.addLink(h3, switch3)
        link4 = self.addLink(prox, switch2)

        link5 = self.addLink(switch1, switch2)
        link6 = self.addLink(switch2, switch3)

# Thread that runs the compiled proxy on the prox host
class Prox(Thread):

    def __init__(self, node, log=None):

        Thread.__init__(self)
        self.node = node
        self.log  = log

    def run(self):
        if self.log != None:
            self.log = open(self.log, 'w')
        self.proc = self.node.popen(
            ["./proxy", "prox-eth0"],
            stdout=self.log, stderr=self.log
        )
        print("proxy is running")
        self.proc.wait()

# Wait until the controller accepts connections
def wait_on_controller():

    s = socket(AF_INET, SOCK_STREAM)
    addr = ("localhost", 6653)

    try:
        s.connect(addr)
        s.close()
        return
    except:
        pass

    print("Waiting on controller", end=""); stdout.flush()

    while True:
        sleep(0.1)
        try:
            s.connect(addr)
            s.close()
            print("")
            return
        except:
            print(".", end=""); stdout.flush()
            continue

# Compile the proxy.c file
def build_prox(psrc):
    gcc_proc = Popen(stdout=PIPE, stderr=STDOUT,
            args=("gcc", psrc, "-o", "proxy", "-l", "pcap")
    )

    r = gcc_proc.wait()
    if r != 0:
        out, _ = gcc_proc.communicate()
        print(out)
        exit(1)

if __name__ == "__main__":

    build_prox("proxy.c")
    wait_on_controller()

    mn = Mininet(
        topo=MyTopo(),
        autoSetMacs=True,
        autoStaticArp=True,
        controller=RemoteController('c0',port=6653),
        switch=OVSKernelSwitch
    )

    mn.start()

    sleep(0.5)

    # Each host pings the magic debug address so its MAC address gets recorded
    for src in mn.hosts:
        # setARP bypasses the ARP protocol
        src.setARP(ip=MAGIC_IP, mac=MAGIC_MAC)
        src.cmd("ping", "-c1", "-W1", MAGIC_IP)

    px = Prox(mn.getNodeByName("prox"), "proxy.log")
    px.start()
    mn.interact()

4. Running It and Experimental Results

4.1 Running the Code

1. Run the Floodlight project modified in the previous tutorial from Eclipse. The Floodlight controller will listen for OpenFlow switches on 0.0.0.0:6653; for how to run the Floodlight controller, see here. If it starts correctly, the console prints the following:

2017-01-07 10:58:21.707 INFO  [n.f.c.m.FloodlightModuleLoader] Loading modules from src/main/resources/floodlightdefault.properties
2017-01-07 10:58:21.883 WARN  [n.f.r.RestApiServer] HTTPS disabled; HTTPS will not be used to connect to the REST API.
2017-01-07 10:58:21.883 WARN  [n.f.r.RestApiServer] HTTP enabled; Allowing unsecure access to REST API on port 8080.
2017-01-07 10:58:21.883 WARN  [n.f.r.RestApiServer] CORS access control allow ALL origins: true
2017-01-07 10:58:22.57 WARN  [n.f.c.i.OFSwitchManager] SSL disabled. Using unsecure connections between Floodlight and switches.
2017-01-07 10:58:22.57 INFO  [n.f.c.i.OFSwitchManager] Clear switch flow tables on initial handshake as master: TRUE
2017-01-07 10:58:22.57 INFO  [n.f.c.i.OFSwitchManager] Clear switch flow tables on each transition to master: TRUE
2017-01-07 10:58:22.63 INFO  [n.f.c.i.OFSwitchManager] Setting 0x1 as the default max tables to receive table-miss flow
2017-01-07 10:58:22.124 INFO  [n.f.c.i.OFSwitchManager] OpenFlow version OF_15 will be advertised to switches. Supported fallback versions [OF_10, OF_11, OF_12, OF_13, OF_14, OF_15]
2017-01-07 10:58:22.125 INFO  [n.f.c.i.OFSwitchManager] Listening for OpenFlow switches on [0.0.0.0]:6653
...

2. Run the run.py script, then use the following commands to check that the network is working (a scripted variant of these checks is sketched after the list):

  • nodes
  • net
  • h1 ping h3
  • h2 ping h3
  • h1 ping h2
  • h1 ping prox
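
The same checks can also be scripted instead of typed at the Mininet CLI. Below is a minimal sketch, assuming the mn object from run.py and placed before mn.interact(); Mininet's ping() returns the packet-loss percentage across the listed host pairs. prox is deliberately left out, since pinging it from h1/h2 is expected to fail in this design:

# Scripted connectivity check, e.g. placed in run.py before mn.interact()
h1, h2, h3 = mn.getNodeByName("h1", "h2", "h3")

# ping() returns the percentage of lost packets across all listed host pairs
loss = mn.ping(hosts=[h1, h2, h3], timeout="2")
print("packet loss between h1/h2/h3: %d%%" % loss)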

4.2 Experimental Results

The output below shows that the results match our expectations: traffic between h1 and h2 uses direct-routing mode, while their connections to h3 use proxy mode, as proxy.log (produced by proxy) confirms. However, the latency on the proxied path is very high (>1000 ms), presumably because the proxy does not forward packets promptly, or due to some similar issue. Moreover, h1 and h2 are completely unaware of the proxy host prox: traffic between them and prox is in drop mode, so they cannot ping the prox proxy host.

peng@peng-virtual-machine:~/Downloads/TransHttpProxy$ sudo ./run.py
proxy is running
mininet> nodes
available nodes are: 
c0 h1 h2 h3 prox s1 s2 s3
mininet> net
h1 h1-eth0:s1-eth1
h2 h2-eth0:s1-eth2
h3 h3-eth0:s3-eth1
prox prox-eth0:s2-eth1
s1 lo:  s1-eth1:h1-eth0 s1-eth2:h2-eth0 s1-eth3:s2-eth2
s2 lo:  s2-eth1:prox-eth0 s2-eth2:s1-eth3 s2-eth3:s3-eth2
s3 lo:  s3-eth1:h3-eth0 s3-eth2:s2-eth3
c0
mininet> h1 ping h3
PING 10.0.0.3 (10.0.0.3) 56(84) bytes of data.
64 bytes from 10.0.0.3: icmp_seq=2 ttl=64 time=1634 ms
64 bytes from 10.0.0.3: icmp_seq=3 ttl=64 time=1629 ms
64 bytes from 10.0.0.3: icmp_seq=4 ttl=64 time=1629 ms
64 bytes from 10.0.0.3: icmp_seq=5 ttl=64 time=1629 ms
^C
--- 10.0.0.3 ping statistics ---
7 packets transmitted, 4 received, 42% packet loss, time 6016ms
rtt min/avg/max/mdev = 1629.310/1630.597/1634.350/2.166 ms, pipe 2
mininet> h2 ping h3
PING 10.0.0.3 (10.0.0.3) 56(84) bytes of data.
64 bytes from 10.0.0.3: icmp_seq=2 ttl=64 time=1996 ms
64 bytes from 10.0.0.3: icmp_seq=3 ttl=64 time=1996 ms
64 bytes from 10.0.0.3: icmp_seq=4 ttl=64 time=1993 ms
64 bytes from 10.0.0.3: icmp_seq=5 ttl=64 time=1993 ms
64 bytes from 10.0.0.3: icmp_seq=6 ttl=64 time=1993 ms
^C
--- 10.0.0.3 ping statistics ---
8 packets transmitted, 5 received, 37% packet loss, time 7014ms
rtt min/avg/max/mdev = 1993.094/1994.576/1996.525/2.127 ms, pipe 2
mininet> h1 ping h2
PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
64 bytes from 10.0.0.2: icmp_seq=2 ttl=64 time=0.195 ms
64 bytes from 10.0.0.2: icmp_seq=3 ttl=64 time=0.074 ms
64 bytes from 10.0.0.2: icmp_seq=4 ttl=64 time=0.052 ms
64 bytes from 10.0.0.2: icmp_seq=5 ttl=64 time=0.081 ms
^C
--- 10.0.0.2 ping statistics ---
5 packets transmitted, 4 received, 20% packet loss, time 4007ms
rtt min/avg/max/mdev = 0.052/0.100/0.195/0.056 ms
mininet> h1 ping prox
PING 10.0.0.4 (10.0.0.4) 56(84) bytes of data.
^C
--- 10.0.0.4 ping statistics ---
9 packets transmitted, 0 received, 100% packet loss, time 8057ms

mininet> exit

The proxy program running on the proxy host prox produces a proxy.log file in which we can inspect the forwarded packets; it records each forwarded packet's source and destination IP addresses and its protocol type:

10.0.0.1 -->        10.0.0.3  [ICMP]
10.0.0.3 -->        10.0.0.1  [ICMP]
10.0.0.1 -->        10.0.0.3  [ICMP]
10.0.0.3 -->        10.0.0.1  [ICMP]
10.0.0.1 -->        10.0.0.3  [ICMP]
10.0.0.3 -->        10.0.0.1  [ICMP]
10.0.0.1 -->        10.0.0.3  [ICMP]
10.0.0.3 -->        10.0.0.1  [ICMP]
10.0.0.1 -->        10.0.0.3  [ICMP]
10.0.0.3 -->        10.0.0.1  [ICMP]
...

Finally, while Mininet is still running, we can inspect the switches' flow tables with the ovs-ofctl dump-flows command:

peng@peng-virtual-machine:~$ sudo ovs-ofctl dump-flows s1
NXST_FLOW reply (xid=0x4):
 cookie=0x0, duration=9.825s, table=0, n_packets=8, n_bytes=784, idle_timeout=20, idle_age=1, priority=1,ip,in_port=1,dl_src=00:00:00:00:00:01,dl_dst=00:00:00:00:00:03 actions=output:3
 cookie=0x0, duration=9.825s, table=0, n_packets=7, n_bytes=686, idle_timeout=20, idle_age=0, priority=1,ip,in_port=3,dl_src=00:00:00:00:00:03,dl_dst=00:00:00:00:00:01 actions=output:1
 cookie=0x0, duration=209.230s, table=0, n_packets=57, n_bytes=6431, idle_age=4, priority=0 actions=CONTROLLER:65535
peng@peng-virtual-machine:~$ sudo ovs-ofctl dump-flows s2
NXST_FLOW reply (xid=0x4):
 cookie=0x0, duration=11.232s, table=0, n_packets=8, n_bytes=784, idle_timeout=20, idle_age=3, priority=1,ip,in_port=2,dl_src=00:00:00:00:00:01,dl_dst=00:00:00:00:00:03 actions=output:1
 cookie=0x0, duration=11.228s, table=0, n_packets=8, n_bytes=784, idle_timeout=20, idle_age=2, priority=1,ip,in_port=1,dl_src=00:00:00:00:00:01,dl_dst=00:00:00:00:00:03 actions=output:3
 cookie=0x0, duration=11.228s, table=0, n_packets=8, n_bytes=784, idle_timeout=20, idle_age=2, priority=1,ip,in_port=3,dl_src=00:00:00:00:00:03,dl_dst=00:00:00:00:00:01 actions=output:1
 cookie=0x0, duration=11.228s, table=0, n_packets=8, n_bytes=784, idle_timeout=20, idle_age=1, priority=1,ip,in_port=1,dl_src=00:00:00:00:00:03,dl_dst=00:00:00:00:00:01 actions=output:2
 cookie=0x0, duration=210.640s, table=0, n_packets=87, n_bytes=10534, idle_age=6, priority=0 actions=CONTROLLER:65535
peng@peng-virtual-machine:~$ sudo ovs-ofctl dump-flows s3
NXST_FLOW reply (xid=0x4):
 cookie=0x0, duration=12.783s, table=0, n_packets=8, n_bytes=784, idle_timeout=20, idle_age=3, priority=1,ip,in_port=2,dl_src=00:00:00:00:00:01,dl_dst=00:00:00:00:00:03 actions=output:1
 cookie=0x0, duration=12.783s, table=0, n_packets=8, n_bytes=784, idle_timeout=20, idle_age=3, priority=1,ip,in_port=1,dl_src=00:00:00:00:00:03,dl_dst=00:00:00:00:00:01 actions=output:2
 cookie=0x0, duration=212.208s, table=0, n_packets=48, n_bytes=5649, idle_age=7, priority=0 actions=CONTROLLER:65535

Taking the first rule on switch s1 as an example:

cookie=0x0, duration=9.825s, table=0, n_packets=8, n_bytes=784, idle_timeout=20, idle_age=1, priority=1,ip,in_port=1,dl_src=00:00:00:00:00:01,dl_dst=00:00:00:00:00:03 actions=output:3

If a packet enters s1 through port 1 (s1-eth1) with source MAC address 00:00:00:00:00:01 and destination MAC address 00:00:00:00:00:03, it is forwarded out through port 3 of s1. Note that this rule has priority=1; if another rule also matches with a higher priority, the higher-priority rule is executed instead.
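
Besides ovs-ofctl, the flow entries can also be read from Floodlight's REST API. The following is a hedged Python 2 sketch, assuming Floodlight's default REST port 8080 and the /wm/core/switch/<dpid>/flow/json statistics endpoint of v1.x; the exact JSON layout may vary between versions:

from __future__ import print_function
import json
import urllib2

# s1's default datapath id in Mininet is 1, written as a 16-digit hex DPID
DPID = "00:00:00:00:00:00:00:01"
url = "http://localhost:8080/wm/core/switch/%s/flow/json" % DPID

# The reply is expected to contain a "flows" list with one entry per rule
reply = json.load(urllib2.urlopen(url))
for flow in reply.get("flows", []):
    print(flow.get("priority"), flow.get("match"), flow.get("instructions"))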

5. Summary

All of the code is available at TransHttpProxDemo. A few points worth noting:

  • With Floodlight v1.3, always set priority>0 when creating a FlowMod. The switch's table-miss rule, which delivers unmatched packets to Floodlight, has priority=0; if other priority=0 rules exist, they are never triggered even when they match, and the packets are handed to the controller as unmatched instead. Setting priority>0 lets the switch execute the rule rather than fall back to the default delivery (a sketch of pushing such a rule follows this list)
  • The Floodlight v1.3 controller listens on port 6653, while Mininet connects to port 6633 by default, so the port Mininet uses to reach the controller must be explicitly set to 6653
  • Everything run.py executes could equally be typed as commands in a terminal
  • To know every host's MAC address and attached switch right from the start, we use a small trick: every host pings the magic debug IP, and the controller records the mappings. This helps Floodlight find the path between two hosts
  • The experiment does not consider link latency or bandwidth; it only verifies that both proxy mode and direct mode can ping through, so there is certainly room for improvement
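
As an illustration of the priority point above, the following hedged Python 2 sketch pushes a priority=1 rule, equivalent to s1's first rule, through Floodlight's Static Flow Pusher REST API. It assumes the /wm/staticflowpusher/json endpoint of Floodlight v1.x; field names differ slightly between versions:

from __future__ import print_function
import json
import urllib2

flow = {
    "switch":   "00:00:00:00:00:00:00:01",  # s1
    "name":     "h1-to-h3-demo",
    "priority": "1",                         # must be > 0, see the first point
    "in_port":  "1",
    "eth_type": "0x0800",                    # match IP packets only
    "eth_src":  "00:00:00:00:00:01",
    "eth_dst":  "00:00:00:00:00:03",
    "active":   "true",
    "actions":  "output=3",
}
req = urllib2.Request("http://localhost:8080/wm/staticflowpusher/json",
                      json.dumps(flow),
                      {"Content-Type": "application/json"})
print(urllib2.urlopen(req).read())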