Compare commits


10 Commits

8 changed files with 328 additions and 288 deletions

View File

@@ -19,7 +19,7 @@ type RunCmd struct {
 	DNSSEC              bool     `long:"dnssec" help:"Enable DNSSEC"`
 	AuthoritativeDNSSEC bool     `short:"a" long:"auth-dnssec" help:"Use authoritative DNSSEC validation instead of trusting resolver"`
 	KeepAlive           bool     `short:"k" long:"keep-alive" help:"Use persistent connections"`
-	Interface           string   `long:"iface" default:"any" help:"Capture interface (e.g., eth0, any)"`
+	Interface           string   `long:"iface" default:"veth1" help:"Capture interface (e.g., eth0, any)"`
 	Servers             []string `short:"s" long:"server" help:"Upstream servers (udp://..., tls://..., https://..., doq://...)"`
 }

View File

@@ -3,9 +3,7 @@ package capture
 import (
 	"context"
 	"fmt"
-	"net"
 	"os"
-	"strings"
 	"sync"

 	"github.com/google/gopacket"
@@ -21,102 +19,33 @@ type PacketCapture struct {
 	err    error
 }

-func getLocalIPs() ([]string, error) {
-	var localIPs []string
-	addrs, err := net.InterfaceAddrs()
-	if err != nil {
-		return nil, fmt.Errorf("failed to get network interfaces: %w", err)
-	}
-	for _, addr := range addrs {
-		var ip net.IP
-		switch v := addr.(type) {
-		case *net.IPNet:
-			ip = v.IP
-		case *net.IPAddr:
-			ip = v.IP
-		}
-		// Skip loopback
-		if ip == nil || ip.IsLoopback() {
-			continue
-		}
-		localIPs = append(localIPs, ip.String())
-	}
-	if len(localIPs) == 0 {
-		return nil, fmt.Errorf("no non-loopback IPs found")
-	}
-	return localIPs, nil
-}
-
-func buildBPFFilter(protocol string, localIPs []string) string {
-	// Build filter for this machine's IPs
-	var hostFilters []string
-	for _, ip := range localIPs {
-		hostFilters = append(hostFilters, fmt.Sprintf("host %s", ip))
-	}
-	testMachineFilter := "(" + strings.Join(hostFilters, " or ") + ")"
-
-	// Protocol-specific ports
-	var portFilter string
-	switch strings.ToLower(protocol) {
-	case "udp":
-		portFilter = "(port 53)"
-	case "tls", "dot":
-		portFilter = "(port 53 or port 853)"
-	case "https", "doh":
-		portFilter = "(port 53 or port 443)"
-	case "doq":
-		portFilter = "(port 53 or port 853)"
-	case "doh3":
-		portFilter = "(port 53 or port 443)"
-	default:
-		portFilter = "(port 53 or port 443 or port 853)"
-	}
-
-	// Exclude private-to-private traffic (LAN-to-LAN, includes Docker ranges)
-	privateExclude := "not (src net (10.0.0.0/8 or 172.16.0.0/12 or 192.168.0.0/16) and dst net (10.0.0.0/8 or 172.16.0.0/12 or 192.168.0.0/16))"
-
-	// Combine: test machine AND protocol ports AND NOT (private to private)
-	return testMachineFilter + " and " + portFilter + " and " + privateExclude
-}
-
-func NewPacketCapture(iface, outputPath, protocol string) (*PacketCapture, error) {
+func NewPacketCapture(iface, outputPath string) (*PacketCapture, error) {
 	handle, err := pcap.OpenLive(iface, 65535, true, pcap.BlockForever)
 	if err != nil {
 		return nil, fmt.Errorf("pcap open (try running as root): %w", err)
 	}
-	// Get local IPs dynamically
-	localIPs, err := getLocalIPs()
-	if err != nil {
-		handle.Close()
-		return nil, fmt.Errorf("failed to get local IPs: %w", err)
-	}
-	// Build and apply BPF filter
-	bpfFilter := buildBPFFilter(protocol, localIPs)
-	if err := handle.SetBPFFilter(bpfFilter); err != nil {
-		handle.Close()
-		return nil, fmt.Errorf("failed to set BPF filter '%s': %w", bpfFilter, err)
-	}
-	file, err := os.Create(outputPath)
+	// Check if file exists
+	fileExists := false
+	if _, err := os.Stat(outputPath); err == nil {
+		fileExists = true
+	}
+	file, err := os.OpenFile(outputPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
 	if err != nil {
 		handle.Close()
-		return nil, fmt.Errorf("create pcap file: %w", err)
+		return nil, fmt.Errorf("create/open pcap file: %w", err)
 	}
 	writer := pcapgo.NewWriter(file)
-	if err := writer.WriteFileHeader(65535, handle.LinkType()); err != nil {
-		handle.Close()
-		file.Close()
-		return nil, fmt.Errorf("pcap header: %w", err)
+	// Only write header if file is new
+	if !fileExists {
+		if err := writer.WriteFileHeader(65535, handle.LinkType()); err != nil {
+			handle.Close()
+			file.Close()
+			return nil, fmt.Errorf("pcap header: %w", err)
+		}
 	}
 	return &PacketCapture{
@@ -133,6 +62,8 @@ func (pc *PacketCapture) Start(ctx context.Context) error {
 	go func() {
 		for {
 			select {
+			case <-ctx.Done():
+				return
 			case pkt, ok := <-pktCh:
 				if !ok {
 					return
@@ -145,8 +76,6 @@ func (pc *PacketCapture) Start(ctx context.Context) error {
 				}
 				pc.mu.Unlock()
 			}
-			case <-ctx.Done():
-				return
 			}
 		}
 	}()
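Since NewPacketCapture now opens the capture file in append mode and writes the pcap file header only when the file is new, repeated runs against the same server accumulate packets in one pcap. A quick sanity check of such a file could look like this (illustrative path; the real one comes from GenerateOutputPaths):

  # packet/byte totals across all appended runs
  tshark -r results/1.1.1.1/udp.pcap -q -z io,stat,0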

View File

@@ -60,7 +60,7 @@ func (r *MeasurementRunner) Run() error {
 	}

 	for _, upstream := range r.config.Servers {
-		if err := r.runMeasurement(upstream, domains, qType); err != nil {
+		if err := r.runPerUpstream(upstream, domains, qType); err != nil {
 			fmt.Fprintf(os.Stderr, "error on server %s: %v\n", upstream, err)
 		}
 	}
@@ -68,7 +68,16 @@ func (r *MeasurementRunner) Run() error {
 	return nil
 }

-func (r *MeasurementRunner) runMeasurement(upstream string, domains []string, qType uint16) error {
+func (r *MeasurementRunner) setupDNSClient(upstream string) (client.DNSClient, error) {
+	opts := client.Options{
+		DNSSEC:              r.config.DNSSEC,
+		AuthoritativeDNSSEC: r.config.AuthoritativeDNSSEC,
+		KeepAlive:           r.config.KeepAlive,
+	}
+	return client.New(upstream, opts)
+}
+
+func (r *MeasurementRunner) runPerUpstream(upstream string, domains []string, qType uint16) error {
 	// Setup DNS client
 	dnsClient, err := r.setupDNSClient(upstream)
 	if err != nil {
@@ -93,11 +102,8 @@ func (r *MeasurementRunner) runMeasurement(upstream string, domains []string, qT
 	relPath, _ := filepath.Rel(r.config.OutputDir, csvPath)
 	fmt.Printf(">>> Measuring %s (dnssec=%v, auth=%v%s) → %s\n", upstream, r.config.DNSSEC, r.config.AuthoritativeDNSSEC, keepAliveStr, relPath)

-	// Setup packet capture
-	proto := DetectProtocol(upstream)
-
 	// Setup packet capture with protocol-aware filtering
-	packetCapture, err := capture.NewPacketCapture(r.config.Interface, pcapPath, proto)
+	packetCapture, err := capture.NewPacketCapture(r.config.Interface, pcapPath)
 	if err != nil {
 		return err
 	}
@@ -115,15 +121,6 @@ func (r *MeasurementRunner) runMeasurement(upstream string, domains []string, qT
 	return r.runQueries(dnsClient, upstream, domains, qType, writer, packetCapture)
 }

-func (r *MeasurementRunner) setupDNSClient(upstream string) (client.DNSClient, error) {
-	opts := client.Options{
-		DNSSEC:              r.config.DNSSEC,
-		AuthoritativeDNSSEC: r.config.AuthoritativeDNSSEC,
-		KeepAlive:           r.config.KeepAlive,
-	}
-	return client.New(upstream, opts)
-}
-
 func (r *MeasurementRunner) runQueries(dnsClient client.DNSClient, upstream string,
 	domains []string, qType uint16, writer *results.MetricsWriter,
 	packetCapture *capture.PacketCapture) error {

View File

@@ -31,27 +31,35 @@ type MetricsWriter struct {
 }

 func NewMetricsWriter(path string) (*MetricsWriter, error) {
-	file, err := os.Create(path)
+	// Check if file exists
+	fileExists := false
+	if _, err := os.Stat(path); err == nil {
+		fileExists = true
+	}
+
+	// Open in append mode
+	file, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
 	if err != nil {
-		return nil, fmt.Errorf("create csv output: %w", err)
+		return nil, fmt.Errorf("create/open csv output: %w", err)
 	}
 	writer := csv.NewWriter(file)

-	// Write CSV header
-	header := []string{
-		"domain", "query_type", "protocol", "dnssec", "auth_dnssec", "keep_alive",
-		"dns_server", "timestamp", "duration_ns", "duration_ms",
-		"request_size_bytes", "response_size_bytes", "response_code", "error",
-	}
-	if err := writer.Write(header); err != nil {
-		file.Close()
-		return nil, fmt.Errorf("write csv header: %w", err)
+	// Only write header if file is new
+	if !fileExists {
+		header := []string{
+			"domain", "query_type", "protocol", "dnssec", "auth_dnssec", "keep_alive",
+			"dns_server", "timestamp", "duration_ns", "duration_ms",
+			"request_size_bytes", "response_size_bytes", "response_code", "error",
+		}
+		if err := writer.Write(header); err != nil {
+			file.Close()
+			return nil, fmt.Errorf("write csv header: %w", err)
+		}
+		writer.Flush()
 	}
-	writer.Flush()

 	return &MetricsWriter{
 		writer: writer,
 		file:   file,

View File

@@ -5,25 +5,17 @@ import (
"net/url" "net/url"
"path/filepath" "path/filepath"
"strings" "strings"
"time"
) )
func GenerateOutputPaths(outputDir, upstream string, dnssec, authDNSSEC, keepAlive bool) (csvPath, pcapPath string) { func GenerateOutputPaths(outputDir, upstream string, dnssec, authDNSSEC, keepAlive bool) (csvPath, pcapPath string) {
proto := DetectProtocol(upstream) proto := DetectProtocol(upstream)
cleanServer := cleanServerName(upstream) cleanServer := cleanServerName(upstream)
// Create date-based subdirectory subDir := filepath.Join(outputDir, cleanServer)
date := time.Now().Format("2006-01-02")
timestamp := time.Now().Format("150405")
// Organize hierarchically: resolver/date/filename
subDir := filepath.Join(outputDir, cleanServer, date)
// Create simple filename
base := proto base := proto
// Add flags if enabled
var flags []string var flags []string
if dnssec { if dnssec {
if authDNSSEC { if authDNSSEC {
flags = append(flags, "auth") flags = append(flags, "auth")
@@ -39,11 +31,8 @@ func GenerateOutputPaths(outputDir, upstream string, dnssec, authDNSSEC, keepAli
 		base = fmt.Sprintf("%s-%s", base, strings.Join(flags, "-"))
 	}

-	// Add timestamp
-	filename := fmt.Sprintf("%s-%s", base, timestamp)
-
-	return filepath.Join(subDir, filename+".csv"),
-		filepath.Join(subDir, filename+".pcap")
+	return filepath.Join(subDir, base+".csv"),
+		filepath.Join(subDir, base+".pcap")
 }

 func cleanServerName(server string) string {
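With the date and time components removed, each resolver and flag combination now maps to one fixed pair of output files, which is what lets the CSV and pcap writers append across runs. A sketch of the resulting layout (names are illustrative; the exact strings come from cleanServerName and the flag handling above):

  results/
    <clean-server-name>/
      <proto>.csv          # e.g. udp.csv when no flags are set
      <proto>.pcap
      <proto>-auth.csv     # flag suffixes joined with "-" as in the code above
      <proto>-auth.pcap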

run.sh Normal file → Executable file
View File

@@ -1,8 +1,72 @@
 #!/bin/bash

-TOOL_PATH="$1"/"qol"
-DOMAINS_FILE="$1"/"domains.txt"
-OUTPUT_DIR="$1"/"results"
+# Exit on error
+set -e
+
+# Default values
+TOOL_PATH="./qol"
+DOMAINS_FILE="./domains.txt"
+OUTPUT_DIR="./results"
+INTERFACE="veth1"
+TIMEOUT="5s"
+SLEEP_TIME="1"
+
+# Parse arguments
+while [[ $# -gt 0 ]]; do
+    case $1 in
+        -t|--tool-path)
+            TOOL_PATH="$2"
+            shift 2
+            ;;
+        -d|--domains-file)
+            DOMAINS_FILE="$2"
+            shift 2
+            ;;
+        -o|--output-dir)
+            OUTPUT_DIR="$2"
+            shift 2
+            ;;
+        -I|--interface)
+            INTERFACE="$2"
+            shift 2
+            ;;
+        -T|--timeout)
+            TIMEOUT="$2"
+            shift 2
+            ;;
+        -s|--sleep)
+            SLEEP_TIME="$2"
+            shift 2
+            ;;
+        --help)
+            echo "Usage: $0 [OPTIONS]"
+            echo ""
+            echo "Options:"
+            echo "  -t, --tool-path PATH      Path to qol tool (default: ./qol)"
+            echo "  -d, --domains-file PATH   Path to domains file (default: ./domains.txt)"
+            echo "  -o, --output-dir PATH     Output directory (default: ./results)"
+            echo "  -I, --interface NAME      Network interface (default: veth1)"
+            echo "  -T, --timeout DURATION    Timeout duration (default: 5s)"
+            echo "  -s, --sleep SECONDS       Sleep between runs (default: 1)"
+            echo "  --help                    Show this help"
+            exit 0
+            ;;
+        *)
+            echo "Unknown option: $1"
+            echo "Use --help for usage information"
+            exit 1
+            ;;
+    esac
+done
+
+echo "Configuration:"
+echo "  Tool path:    $TOOL_PATH"
+echo "  Domains file: $DOMAINS_FILE"
+echo "  Output dir:   $OUTPUT_DIR"
+echo "  Interface:    $INTERFACE"
+echo "  Timeout:      $TIMEOUT"
+echo "  Sleep time:   ${SLEEP_TIME}s"
+echo ""

 # Connection-based protocols that benefit from keep-alive (TCP-based)
 CONN_SERVERS=(
@@ -35,8 +99,8 @@ CONNLESS_SERVERS=(
 # Common args
 COMMON_ARGS=(
     "$DOMAINS_FILE"
-    --interface eth0
-    --timeout 5s
+    --interface "$INTERFACE"
+    --timeout "$TIMEOUT"
 )

 # Combinations for TCP-based connection protocols
@@ -78,7 +142,7 @@ for FLAGS in "${CONN_COMBINATIONS[@]}"; do
"${CONN_SERVERS[@]}" \ "${CONN_SERVERS[@]}" \
"${FLAGS_ARRAY[@]}" "${FLAGS_ARRAY[@]}"
sleep 1 sleep "$SLEEP_TIME"
done done
echo "" echo ""
@@ -94,7 +158,7 @@ for FLAGS in "${NO_KEEPALIVE_COMBINATIONS[@]}"; do
"${QUIC_SERVERS[@]}" \ "${QUIC_SERVERS[@]}" \
"${FLAGS_ARRAY[@]}" "${FLAGS_ARRAY[@]}"
sleep 1 sleep "$SLEEP_TIME"
done done
echo "" echo ""
@@ -110,7 +174,7 @@ for FLAGS in "${NO_KEEPALIVE_COMBINATIONS[@]}"; do
"${CONNLESS_SERVERS[@]}" \ "${CONNLESS_SERVERS[@]}" \
"${FLAGS_ARRAY[@]}" "${FLAGS_ARRAY[@]}"
sleep 1 sleep "$SLEEP_TIME"
done done
echo "" echo ""

View File

@@ -1,116 +1,68 @@
 #!/usr/bin/env python3
 """
 Add network metrics from PCAP files to DNS CSV files.
-Adds: pcap_network_bytes_in, pcap_network_bytes_out, pcap_overhead_bytes
+Adds: raw_bytes_total, raw_packet_count, overhead_bytes, efficiency_percent
 """

 import csv
 import os
 import argparse
+import re
 from pathlib import Path
 from datetime import datetime, timezone
-import dpkt
-import socket
+from scapy.all import rdpcap

-# Test machine IPs
-TEST_IPS = {
-    '10.0.0.50',
-    '2001:818:e73e:ba00:5506:dfd4:ed8b:96e',
-    'fe80::fe98:c62e:4463:9a2d'
-}
-
-
-def inet_to_str(inet):
-    """Convert inet bytes to IP string"""
-    try:
-        return socket.inet_ntop(socket.AF_INET, inet)
-    except ValueError:
-        try:
-            return socket.inet_ntop(socket.AF_INET6, inet)
-        except ValueError:
-            return None
+
+def parse_timestamp(ts_str):
+    """Parse timestamp with timezone and nanoseconds (RFC3339Nano)."""
+    match = re.match(
+        r'(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})\.(\d+)([\+\-]\d{2}:\d{2})',
+        ts_str
+    )
+    if not match:
+        raise ValueError(f"Invalid timestamp format: {ts_str}")
+
+    base, nanos, tz = match.groups()
+    micros = nanos[:6].ljust(6, '0')
+    iso_str = f"{base}.{micros}{tz}"
+    dt = datetime.fromisoformat(iso_str)
+    full_nanos = int(nanos.ljust(9, '0'))
+
+    return dt, full_nanos


 def read_pcap(pcap_path):
-    """Read PCAP and return list of (timestamp_ns, size, src_ip, dst_ip)"""
+    """Read PCAP and return list of (timestamp_epoch, size)."""
     packets = []
-
-    with open(pcap_path, 'rb') as f:
-        try:
-            pcap = dpkt.pcap.Reader(f)
-        except:
-            f.seek(0)
-            pcap = dpkt.pcapng.Reader(f)
-
-        for ts, buf in pcap:
-            try:
-                # Convert PCAP timestamp (float seconds) to nanoseconds
-                timestamp_ns = int(ts * 1_000_000_000)
-                size = len(buf)
-
-                eth = dpkt.ethernet.Ethernet(buf)
-                src_ip = dst_ip = None
-
-                if isinstance(eth.data, dpkt.ip.IP):
-                    src_ip = inet_to_str(eth.data.src)
-                    dst_ip = inet_to_str(eth.data.dst)
-                elif isinstance(eth.data, dpkt.ip6.IP6):
-                    src_ip = inet_to_str(eth.data.src)
-                    dst_ip = inet_to_str(eth.data.dst)
-
-                if src_ip and dst_ip:
-                    packets.append((timestamp_ns, size, src_ip, dst_ip))
-            except (dpkt.dpkt.NeedData, dpkt.dpkt.UnpackError):
-                continue
-
+    try:
+        pkts = rdpcap(str(pcap_path))
+        for pkt in pkts:
+            timestamp = float(pkt.time)
+            length = len(pkt)
+            packets.append((timestamp, length))
+    except Exception as e:
+        print(f"  ❌ Error reading PCAP: {e}")
+        return []
+
     return packets


-def find_packets_in_window(packets, start_ns, duration_ns):
-    """Find packets within exact time window (nanosecond precision)"""
-    end_ns = start_ns + duration_ns
-    matching = []
-    for timestamp_ns, size, src_ip, dst_ip in packets:
-        if start_ns <= timestamp_ns <= end_ns:
-            matching.append((size, src_ip, dst_ip))
-    return matching
-
-
-def calculate_metrics(packets):
-    """Calculate network metrics from packets"""
-    bytes_in = 0
-    bytes_out = 0
-
-    for size, src_ip, dst_ip in packets:
-        if dst_ip in TEST_IPS:
-            bytes_in += size
-        elif src_ip in TEST_IPS:
-            bytes_out += size
-
-    return {
-        'pcap_network_bytes_in': bytes_in,
-        'pcap_network_bytes_out': bytes_out,
-        'pcap_overhead_bytes': bytes_in + bytes_out
-    }
-
-
-def parse_timestamp_to_ns(ts_str):
-    """Parse ISO timestamp to nanoseconds since epoch"""
-    try:
-        dt = datetime.fromisoformat(ts_str.replace('Z', '+00:00'))
-        if dt.tzinfo is not None:
-            dt = dt.astimezone(timezone.utc)
-        # Convert to nanoseconds since epoch
-        return int(dt.timestamp() * 1_000_000_000)
-    except ValueError:
-        return None
+def find_packets_in_window(packets, start_ts, start_nanos, duration_ns):
+    """Find packets within exact time window."""
+    start_epoch = start_ts.timestamp()
+    start_epoch += (start_nanos % 1_000_000) / 1_000_000_000
+    end_epoch = start_epoch + (duration_ns / 1_000_000_000)
+
+    total_bytes = 0
+    packet_count = 0
+
+    for pkt_ts, pkt_len in packets:
+        if start_epoch <= pkt_ts <= end_epoch:
+            total_bytes += pkt_len
+            packet_count += 1
+
+    return total_bytes, packet_count


 def enhance_csv(csv_path, pcap_path, output_path, debug=False):
-    """Add PCAP metrics to CSV"""
+    """Add PCAP metrics to CSV."""
     if not os.path.exists(pcap_path):
         print(f"⚠️  PCAP not found: {pcap_path}")
         return False
@@ -118,76 +70,92 @@ def enhance_csv(csv_path, pcap_path, output_path, debug=False):
print(f"Processing: {os.path.basename(csv_path)}") print(f"Processing: {os.path.basename(csv_path)}")
# Read PCAP # Read PCAP
try: packets = read_pcap(pcap_path)
packets = read_pcap(pcap_path) print(f" Loaded {len(packets)} packets")
print(f" Loaded {len(packets)} packets")
if packets and debug:
first_pcap_ns = packets[0][0]
last_pcap_ns = packets[-1][0]
print(f" First PCAP packet: {first_pcap_ns} ns")
print(f" Last PCAP packet: {last_pcap_ns} ns")
print(f" PCAP duration: {(last_pcap_ns - first_pcap_ns) / 1e9:.3f}s")
except Exception as e:
print(f" ❌ Error reading PCAP: {e}")
return False
if not packets: if not packets:
print(" ❌ No packets found") print(" ❌ No packets found")
return False return False
if packets and debug:
first_pcap = packets[0][0]
last_pcap = packets[-1][0]
print(f" First PCAP packet: {first_pcap:.6f}")
print(f" Last PCAP packet: {last_pcap:.6f}")
print(f" PCAP duration: {(last_pcap - first_pcap):.3f}s")
# Read CSV # Read CSV
with open(csv_path, 'r', newline='') as f: with open(csv_path, 'r', newline='') as f:
reader = csv.DictReader(f) reader = csv.DictReader(f)
fieldnames = list(reader.fieldnames) + [ fieldnames = list(reader.fieldnames) + [
'pcap_network_bytes_in', 'raw_bytes_total',
'pcap_network_bytes_out', 'raw_packet_count',
'pcap_overhead_bytes' 'overhead_bytes',
'efficiency_percent'
] ]
rows = list(reader) rows = list(reader)
if rows and debug: if rows and debug:
first_csv_ns = parse_timestamp_to_ns(rows[0]['timestamp']) try:
last_csv_ns = parse_timestamp_to_ns(rows[-1]['timestamp']) first_ts, _ = parse_timestamp(rows[0]['timestamp'])
if first_csv_ns and last_csv_ns: last_ts, _ = parse_timestamp(rows[-1]['timestamp'])
print(f" First CSV query: {first_csv_ns} ns") print(f" First CSV query: {first_ts.timestamp():.6f}")
print(f" Last CSV query: {last_csv_ns} ns") print(f" Last CSV query: {last_ts.timestamp():.6f}")
print(f" CSV duration: {(last_csv_ns - first_csv_ns) / 1e9:.3f}s") offset = packets[0][0] - first_ts.timestamp()
print(f" Time offset (PCAP - CSV): {offset:.3f}s")
# Check alignment except:
offset_ns = packets[0][0] - first_csv_ns pass
print(f" Time offset (PCAP - CSV): {offset_ns / 1e9:.3f}s")
# Enhance rows # Enhance rows
enhanced = [] enhanced = []
matched = 0 matched = 0
for i, row in enumerate(rows): for i, row in enumerate(rows):
ts_ns = parse_timestamp_to_ns(row['timestamp']) try:
if not ts_ns: timestamp, nanos = parse_timestamp(row['timestamp'])
continue duration_ns = int(row['duration_ns'])
duration_ns = int(row.get('duration_ns', 0)) raw_bytes, packet_count = find_packets_in_window(
packets, timestamp, nanos, duration_ns
)
matching_packets = find_packets_in_window(packets, ts_ns, duration_ns) useful_bytes = (
int(row['request_size_bytes']) +
int(row['response_size_bytes'])
)
overhead = raw_bytes - useful_bytes
efficiency = (
(useful_bytes / raw_bytes * 100)
if raw_bytes > 0 else 0
)
row['raw_bytes_total'] = raw_bytes
row['raw_packet_count'] = packet_count
row['overhead_bytes'] = overhead
row['efficiency_percent'] = f"{efficiency:.2f}"
if raw_bytes > 0:
matched += 1
# Debug first few queries
if debug and i < 3:
print(f" Query {i}: {row['domain']}")
print(f" Duration: {duration_ns / 1e6:.3f}ms")
print(f" Matched packets: {packet_count}")
print(f" Raw bytes: {raw_bytes}")
print(f" Useful bytes: {useful_bytes}")
print(f" Efficiency: {efficiency:.2f}%")
except (ValueError, KeyError) as e:
if debug:
print(f" Error processing row {i}: {e}")
row['raw_bytes_total'] = 0
row['raw_packet_count'] = 0
row['overhead_bytes'] = 0
row['efficiency_percent'] = "0.00"
metrics = calculate_metrics(matching_packets)
row.update(metrics)
enhanced.append(row) enhanced.append(row)
if metrics['pcap_overhead_bytes'] > 0:
matched += 1
# Debug first few queries
if debug and i < 3:
print(f" Query {i}: {row['domain']}")
print(f" Start: {ts_ns} ns")
print(f" Duration: {duration_ns} ns ({duration_ns / 1e6:.3f}ms)")
print(f" End: {ts_ns + duration_ns} ns")
print(f" Matched packets: {len(matching_packets)}")
print(f" Bytes: {metrics['pcap_overhead_bytes']}")
print(f" Matched: {matched}/{len(rows)} queries") print(f" Matched: {matched}/{len(rows)} queries")
if matched == 0: if matched == 0:
@@ -204,16 +172,15 @@ def enhance_csv(csv_path, pcap_path, output_path, debug=False):
print(f" ✓ Saved: {output_path}") print(f" ✓ Saved: {output_path}")
return True return True
def main(): def main():
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description='Add PCAP network metrics to DNS CSV files' description='Add PCAP network metrics to DNS CSV files'
) )
parser.add_argument('input_dir', help='Input directory (e.g., results_merged)') parser.add_argument('input_dir', help='Input directory (e.g., results)')
parser.add_argument( parser.add_argument(
'--output', '--output',
default='./results_enhanced', default='./results_enriched',
help='Output directory (default: ./results_enhanced)' help='Output directory (default: ./results_enriched)'
) )
parser.add_argument( parser.add_argument(
'--dry-run', '--dry-run',
@@ -279,6 +246,5 @@ def main():
     return 0 if failed == 0 else 1


 if __name__ == "__main__":
     exit(main())
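The enrichment script now depends on scapy instead of dpkt and parses the Go RFC3339Nano timestamps directly, so the hard-coded TEST_IPS list is gone. Assuming it is saved as enhance_csv.py (the actual filename is not visible in this diff), a typical run with the new defaults would be:

  pip install scapy
  python3 enhance_csv.py ./results --output ./results_enriched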

setup-netns.sh Executable file
View File

@@ -0,0 +1,87 @@
#!/bin/bash
# Exit on error
set -e
# Default values
NETNS_NAME="myapp"
VETH_HOST="veth0"
VETH_NS="veth1"
HOST_IP="192.168.100.1"
NS_IP="192.168.100.2"
SUBNET="192.168.100.0/24"
PHYSICAL_IF="eth0"
# Parse arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        -n|--namespace)
            NETNS_NAME="$2"
            shift 2
            ;;
        -p|--physical-if)
            PHYSICAL_IF="$2"
            shift 2
            ;;
        --help)
            echo "Usage: $0 [OPTIONS]"
            echo ""
            echo "Options:"
            echo "  -n, --namespace NAME     Namespace name (default: myapp)"
            echo "  -p, --physical-if NAME   Physical interface (default: eth0)"
            echo "  --help                   Show this help"
            exit 0
            ;;
        *)
            echo "Unknown option: $1"
            echo "Use --help for usage information"
            exit 1
            ;;
    esac
done
echo "Configuration:"
echo " Namespace: $NETNS_NAME"
echo " Physical interface: $PHYSICAL_IF"
echo ""
echo "Creating network namespace: $NETNS_NAME"
sudo ip netns add $NETNS_NAME
echo "Creating veth pair: $VETH_HOST <-> $VETH_NS"
sudo ip link add $VETH_HOST type veth peer name $VETH_NS
echo "Moving $VETH_NS into namespace"
sudo ip link set $VETH_NS netns $NETNS_NAME
echo "Configuring host side ($VETH_HOST)"
sudo ip addr add $HOST_IP/24 dev $VETH_HOST
sudo ip link set $VETH_HOST up
echo "Configuring namespace side ($VETH_NS)"
sudo ip netns exec $NETNS_NAME ip addr add $NS_IP/24 dev $VETH_NS
sudo ip netns exec $NETNS_NAME ip link set $VETH_NS up
sudo ip netns exec $NETNS_NAME ip link set lo up
sudo ip netns exec $NETNS_NAME ip route add default via $HOST_IP
echo "Enabling IP forwarding"
sudo sysctl -w net.ipv4.ip_forward=1
echo "Disabling IPv6"
sudo ip netns exec $NETNS_NAME sysctl -w net.ipv6.conf.all.disable_ipv6=1
echo "Setting up NAT"
sudo iptables -t nat -A POSTROUTING -s $SUBNET -o $PHYSICAL_IF -j MASQUERADE
echo "Setting up forwarding rules"
sudo iptables -I FORWARD -i $VETH_HOST -o $PHYSICAL_IF -j ACCEPT
sudo iptables -I FORWARD -i $PHYSICAL_IF -o $VETH_HOST -j ACCEPT
echo ""
echo "Done! Network namespace '$NETNS_NAME' is ready."
echo ""
echo "To run your app in the namespace:"
echo " sudo ip netns exec $NETNS_NAME ./your_app"
echo ""
echo "To capture traffic:"
echo " sudo tshark -i $VETH_HOST -w app.pcap"