feat(dnssec): add authoritative and trust-based DNSSEC modes

2025-09-28 13:11:58 +01:00
parent 4a549cfea7
commit a966c1e98d
10 changed files with 345 additions and 184 deletions

.gitignore

@@ -16,3 +16,4 @@
**/tls-key-log.txt
/results
/results.bak


@@ -17,7 +17,7 @@ def map_server_to_resolver(server):
elif 'adguard' in server_lower:
return 'AdGuard'
else:
return server
return server # Fallback to original server name
def extract_from_new_format(filename):
"""Parse new filename format: protocol[-flags]-timestamp.csv"""
@@ -25,65 +25,97 @@ def extract_from_new_format(filename):
parts = base.split('-')
if len(parts) < 2:
return None, None, None
return None, None, None, None
protocol = parts[0]
timestamp = parts[-1]
# Flags are everything between protocol and timestamp
flags_str = '-'.join(parts[1:-1])
dnssec_status = 'on' if 'dnssec' in flags_str else 'off'
# Determine DNSSEC status
if 'auth' in flags_str:
dnssec_status = 'auth' # Authoritative DNSSEC
elif 'trust' in flags_str:
dnssec_status = 'trust' # Trust-based DNSSEC
else:
dnssec_status = 'off'
keepalive_status = 'on' if 'persist' in flags_str else 'off'
return protocol, dnssec_status, keepalive_status
return protocol, dnssec_status, keepalive_status, flags_str
def extract_server_info(file_path, dns_server_field):
"""Extract info using directory structure and filename"""
def extract_server_info_from_csv(row):
"""Extract DNSSEC info from CSV row data"""
dnssec = row.get('dnssec', 'false').lower() == 'true'
auth_dnssec = row.get('auth_dnssec', 'false').lower() == 'true'
keepalive = row.get('keep_alive', 'false').lower() == 'true'
if dnssec:
if auth_dnssec:
dnssec_status = 'auth'
else:
dnssec_status = 'trust'
else:
dnssec_status = 'off'
keepalive_status = 'on' if keepalive else 'off'
return dnssec_status, keepalive_status
def extract_server_info(file_path, row):
"""Extract info using directory structure, filename, and CSV data"""
path = Path(file_path)
# Expect structure like: results/resolver/date/filename.csv
parts = path.parts
if len(parts) >= 3 and parts[-2].isdigit() and len(parts[-2]) == 10: # date folder like 2024-03-01
server = parts[-3] # resolver folder (e.g., cloudflare)
filename = parts[-1]
protocol, dnssec_status, keepalive_status = extract_from_new_format(filename)
if protocol:
return protocol, server, dnssec_status, keepalive_status
# Fallback to old parsing if structure doesn't match
filename = path.name
old_parts = filename.replace('.csv', '').split('_')
if len(old_parts) >= 6:
protocol = old_parts[0]
# First try to get DNSSEC info from CSV row (most accurate)
try:
dnssec_idx = old_parts.index('dnssec')
keepalive_idx = old_parts.index('keepalive')
csv_dnssec_status, csv_keepalive_status = extract_server_info_from_csv(row)
protocol = row.get('protocol', '').lower()
server_parts = old_parts[1:dnssec_idx]
server = '_'.join(server_parts)
# Get server from directory structure
parts = path.parts
if len(parts) >= 4:
potential_date = parts[-2]
# Check if it's a date like YYYY-MM-DD
if len(potential_date) == 10 and potential_date[4] == '-' and potential_date[7] == '-' and potential_date.replace('-', '').isdigit():
server = parts[-3] # resolver folder (e.g., cloudflare)
return protocol, server, csv_dnssec_status, csv_keepalive_status
dnssec_status = old_parts[dnssec_idx + 1] if dnssec_idx + 1 < len(old_parts) else 'off'
keepalive_status = old_parts[keepalive_idx + 1] if keepalive_idx + 1 < len(old_parts) else 'off'
# Fallback to DNS server field
server = row.get('dns_server', '')
return protocol, server, csv_dnssec_status, csv_keepalive_status
return protocol, server, dnssec_status, keepalive_status
except ValueError:
except (KeyError, ValueError):
pass
# Even older format fallback
if len(old_parts) >= 4:
protocol = old_parts[0]
dnssec_status = 'on' if 'dnssec_on' in filename else 'off'
keepalive_status = 'on' if 'keepalive_on' in filename else 'off'
server = '_'.join(old_parts[1:-4]) if len(old_parts) > 4 else old_parts[1]
# Fallback to filename parsing
filename = path.name
protocol, dnssec_status, keepalive_status, flags = extract_from_new_format(filename)
if protocol:
# Get server from directory structure
parts = path.parts
if len(parts) >= 4:
potential_date = parts[-2]
if len(potential_date) == 10 and potential_date[4] == '-' and potential_date[7] == '-' and potential_date.replace('-', '').isdigit():
server = parts[-3]
return protocol, server, dnssec_status, keepalive_status
# Fallback to DNS server field
server = row.get('dns_server', '')
return protocol, server, dnssec_status, keepalive_status
return None, None, None, None
def get_dnssec_display_name(dnssec_status):
"""Convert DNSSEC status to display name"""
if dnssec_status == 'auth':
return 'DNSSEC (Authoritative)'
elif dnssec_status == 'trust':
return 'DNSSEC (Trust-based)'
else:
return 'No DNSSEC'
def analyze_dns_data(root_directory, output_file):
"""Analyze DNS data and generate metrics"""
@@ -103,8 +135,7 @@ def analyze_dns_data(root_directory, output_file):
for row_num, row in enumerate(reader, 2): # Start at 2 since header is row 1
try:
protocol, server, dnssec_status, keepalive_status = extract_server_info(
file_path, row.get('dns_server', ''))
protocol, server, dnssec_status, keepalive_status = extract_server_info(file_path, row)
if protocol and server:
resolver = map_server_to_resolver(server)
@@ -123,13 +154,15 @@ def analyze_dns_data(root_directory, output_file):
print(f"Error processing file {file_path}: {e}")
continue
# Calculate statistics and group by resolver, dnssec, and keepalive
# Calculate statistics grouped by resolver first, then by configuration
resolver_results = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for (resolver, protocol, dnssec, keepalive), durations in measurements.items():
if durations:
stats = {
'protocol': protocol.upper(),
'dnssec': dnssec,
'keepalive': keepalive,
'total_queries': len(durations),
'avg_latency_ms': round(statistics.mean(durations), 3),
'median_latency_ms': round(statistics.median(durations), 3),
@@ -139,23 +172,22 @@ def analyze_dns_data(root_directory, output_file):
'p95_latency_ms': round(statistics.quantiles(durations, n=20)[18], 3) if len(durations) >= 20 else round(max(durations), 3),
'p99_latency_ms': round(statistics.quantiles(durations, n=100)[98], 3) if len(durations) >= 100 else round(max(durations), 3)
}
resolver_results[dnssec][keepalive][resolver].append(stats)
# Group by resolver -> dnssec -> keepalive -> protocol
resolver_results[resolver][dnssec][keepalive].append(stats)
# Sort each resolver's results by average latency
for dnssec in resolver_results:
for keepalive in resolver_results[dnssec]:
for resolver in resolver_results[dnssec][keepalive]:
resolver_results[dnssec][keepalive][resolver].sort(key=lambda x: x['avg_latency_ms'])
# Sort each configuration's results by average latency
for resolver in resolver_results:
for dnssec in resolver_results[resolver]:
for keepalive in resolver_results[resolver][dnssec]:
resolver_results[resolver][dnssec][keepalive].sort(key=lambda x: x['avg_latency_ms'])
# Write to CSV with all data
all_results = []
for dnssec in resolver_results:
for keepalive in resolver_results[dnssec]:
for resolver, results in resolver_results[dnssec][keepalive].items():
for result in results:
for resolver in resolver_results:
for dnssec in resolver_results[resolver]:
for keepalive in resolver_results[resolver][dnssec]:
for result in resolver_results[resolver][dnssec][keepalive]:
result['resolver'] = resolver
result['dnssec'] = dnssec
result['keepalive'] = keepalive
all_results.append(result)
with open(output_file, 'w', newline='') as csvfile:
@@ -172,32 +204,83 @@ def analyze_dns_data(root_directory, output_file):
print(f"\nAnalysis complete! Full results written to {output_file}")
print(f"Total measurements: {sum(len(durations) for durations in measurements.values())}")
def print_resolver_table(resolver, results, dnssec_status, keepalive_status):
"""Print a formatted table for a resolver"""
ka_indicator = "PERSISTENT" if keepalive_status == 'on' else "NEW CONNECTION"
print(f"\n{resolver} DNS Resolver (DNSSEC {dnssec_status.upper()}, {ka_indicator})")
print("=" * 100)
print(f"{'Protocol':<12} {'Queries':<8} {'Avg(ms)':<10} {'Median(ms)':<12} {'Min(ms)':<10} {'Max(ms)':<10} {'P95(ms)':<10}")
print("-" * 100)
def print_configuration_table(resolver, dnssec_status, keepalive_status, results):
"""Print a formatted table for a specific configuration"""
ka_indicator = "PERSISTENT" if keepalive_status == 'on' else "NEW CONN"
dnssec_display = get_dnssec_display_name(dnssec_status)
print(f"\n {dnssec_display} - {ka_indicator}")
print(" " + "-" * 90)
print(f" {'Protocol':<12} {'Queries':<8} {'Avg(ms)':<10} {'Median(ms)':<12} {'Min(ms)':<10} {'Max(ms)':<10} {'P95(ms)':<10}")
print(" " + "-" * 90)
for result in results:
print(f"{result['protocol']:<12} {result['total_queries']:<8} "
print(f" {result['protocol']:<12} {result['total_queries']:<8} "
f"{result['avg_latency_ms']:<10} {result['median_latency_ms']:<12} "
f"{result['min_latency_ms']:<10} {result['max_latency_ms']:<10} "
f"{result['p95_latency_ms']:<10}")
# Print tables organized by DNSSEC and KeepAlive status
for dnssec_status in ['off', 'on']:
if dnssec_status in resolver_results:
print(f"\n{'#' * 60}")
print(f"# DNS RESOLVERS - DNSSEC {dnssec_status.upper()}")
print(f"{'#' * 60}")
# Print results grouped by resolver first
print(f"\n{'=' * 100}")
print("DNS RESOLVER PERFORMANCE COMPARISON")
print(f"{'=' * 100}")
for keepalive_status in ['off', 'on']:
if keepalive_status in resolver_results[dnssec_status]:
for resolver in sorted(resolver_results[dnssec_status][keepalive_status].keys()):
results = resolver_results[dnssec_status][keepalive_status][resolver]
print_resolver_table(resolver, results, dnssec_status, keepalive_status)
for resolver in sorted(resolver_results.keys()):
print(f"\n{resolver} DNS Resolver")
print("=" * 100)
# Order configurations logically
config_order = [
('off', 'off'), # No DNSSEC, New connections
('off', 'on'), # No DNSSEC, Persistent
('trust', 'off'), # Trust DNSSEC, New connections
('trust', 'on'), # Trust DNSSEC, Persistent
('auth', 'off'), # Auth DNSSEC, New connections
('auth', 'on'), # Auth DNSSEC, Persistent
]
for dnssec_status, keepalive_status in config_order:
if dnssec_status in resolver_results[resolver] and keepalive_status in resolver_results[resolver][dnssec_status]:
results = resolver_results[resolver][dnssec_status][keepalive_status]
if results: # Only print if there are results
print_configuration_table(resolver, dnssec_status, keepalive_status, results)
# Summary comparison across resolvers
print(f"\n{'=' * 100}")
print("CROSS-RESOLVER PROTOCOL COMPARISON")
print(f"{'=' * 100}")
# Group by protocol and configuration for cross-resolver comparison
protocol_comparison = defaultdict(lambda: defaultdict(list))
for resolver in resolver_results:
for dnssec in resolver_results[resolver]:
for keepalive in resolver_results[resolver][dnssec]:
for result in resolver_results[resolver][dnssec][keepalive]:
config_key = f"{get_dnssec_display_name(dnssec)} - {'PERSISTENT' if keepalive == 'on' else 'NEW CONN'}"
protocol_comparison[result['protocol']][config_key].append({
'resolver': resolver,
'avg_latency_ms': result['avg_latency_ms'],
'total_queries': result['total_queries']
})
for protocol in sorted(protocol_comparison.keys()):
print(f"\n{protocol} Protocol Comparison")
print("-" * 100)
for config in sorted(protocol_comparison[protocol].keys()):
resolvers_data = protocol_comparison[protocol][config]
if resolvers_data:
print(f"\n {config}")
print(" " + "-" * 60)
print(f" {'Resolver':<15} {'Avg Latency (ms)':<20} {'Queries':<10}")
print(" " + "-" * 60)
# Sort by average latency
resolvers_data.sort(key=lambda x: x['avg_latency_ms'])
for data in resolvers_data:
print(f" {data['resolver']:<15} {data['avg_latency_ms']:<20} {data['total_queries']:<10}")
if __name__ == "__main__":
root_dir = "."
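As a worked example of the new parsing (the filename timestamp is produced by GenerateOutputPaths, not shown in full in this commit, and is assumed here to contain no hyphens since the parser takes the last '-'-separated field): a file such as results/cloudflare/2025-09-28/doh-auth-persist-<timestamp>.csv splits into protocol 'doh', flags 'auth-persist', and the timestamp, so extract_from_new_format returns ('doh', 'auth', 'on', 'auth-persist') and the report section is headed 'DNSSEC (Authoritative) - PERSISTENT'. When the CSV rows carry the new columns, extract_server_info_from_csv takes precedence instead: dnssec=true with auth_dnssec=true maps to 'auth', dnssec=true with auth_dnssec=false maps to 'trust', and dnssec=false maps to 'off'.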


@@ -28,12 +28,12 @@ type ValidatingDNSClient struct {
type Options struct {
DNSSEC bool
AuthoritativeDNSSEC bool
ValidateOnly bool
StrictValidation bool
KeepAlive bool // New flag for long-lived connections
KeepAlive bool
}
// New creates a DNS client based on the upstream string
func New(upstream string, opts Options) (DNSClient, error) {
logger.Debug("Creating DNS client for upstream: %s with options: %+v", upstream, opts)
@@ -67,8 +67,21 @@ func New(upstream string, opts Options) (DNSClient, error) {
return baseClient, nil
}
logger.Debug("DNSSEC enabled, wrapping with validator")
validator := dnssec.NewValidatorWithAuthoritativeQueries()
logger.Debug("DNSSEC enabled, wrapping with validator (AuthoritativeDNSSEC: %v)", opts.AuthoritativeDNSSEC)
var validator *dnssec.Validator
if opts.AuthoritativeDNSSEC {
validator = dnssec.NewValidatorWithAuthoritativeQueries()
} else {
validator = dnssec.NewValidator(func(qname string, qtype uint16) (*dns.Msg, error) {
msg := new(dns.Msg)
msg.SetQuestion(dns.Fqdn(qname), qtype)
msg.Id = dns.Id()
msg.RecursionDesired = true
msg.SetEdns0(4096, true)
return baseClient.Query(msg)
})
}
return &ValidatingDNSClient{
client: baseClient,
@@ -80,8 +93,8 @@ func New(upstream string, opts Options) (DNSClient, error) {
func (v *ValidatingDNSClient) Query(msg *dns.Msg) (*dns.Msg, error) {
if len(msg.Question) > 0 {
question := msg.Question[0]
logger.Debug("ValidatingDNSClient query: %s %s (DNSSEC: %v, ValidateOnly: %v, StrictValidation: %v)",
question.Name, dns.TypeToString[question.Qtype], v.options.DNSSEC, v.options.ValidateOnly, v.options.StrictValidation)
logger.Debug("ValidatingDNSClient query: %s %s (DNSSEC: %v, AuthoritativeDNSSEC: %v, ValidateOnly: %v, StrictValidation: %v)",
question.Name, dns.TypeToString[question.Qtype], v.options.DNSSEC, v.options.AuthoritativeDNSSEC, v.options.ValidateOnly, v.options.StrictValidation)
}
// Always query the upstream first
@@ -261,7 +274,7 @@ func createClient(scheme, host, port, path string, opts Options) (DNSClient, err
logger.Debug("Creating DoT client with config: %+v", config)
return dot.New(config)
case "doq": // DNS over QUIC
case "doq":
config := doq.Config{
Host: host,
Port: port,
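A minimal caller-side sketch of the two validation modes added in this hunk; the module import path, the upstream address, and the query domain are assumptions, while client.Options, client.New, Query, and Close are taken from this diff:

package main

import (
    "fmt"
    "log"

    "github.com/miekg/dns"
    // Hypothetical import path for the client package changed in this commit.
    "example.com/qol/internal/client"
)

func main() {
    // Trust-based mode: DNSSEC is enabled and validation lookups reuse the
    // upstream's Query path. Setting AuthoritativeDNSSEC to true switches the
    // wrapper to dnssec.NewValidatorWithAuthoritativeQueries instead.
    opts := client.Options{
        DNSSEC:              true,
        AuthoritativeDNSSEC: false,
        KeepAlive:           true,
    }

    c, err := client.New("tls://1.1.1.1:853", opts) // upstream address is an assumption
    if err != nil {
        log.Fatal(err)
    }
    defer c.Close()

    msg := new(dns.Msg)
    msg.SetQuestion(dns.Fqdn("example.com"), dns.TypeA)
    msg.SetEdns0(4096, true) // advertise EDNS0 and set the DO bit

    resp, err := c.Query(msg)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(resp)
}

With AuthoritativeDNSSEC false, the validator's lookup callback is wired to baseClient.Query as shown in the hunk above; with it true, validation queries bypass the configured upstream.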


@@ -17,6 +17,7 @@ type RunCmd struct {
QueryType string `short:"t" long:"type" default:"A" help:"DNS query type"`
Timeout time.Duration `long:"timeout" default:"5s" help:"Query timeout (informational)"`
DNSSEC bool `long:"dnssec" help:"Enable DNSSEC"`
AuthoritativeDNSSEC bool `short:"a" long:"auth-dnssec" help:"Use authoritative DNSSEC validation instead of trusting resolver"`
KeepAlive bool `short:"k" long:"keep-alive" help:"Use persistent connections"`
Interface string `long:"iface" default:"any" help:"Capture interface (e.g., eth0, any)"`
Servers []string `short:"s" long:"server" help:"Upstream servers (udp://..., tls://..., https://..., doq://...)"`
@@ -28,6 +29,7 @@ func (r *RunCmd) Run() error {
OutputDir: r.OutputDir,
QueryType: r.QueryType,
DNSSEC: r.DNSSEC,
AuthoritativeDNSSEC: r.AuthoritativeDNSSEC,
KeepAlive: r.KeepAlive,
Interface: r.Interface,
Servers: r.Servers,


@@ -24,6 +24,7 @@ type QueryCmd struct {
Server string `help:"Upstream server address (e.g., https://1.1.1.1/dns-query, tls://1.1.1.1, 8.8.8.8)." short:"s" required:""`
QueryType string `help:"Query type (A, AAAA, MX, TXT, etc.)." short:"t" enum:"A,AAAA,MX,TXT,NS,CNAME,SOA,PTR,DNSKEY" default:"A"`
DNSSEC bool `help:"Enable DNSSEC (DO bit)." short:"d"`
AuthoritativeDNSSEC bool `help:"Use authoritative DNSSEC validation instead of trusting resolver." short:"a"`
ValidateOnly bool `help:"Only return DNSSEC validated responses." short:"V"`
StrictValidation bool `help:"Fail on any DNSSEC validation error." short:"S"`
KeepAlive bool `help:"Use persistent connections." short:"k"`
@@ -37,17 +38,19 @@ type ListenCmd struct {
Fallback string `help:"Fallback DNS server (e.g., https://1.1.1.1/dns-query, tls://8.8.8.8)." short:"f"`
Bootstrap string `help:"Bootstrap DNS server (must be an IP address, e.g., 8.8.8.8, 1.1.1.1)." short:"b"`
DNSSEC bool `help:"Enable DNSSEC for upstream queries." short:"d"`
AuthoritativeDNSSEC bool `help:"Use authoritative DNSSEC validation instead of trusting resolver." short:"a"`
KeepAlive bool `help:"Use persistent connections to upstream servers." short:"k"`
Timeout time.Duration `help:"Timeout for upstream queries." default:"5s"`
Verbose bool `help:"Enable verbose logging." short:"v"`
}
func (q *QueryCmd) Run() error {
logger.Info("Querying %s for %s type %s (DNSSEC: %v, ValidateOnly: %v, StrictValidation: %v, KeepAlive: %v, Timeout: %v)",
q.Server, q.DomainName, q.QueryType, q.DNSSEC, q.ValidateOnly, q.StrictValidation, q.KeepAlive, q.Timeout)
logger.Info("Querying %s for %s type %s (DNSSEC: %v, AuthoritativeDNSSEC: %v, ValidateOnly: %v, StrictValidation: %v, KeepAlive: %v, Timeout: %v)",
q.Server, q.DomainName, q.QueryType, q.DNSSEC, q.AuthoritativeDNSSEC, q.ValidateOnly, q.StrictValidation, q.KeepAlive, q.Timeout)
opts := client.Options{
DNSSEC: q.DNSSEC,
AuthoritativeDNSSEC: q.AuthoritativeDNSSEC,
ValidateOnly: q.ValidateOnly,
StrictValidation: q.StrictValidation,
KeepAlive: q.KeepAlive,
@@ -93,6 +96,7 @@ func (l *ListenCmd) Run() error {
Fallback: l.Fallback,
Bootstrap: l.Bootstrap,
DNSSEC: l.DNSSEC,
AuthoritativeDNSSEC: l.AuthoritativeDNSSEC,
KeepAlive: l.KeepAlive,
Timeout: l.Timeout,
Verbose: l.Verbose,
@@ -114,7 +118,6 @@ func (l *ListenCmd) Run() error {
return srv.Start()
}
func printResponse(domain, qtype string, msg *dns.Msg) {
fmt.Println(";; QUESTION SECTION:")


@@ -21,6 +21,7 @@ type MeasurementConfig struct {
OutputDir string
QueryType string
DNSSEC bool
AuthoritativeDNSSEC bool
KeepAlive bool
Interface string
Servers []string
@@ -76,7 +77,7 @@ func (r *MeasurementRunner) runMeasurement(upstream string, domains []string, qT
defer dnsClient.Close()
// Setup output files
csvPath, pcapPath := GenerateOutputPaths(r.config.OutputDir, upstream, r.config.DNSSEC, r.config.KeepAlive)
csvPath, pcapPath := GenerateOutputPaths(r.config.OutputDir, upstream, r.config.DNSSEC, r.config.AuthoritativeDNSSEC, r.config.KeepAlive)
// Create directory if it doesn't exist
if err := os.MkdirAll(filepath.Dir(csvPath), 0755); err != nil {
@@ -90,7 +91,7 @@ func (r *MeasurementRunner) runMeasurement(upstream string, domains []string, qT
// Show relative path for cleaner output
relPath, _ := filepath.Rel(r.config.OutputDir, csvPath)
fmt.Printf(">>> Measuring %s (dnssec=%v%s) → %s\n", upstream, r.config.DNSSEC, keepAliveStr, relPath)
fmt.Printf(">>> Measuring %s (dnssec=%v, auth=%v%s) → %s\n", upstream, r.config.DNSSEC, r.config.AuthoritativeDNSSEC, keepAliveStr, relPath)
// Setup packet capture
packetCapture, err := capture.NewPacketCapture(r.config.Interface, pcapPath)
@@ -113,6 +114,7 @@ func (r *MeasurementRunner) runMeasurement(upstream string, domains []string, qT
func (r *MeasurementRunner) setupDNSClient(upstream string) (client.DNSClient, error) {
opts := client.Options{
DNSSEC: r.config.DNSSEC,
AuthoritativeDNSSEC: r.config.AuthoritativeDNSSEC,
KeepAlive: r.config.KeepAlive,
}
return client.New(upstream, opts)
@@ -177,6 +179,7 @@ func (r *MeasurementRunner) performQuery(dnsClient client.DNSClient, domain, ups
QueryType: r.config.QueryType,
Protocol: proto,
DNSSEC: r.config.DNSSEC,
AuthoritativeDNSSEC: r.config.AuthoritativeDNSSEC,
KeepAlive: r.config.KeepAlive,
DNSServer: upstream,
Timestamp: time.Now(),


@@ -13,6 +13,7 @@ type DNSMetric struct {
QueryType string `json:"query_type"`
Protocol string `json:"protocol"`
DNSSEC bool `json:"dnssec"`
AuthoritativeDNSSEC bool `json:"auth_dnssec"`
KeepAlive bool `json:"keep_alive"`
DNSServer string `json:"dns_server"`
Timestamp time.Time `json:"timestamp"`
@@ -39,7 +40,7 @@ func NewMetricsWriter(path string) (*MetricsWriter, error) {
// Write CSV header
header := []string{
"domain", "query_type", "protocol", "dnssec", "keep_alive",
"domain", "query_type", "protocol", "dnssec", "auth_dnssec", "keep_alive",
"dns_server", "timestamp", "duration_ns", "duration_ms",
"request_size_bytes", "response_size_bytes", "response_code", "error",
}
@@ -63,6 +64,7 @@ func (mw *MetricsWriter) WriteMetric(metric DNSMetric) error {
metric.QueryType,
metric.Protocol,
strconv.FormatBool(metric.DNSSEC),
strconv.FormatBool(metric.AuthoritativeDNSSEC),
strconv.FormatBool(metric.KeepAlive),
metric.DNSServer,
metric.Timestamp.Format(time.RFC3339),
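For reference, the CSV header after this change is: domain, query_type, protocol, dnssec, auth_dnssec, keep_alive, dns_server, timestamp, duration_ns, duration_ms, request_size_bytes, response_size_bytes, response_code, error. Older result files without the auth_dnssec column are still handled by the analysis script above, which defaults the field to 'false' via row.get('auth_dnssec', 'false') and therefore classifies those runs as 'trust' whenever dnssec is true.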


@@ -8,7 +8,7 @@ import (
"time"
)
func GenerateOutputPaths(outputDir, upstream string, dnssec, keepAlive bool) (csvPath, pcapPath string) {
func GenerateOutputPaths(outputDir, upstream string, dnssec, authDNSSEC, keepAlive bool) (csvPath, pcapPath string) {
proto := DetectProtocol(upstream)
cleanServer := cleanServerName(upstream)
@@ -25,7 +25,11 @@ func GenerateOutputPaths(outputDir, upstream string, dnssec, keepAlive bool) (cs
// Add flags if enabled
var flags []string
if dnssec {
flags = append(flags, "dnssec")
if authDNSSEC {
flags = append(flags, "auth")
} else {
flags = append(flags, "trust")
}
}
if keepAlive {
flags = append(flags, "persist")

run.sh

@@ -3,9 +3,8 @@
TOOL_PATH="$1"/"qol"
DOMAINS_FILE="$1"/"domains.txt"
OUTPUT_DIR="$1"/"results"
TIMESTAMP=$(date '+%Y%m%d_%H%M')
# All servers in one command
# All servers in one command (same as yours)
SERVERS=(
-s "udp://8.8.8.8:53"
-s "udp://1.1.1.1:53"
@@ -19,12 +18,61 @@ SERVERS=(
-s "https://cloudflare-dns.com/dns-query"
-s "https://dns10.quad9.net/dns-query"
-s "https://dns.adguard-dns.com/dns-query"
-s "doh3://dns.google/dns-query"
-s "doh3://cloudflare-dns.com/dns-query"
-s "doh3://dns.adguard-dns.com/dns-query"
-s "doq://dns.adguard-dns.com:853"
)
# Run with DNSSEC off
sudo "$TOOL_PATH" run "$DOMAINS_FILE" \
--output-dir "${OUTPUT_DIR}/run_${TIMESTAMP}_dnssec_off" \
--interface eth0 \
--timeout 5s \
# Common args
COMMON_ARGS=(
"$DOMAINS_FILE"
--interface eth0
--timeout 5s
"${SERVERS[@]}"
)
# Define all combinations as arrays of extra flags (no suffixes here, since flags are in filenames)
COMBINATIONS=(
# DNSSEC off, Keep off
""
# DNSSEC off, Keep on
"--keep-alive"
# DNSSEC on (trust), Keep off
"--dnssec"
# DNSSEC on (trust), Keep on
"--dnssec --keep-alive"
# DNSSEC on (auth), Keep off
"--dnssec --auth-dnssec"
# DNSSEC on (auth), Keep on
"--dnssec --auth-dnssec --keep-alive"
)
# Run each combination with a unique timestamped output dir
for ((i=0; i<${#COMBINATIONS[@]}; i++)); do
FLAGS=${COMBINATIONS[$i]}
# Generate a unique timestamp for this run (YYYYMMDD_HHMMSS)
TIMESTAMP=$(date '+%Y%m%d_%H%M%S')
OUTPUT_PATH="${OUTPUT_DIR}/run_${TIMESTAMP}"
echo "Running combination: $FLAGS (output: $OUTPUT_PATH)"
# Convert FLAGS string to array (split by space)
FLAGS_ARRAY=($FLAGS)
sudo "$TOOL_PATH" run \
--output-dir "$OUTPUT_PATH" \
"${COMMON_ARGS[@]}" \
"${FLAGS_ARRAY[@]}"
sleep 1
done
echo "All combinations completed!"


@@ -23,7 +23,8 @@ type Config struct {
Fallback string
Bootstrap string
DNSSEC bool
KeepAlive bool // Added KeepAlive field
AuthoritativeDNSSEC bool
KeepAlive bool
Timeout time.Duration
Verbose bool
}
@@ -163,7 +164,8 @@ func (s *Server) initClients() error {
logger.Debug("Creating upstream client for %s (resolved: %s)", s.config.Upstream, resolvedUpstream)
upstreamClient, err := client.New(resolvedUpstream, client.Options{
DNSSEC: s.config.DNSSEC,
KeepAlive: s.config.KeepAlive, // Pass KeepAlive to upstream client
AuthoritativeDNSSEC: s.config.AuthoritativeDNSSEC,
KeepAlive: s.config.KeepAlive,
})
if err != nil {
logger.Error("Failed to create upstream client: %v", err)