#!/usr/bin/env python3
"""
End-to-End Deployment Testing Script

Tests the complete Calejo Control Adapter system with mock SCADA and optimization
"""
import asyncio
import time
import requests
import json
import logging
import subprocess
import sys
from pathlib import Path
from typing import Dict, Any, List
|
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class E2ETestRunner:
    """End-to-end test runner for the Calejo Control Adapter.

    Launches the mock SCADA server, the mock optimization server, and the
    main application as subprocesses, then probes the application's HTTP
    dashboard and API endpoints.  Every check appends a
    ``(test name, passed, message)`` tuple to ``self.test_results`` so a
    summary can be printed at the end of the run.
    """

    def __init__(self):
        # Base URL of the main application's HTTP interface.
        self.base_url = "http://localhost:8081"
        # Subprocess handles; each stays None until its server is started.
        self.mock_scada_process = None
        self.mock_optimization_process = None
        self.main_app_process = None

        # Accumulated (test name, passed, message) tuples.
        self.test_results = []

    def start_mock_servers(self):
        """Start mock SCADA and optimization servers.

        Returns:
            bool: True if both subprocesses were spawned, False otherwise.
        """
        logger.info("Starting mock servers...")

        try:
            # Discard child output: the previous PIPE handles were never
            # read, so a chatty child could fill the pipe buffer and block.
            self.mock_scada_process = subprocess.Popen(
                [sys.executable, "mock-scada-server.py"],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )

            self.mock_optimization_process = subprocess.Popen(
                [sys.executable, "mock-optimization-server.py"],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )

            # Give the servers a moment to bind their ports.
            time.sleep(5)
            logger.info("Mock servers started")
            return True

        except Exception as e:
            logger.error(f"Failed to start mock servers: {e}")
            return False

    def start_main_application(self):
        """Start the main Calejo Control Adapter application.

        Polls ``/health`` every 2 seconds for up to 60 seconds.

        Returns:
            bool: True once the health endpoint answers 200, False on
            timeout or spawn failure.
        """
        logger.info("Starting main application...")

        try:
            self.main_app_process = subprocess.Popen(
                [sys.executable, "src/main.py"],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )

            # Poll the health endpoint until the app answers or we give up.
            for i in range(30):
                try:
                    response = requests.get(f"{self.base_url}/health", timeout=2)
                    if response.status_code == 200:
                        logger.info("Main application started")
                        return True
                except requests.RequestException:
                    # App not accepting connections yet; keep polling.
                    pass

                time.sleep(2)
                if i % 5 == 0:
                    logger.info(f"Waiting for application to start... ({i*2}s)")

            logger.error("Main application failed to start within 60 seconds")
            return False

        except Exception as e:
            logger.error(f"Failed to start main application: {e}")
            return False

    @staticmethod
    def _stop_process(process):
        """Terminate one subprocess, escalating to kill if it hangs.

        Args:
            process: a ``subprocess.Popen`` handle, or None (no-op).
        """
        if not process:
            return
        process.terminate()
        try:
            process.wait(timeout=10)
        except subprocess.TimeoutExpired:
            # terminate() was ignored or the child is wedged; force it so
            # teardown never raises and the remaining servers still stop.
            process.kill()
            process.wait()

    def stop_servers(self):
        """Stop all running servers (main app first, then the mocks)."""
        logger.info("Stopping servers...")

        self._stop_process(self.main_app_process)
        self._stop_process(self.mock_scada_process)
        self._stop_process(self.mock_optimization_process)

        logger.info("All servers stopped")

    def test_health_endpoint(self) -> bool:
        """Test the ``/health`` endpoint and record the result."""
        logger.info("Testing health endpoint...")

        try:
            # Explicit timeout so a wedged server fails the test rather
            # than hanging the whole suite.
            response = requests.get(f"{self.base_url}/health", timeout=10)
            if response.status_code == 200:
                data = response.json()
                logger.info(f"Health status: {data.get('status', 'unknown')}")
                self.test_results.append(("Health Endpoint", True, "Health check successful"))
                return True
            else:
                logger.error(f"Health endpoint returned {response.status_code}")
                self.test_results.append(("Health Endpoint", False, f"HTTP {response.status_code}"))
                return False

        except Exception as e:
            logger.error(f"Health endpoint test failed: {e}")
            self.test_results.append(("Health Endpoint", False, str(e)))
            return False

    def test_dashboard_access(self) -> bool:
        """Test that the dashboard HTML page loads with the expected title."""
        logger.info("Testing dashboard access...")

        try:
            response = requests.get(f"{self.base_url}/dashboard", timeout=10)
            if response.status_code == 200:
                if "Calejo Control Adapter Dashboard" in response.text:
                    logger.info("Dashboard HTML loaded successfully")
                    self.test_results.append(("Dashboard Access", True, "Dashboard loaded"))
                    return True
                else:
                    logger.error("Dashboard HTML content incorrect")
                    self.test_results.append(("Dashboard Access", False, "Incorrect content"))
                    return False
            else:
                logger.error(f"Dashboard returned {response.status_code}")
                self.test_results.append(("Dashboard Access", False, f"HTTP {response.status_code}"))
                return False

        except Exception as e:
            logger.error(f"Dashboard access test failed: {e}")
            self.test_results.append(("Dashboard Access", False, str(e)))
            return False

    def test_dashboard_api(self) -> bool:
        """Test each dashboard API endpoint; all must return HTTP 200."""
        logger.info("Testing dashboard API...")

        endpoints = [
            ("/api/v1/dashboard/status", "Status API"),
            ("/api/v1/dashboard/config", "Config API"),
            ("/api/v1/dashboard/logs", "Logs API"),
            ("/api/v1/dashboard/actions", "Actions API")
        ]

        all_passed = True

        for endpoint, name in endpoints:
            try:
                response = requests.get(f"{self.base_url}{endpoint}", timeout=10)
                if response.status_code == 200:
                    logger.info(f"{name}: OK")
                    self.test_results.append((f"{name}", True, "API accessible"))
                else:
                    logger.error(f"{name}: HTTP {response.status_code}")
                    self.test_results.append((f"{name}", False, f"HTTP {response.status_code}"))
                    all_passed = False

            except Exception as e:
                logger.error(f"{name} test failed: {e}")
                self.test_results.append((f"{name}", False, str(e)))
                all_passed = False

        return all_passed

    def test_configuration_management(self) -> bool:
        """Test configuration read and update via the dashboard API."""
        logger.info("Testing configuration management...")

        try:
            # Precondition: the current configuration must be readable.
            response = requests.get(f"{self.base_url}/api/v1/dashboard/config", timeout=10)
            if response.status_code != 200:
                logger.error("Failed to get configuration")
                self.test_results.append(("Configuration Management", False, "Get config failed"))
                return False

            # Representative full configuration used to exercise the update path.
            test_config = {
                "database": {
                    "host": "test-host",
                    "port": 5432,
                    "name": "test-db",
                    "user": "test-user",
                    "password": "test-pass"
                },
                "opcua": {
                    "enabled": True,
                    "port": 4840
                },
                "modbus": {
                    "enabled": True,
                    "port": 502
                },
                "rest_api": {
                    "enabled": True,
                    "port": 8080
                },
                "monitoring": {
                    "enabled": True,
                    "port": 9090
                },
                "security": {
                    "enable_auth": False,
                    "enable_ssl": False
                }
            }

            response = requests.post(
                f"{self.base_url}/api/v1/dashboard/config",
                json=test_config,
                timeout=10
            )

            if response.status_code == 200:
                result = response.json()
                if result.get("success", False):
                    logger.info("Configuration update successful")
                    self.test_results.append(("Configuration Management", True, "Config update successful"))
                    return True
                else:
                    logger.error(f"Configuration update failed: {result.get('error', 'Unknown error')}")
                    self.test_results.append(("Configuration Management", False, result.get('error', 'Unknown error')))
                    return False
            else:
                logger.error(f"Configuration update returned {response.status_code}")
                self.test_results.append(("Configuration Management", False, f"HTTP {response.status_code}"))
                return False

        except Exception as e:
            logger.error(f"Configuration management test failed: {e}")
            self.test_results.append(("Configuration Management", False, str(e)))
            return False

    def test_system_actions(self) -> bool:
        """Test the system actions endpoint with a health_check action."""
        logger.info("Testing system actions...")

        try:
            response = requests.post(
                f"{self.base_url}/api/v1/dashboard/actions",
                json={"action": "health_check"},
                timeout=10
            )

            if response.status_code == 200:
                result = response.json()
                if result.get("success", False):
                    logger.info("Health check action successful")
                    self.test_results.append(("System Actions", True, "Health check successful"))
                    return True
                else:
                    logger.error(f"Health check failed: {result.get('error', 'Unknown error')}")
                    self.test_results.append(("System Actions", False, result.get('error', 'Unknown error')))
                    return False
            else:
                logger.error(f"Health check action returned {response.status_code}")
                self.test_results.append(("System Actions", False, f"HTTP {response.status_code}"))
                return False

        except Exception as e:
            logger.error(f"System actions test failed: {e}")
            self.test_results.append(("System Actions", False, str(e)))
            return False

    def test_integration_with_mock_servers(self) -> bool:
        """Test integration with mock SCADA and optimization servers.

        A full OPC UA round-trip would require a dedicated client, so this
        only verifies both mock subprocesses are still alive
        (``poll() is None`` means the process has not exited).
        """
        logger.info("Testing integration with mock servers...")

        try:
            if (self.mock_scada_process and self.mock_scada_process.poll() is None and
                    self.mock_optimization_process and self.mock_optimization_process.poll() is None):
                logger.info("Mock servers are running")
                self.test_results.append(("Mock Server Integration", True, "Mock servers running"))
                return True
            else:
                logger.error("Mock servers are not running")
                self.test_results.append(("Mock Server Integration", False, "Mock servers not running"))
                return False

        except Exception as e:
            logger.error(f"Integration test failed: {e}")
            self.test_results.append(("Mock Server Integration", False, str(e)))
            return False

    def run_all_tests(self) -> bool:
        """Run all end-to-end tests.

        Returns:
            bool: True only if every server started and every test passed.
        """
        logger.info("Starting end-to-end deployment tests...")

        if not self.start_mock_servers():
            logger.error("Failed to start mock servers")
            return False

        if not self.start_main_application():
            logger.error("Failed to start main application")
            self.stop_servers()
            return False

        tests = [
            self.test_health_endpoint,
            self.test_dashboard_access,
            self.test_dashboard_api,
            self.test_configuration_management,
            self.test_system_actions,
            self.test_integration_with_mock_servers
        ]

        all_passed = True
        try:
            for test_func in tests:
                if not test_func():
                    all_passed = False
        finally:
            # Always tear the subprocesses down, even if a test raises;
            # otherwise the servers would be leaked on an unexpected error.
            self.stop_servers()

        self.print_test_results()

        return all_passed

    def print_test_results(self):
        """Print a PASS/FAIL line per recorded test and a final summary."""
        print("\n" + "="*60)
        print("END-TO-END DEPLOYMENT TEST RESULTS")
        print("="*60)

        passed = 0
        total = len(self.test_results)

        for test_name, success, message in self.test_results:
            status = "✅ PASS" if success else "❌ FAIL"
            print(f"{status} {test_name}: {message}")
            if success:
                passed += 1

        print("\n" + "="*60)
        print(f"SUMMARY: {passed}/{total} tests passed")

        if passed == total:
            print("🎉 SUCCESS: All end-to-end tests passed!")
            print("The deployment is ready for production use.")
        else:
            print("❌ Some tests failed. Please check the deployment.")
        print("="*60)
|
def main():
    """Entry point for the end-to-end deployment test script.

    Verifies the prerequisite scripts exist, then runs the full suite.

    Returns:
        int: 0 when every test passed, 1 on a missing file or any failure.
    """
    print("🚀 Calejo Control Adapter - End-to-End Deployment Test")
    print("=" * 60)

    # Guard clause: bail out at the first missing prerequisite script.
    for required in ("mock-scada-server.py", "mock-optimization-server.py", "src/main.py"):
        if not Path(required).exists():
            print(f"❌ Required file not found: {required}")
            return 1

    # All prerequisites present — run the suite and map its verdict to an
    # exit code.
    runner = E2ETestRunner()
    return 0 if runner.run_all_tests() else 1
|
# Script entry point: propagate main()'s return value as the process
# exit status when run directly (imports are unaffected).
if __name__ == "__main__":
    raise SystemExit(main())