feat: Implement configurable pump control preprocessing logic #5
@@ -2,10 +2,10 @@
 # Enable protocol servers for testing
 
 # Database configuration
-DB_HOST=calejo-postgres-test
+DB_HOST=postgres
 DB_PORT=5432
 DB_NAME=calejo_test
-DB_USER=calejo
+DB_USER=calejo_test
 DB_PASSWORD=password
 
 # Enable internal protocol servers for testing

@@ -15,7 +15,7 @@ MODBUS_ENABLED=true
 # REST API configuration
 REST_API_ENABLED=true
 REST_API_HOST=0.0.0.0
-REST_API_PORT=8081
+REST_API_PORT=8080
 
 # Health monitoring
 HEALTH_MONITOR_PORT=9091
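The test overrides now point at the compose service name (`postgres`) and the container-internal API port (8080); the host-side test ports are remapped later in this PR by the docker-compose rewrite in the deploy script. A minimal sketch of how these variables would compose into a DSN, assuming the settings layer assembles it from the individual `DB_*` values (the actual loader may differ):

```python
# Hypothetical DSN assembly from the .env.test values above.
# Assumes the variables are already exported into the environment.
import os

dsn = (
    f"postgresql://{os.environ['DB_USER']}:{os.environ['DB_PASSWORD']}"
    f"@{os.environ['DB_HOST']}:{os.environ['DB_PORT']}/{os.environ['DB_NAME']}"
)
print(dsn)  # postgresql://calejo_test:password@postgres:5432/calejo_test
```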
@@ -101,6 +101,16 @@ CREATE TABLE IF NOT EXISTS users (
     updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
 );
 
+-- Create discovery_results table
+CREATE TABLE IF NOT EXISTS discovery_results (
+    scan_id VARCHAR(100) PRIMARY KEY,
+    status VARCHAR(50) NOT NULL,
+    discovered_endpoints JSONB,
+    scan_started_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+    scan_completed_at TIMESTAMP,
+    error_message TEXT
+);
+
 -- Create indexes for better performance
 CREATE INDEX IF NOT EXISTS idx_pump_plans_station_pump ON pump_plans(station_id, pump_id);
 CREATE INDEX IF NOT EXISTS idx_pump_plans_interval ON pump_plans(interval_start, interval_end);

@@ -108,6 +118,8 @@ CREATE INDEX IF NOT EXISTS idx_pump_plans_status ON pump_plans(plan_status);
 CREATE INDEX IF NOT EXISTS idx_emergency_stops_cleared ON emergency_stops(cleared_at);
 CREATE INDEX IF NOT EXISTS idx_audit_logs_timestamp ON audit_logs(timestamp);
 CREATE INDEX IF NOT EXISTS idx_audit_logs_user ON audit_logs(user_id);
+CREATE INDEX IF NOT EXISTS idx_discovery_results_status ON discovery_results(status);
+CREATE INDEX IF NOT EXISTS idx_discovery_results_timestamp ON discovery_results(scan_started_at);
 
 -- Insert sample data for testing
 INSERT INTO pump_stations (station_id, station_name, location) VALUES
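The new `discovery_results` table stores each scan's endpoints as a JSONB array, which the persistent discovery service below reads and upserts. A quick sanity query against it, as a sketch (the connection string is an assumption; 5433 is the host-side test PostgreSQL port set up by the deploy script):

```python
# Hedged sanity check against the new table; DSN and port are assumptions.
from sqlalchemy import create_engine, text

engine = create_engine("postgresql://calejo_test:password@localhost:5433/calejo_test")
with engine.connect() as conn:
    rows = conn.execute(text("""
        SELECT scan_id, status,
               jsonb_array_length(COALESCE(discovered_endpoints, '[]'::jsonb)) AS n_endpoints
        FROM discovery_results
        ORDER BY scan_started_at DESC
        LIMIT 5
    """))
    for scan_id, status, n_endpoints in rows:
        print(scan_id, status, n_endpoints)
```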
@@ -140,13 +140,64 @@ class SSHDeployer:
                 dirs[:] = [d for d in dirs if not d.startswith('.')]
 
                 for file in files:
-                    if not file.startswith('.'):
-                        file_path = os.path.join(root, file)
-                        arcname = os.path.relpath(file_path, '.')
+                    # Skip hidden files except .env files
+                    if file.startswith('.') and not file.startswith('.env'):
+                        continue
+
+                    file_path = os.path.join(root, file)
+                    arcname = os.path.relpath(file_path, '.')
+
+                    # Handle docker-compose.yml specially for test environment
+                    if file == 'docker-compose.yml' and 'test' in self.config_file:
+                        # Create modified docker-compose for test environment
+                        modified_compose = self.create_test_docker_compose(file_path)
+                        temp_compose_path = os.path.join(temp_dir, 'docker-compose.yml')
+                        with open(temp_compose_path, 'w') as f:
+                            f.write(modified_compose)
+                        tar.add(temp_compose_path, arcname='docker-compose.yml')
+                    # Handle .env files for test environment
+                    elif file.startswith('.env') and 'test' in self.config_file:
+                        if file == '.env.test':
+                            # Copy .env.test as .env for test environment
+                            temp_env_path = os.path.join(temp_dir, '.env')
+                            with open(file_path, 'r') as src, open(temp_env_path, 'w') as dst:
+                                dst.write(src.read())
+                            tar.add(temp_env_path, arcname='.env')
+                        # Skip other .env files in test environment
+                    else:
                         tar.add(file_path, arcname=arcname)
 
         return package_path
 
+    def create_test_docker_compose(self, original_compose_path: str) -> str:
+        """Create modified docker-compose.yml for test environment"""
+        with open(original_compose_path, 'r') as f:
+            content = f.read()
+
+        # Replace container names and ports for test environment
+        replacements = {
+            'calejo-control-adapter': 'calejo-control-adapter-test',
+            'calejo-postgres': 'calejo-postgres-test',
+            'calejo-prometheus': 'calejo-prometheus-test',
+            'calejo-grafana': 'calejo-grafana-test',
+            '"8080:8080"': '"8081:8080"',  # Test app port
+            '"4840:4840"': '"4841:4840"',  # Test OPC UA port
+            '"502:502"': '"503:502"',  # Test Modbus port
+            '"9090:9090"': '"9092:9090"',  # Test Prometheus metrics
+            '"5432:5432"': '"5433:5432"',  # Test PostgreSQL port
+            '"9091:9090"': '"9093:9090"',  # Test Prometheus UI
+            '"3000:3000"': '"3001:3000"',  # Test Grafana port
+            'calejo': 'calejo_test',  # Test database name
+            'calejo-network': 'calejo-network-test',
+            '@postgres:5432': '@calejo_test-postgres-test:5432',  # Fix database hostname
+            ' - DATABASE_URL=postgresql://calejo_test:password@calejo_test-postgres-test:5432/calejo_test': ' # DATABASE_URL removed - using .env file instead'  # Remove DATABASE_URL to use .env file
+        }
+
+        for old, new in replacements.items():
+            content = content.replace(old, new)
+
+        return content
+
     def deploy(self, dry_run: bool = False):
         """Main deployment process"""
         print("🚀 Starting SSH deployment...")

@@ -214,8 +265,10 @@ class SSHDeployer:
 
         # Wait for services
         print("⏳ Waiting for services to start...")
+        # Determine health check port based on environment
+        health_port = "8081" if 'test' in self.config_file else "8080"
         for i in range(30):
-            if self.execute_remote("curl -s http://localhost:8080/health > /dev/null", "", silent=True):
+            if self.execute_remote(f"curl -s http://localhost:{health_port}/health > /dev/null", "", silent=True):
                 print(" ✅ Services started successfully")
                 break
             print(f" ⏳ Waiting... ({i+1}/30)")
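One subtlety in `create_test_docker_compose`: the replacements are applied sequentially, so the bare `'calejo': 'calejo_test'` rule also rewrites the output of earlier rules that still contain `calejo` — which is why the hostname fix targets `@calejo_test-postgres-test` rather than `@calejo-postgres-test`. A standalone illustration of that cascade:

```python
# Demonstrates the order-dependent cascade of sequential str.replace calls
# that create_test_docker_compose relies on (toy input, not the real compose file).
content = "container_name: calejo-postgres"
replacements = {
    'calejo-postgres': 'calejo-postgres-test',  # applied first
    'calejo': 'calejo_test',                    # then rewrites the result above
}
for old, new in replacements.items():
    content = content.replace(old, new)
print(content)  # container_name: calejo_test-postgres-test
```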
@@ -1,3 +1,10 @@
+GET http://95.111.206.155:8081/api/v1/dashboard/discovery/results/scan_20251107_092049 404 (Not Found)
+(anonymous) @ discovery.js:114
+setInterval
+pollScanStatus @ discovery.js:112
+startDiscoveryScan @ discovery.js:81
+await in startDiscoveryScan
+(anonymous) @ discovery.js:34
 #!/usr/bin/env python
 """
 Mock-Dependent End-to-End Test Runner
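The console trace committed at the top of the test runner records the motivating failure: the dashboard's `pollScanStatus` loop kept receiving 404s for a scan id the old in-memory discovery service no longer held (plausibly after a restart). A rough Python equivalent of that polling loop, for reference (the endpoint path is taken from the trace; `discovery.js` itself is not part of this diff):

```python
# Sketch of the dashboard polling behavior implied by the trace above.
# Base URL and retry cadence are assumptions.
import time
import requests

def poll_scan_status(base_url: str, scan_id: str, attempts: int = 30):
    for _ in range(attempts):
        resp = requests.get(f"{base_url}/api/v1/dashboard/discovery/results/{scan_id}")
        if resp.status_code == 404:
            time.sleep(2)  # scan not stored yet (or lost, pre-persistence)
            continue
        return resp.json()
    return None
```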
@@ -15,7 +15,7 @@ from .configuration_manager import (
     configuration_manager, OPCUAConfig, ModbusTCPConfig, PumpStationConfig,
     PumpConfig, SafetyLimitsConfig, DataPointMapping, ProtocolType, ProtocolMapping
 )
-from src.discovery.protocol_discovery_fast import discovery_service, DiscoveryStatus, DiscoveredEndpoint
+from src.discovery.protocol_discovery_persistent import persistent_discovery_service, DiscoveryStatus, DiscoveredEndpoint
 from datetime import datetime
 
 logger = logging.getLogger(__name__)

@@ -975,7 +975,7 @@ async def delete_protocol_mapping(mapping_id: str):
 async def get_discovery_status():
     """Get current discovery service status"""
     try:
-        status = discovery_service.get_discovery_status()
+        status = persistent_discovery_service.get_discovery_status()
         return {
             "success": True,
             "status": status

@@ -990,7 +990,7 @@ async def start_discovery_scan(background_tasks: BackgroundTasks):
     """Start a new discovery scan"""
     try:
         # Check if scan is already running
-        status = discovery_service.get_discovery_status()
+        status = persistent_discovery_service.get_discovery_status()
         if status["is_scanning"]:
             raise HTTPException(status_code=409, detail="Discovery scan already in progress")
 

@@ -998,7 +998,7 @@ async def start_discovery_scan(background_tasks: BackgroundTasks):
         scan_id = f"scan_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
 
         async def run_discovery():
-            await discovery_service.discover_all_protocols(scan_id)
+            await persistent_discovery_service.discover_all_protocols(scan_id)
 
         background_tasks.add_task(run_discovery)
 

@@ -1018,7 +1018,7 @@ async def start_discovery_scan(background_tasks: BackgroundTasks):
 async def get_discovery_results(scan_id: str):
     """Get results for a specific discovery scan"""
     try:
-        result = discovery_service.get_scan_result(scan_id)
+        result = persistent_discovery_service.get_scan_result(scan_id)
 
         if not result:
             raise HTTPException(status_code=404, detail=f"Discovery scan {scan_id} not found")

@@ -1059,12 +1059,12 @@ async def get_recent_discoveries():
     """Get most recently discovered endpoints"""
     try:
         # Get recent scan results and extract endpoints
-        status = discovery_service.get_discovery_status()
+        status = persistent_discovery_service.get_discovery_status()
         recent_scans = status.get("recent_scans", [])[-5:]  # Last 5 scans
 
         recent_endpoints = []
         for scan_id in recent_scans:
-            result = discovery_service.get_scan_result(scan_id)
+            result = persistent_discovery_service.get_scan_result(scan_id)
             if result and result.discovered_endpoints:
                 recent_endpoints.extend(result.discovered_endpoints)
 

@@ -1100,7 +1100,7 @@ async def get_recent_discoveries():
 async def apply_discovery_results(scan_id: str, station_id: str, pump_id: str, data_type: str, db_source: str):
     """Apply discovered endpoints as protocol mappings"""
     try:
-        result = discovery_service.get_scan_result(scan_id)
+        result = persistent_discovery_service.get_scan_result(scan_id)
 
         if not result:
             raise HTTPException(status_code=404, detail=f"Discovery scan {scan_id} not found")
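With the handlers rewired, a quick smoke check of the results endpoint (the path comes from the committed console trace; host, port, and scan id are deployment-specific assumptions). Separately worth verifying: `get_recent_discoveries` still reads `result.discovered_endpoints` as an attribute, while `persistent_discovery_service.get_scan_result` returns a plain dict, so that handler may need dict-style access.

```python
# Hypothetical smoke check; 8081 is the host-side test app port.
import requests

resp = requests.get(
    "http://localhost:8081/api/v1/dashboard/discovery/results/scan_20251107_092049"
)
print(resp.status_code)  # 404 -> HTTPException raised in get_discovery_results
print(resp.json())       # stored scan row, including discovered_endpoints
```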
@@ -0,0 +1,256 @@
+"""
+Protocol Discovery Service - Persistent version with database storage
+"""
+import asyncio
+import json
+import logging
+from datetime import datetime
+from typing import List, Dict, Any, Optional
+from enum import Enum
+from dataclasses import dataclass, asdict
+
+from sqlalchemy import text
+from config.settings import settings
+from src.database.flexible_client import FlexibleDatabaseClient
+
+logger = logging.getLogger(__name__)
+
+
+class DiscoveryStatus(Enum):
+    """Discovery operation status"""
+    PENDING = "pending"
+    RUNNING = "running"
+    COMPLETED = "completed"
+    FAILED = "failed"
+
+
+class ProtocolType(Enum):
+    MODBUS_TCP = "modbus_tcp"
+    MODBUS_RTU = "modbus_rtu"
+    OPC_UA = "opc_ua"
+    REST_API = "rest_api"
+
+
+@dataclass
+class DiscoveredEndpoint:
+    protocol_type: ProtocolType
+    address: str
+    port: Optional[int] = None
+    device_id: Optional[str] = None
+    device_name: Optional[str] = None
+    capabilities: Optional[List[str]] = None
+    response_time: Optional[float] = None
+    discovered_at: Optional[datetime] = None
+
+    def __post_init__(self):
+        if self.capabilities is None:
+            self.capabilities = []
+
+
+@dataclass
+class DiscoveryResult:
+    scan_id: str
+    status: DiscoveryStatus
+    discovered_endpoints: List[DiscoveredEndpoint]
+    scan_started_at: datetime
+    scan_completed_at: Optional[datetime] = None
+    error_message: Optional[str] = None
+
+
+class PersistentProtocolDiscoveryService:
+    """
+    Protocol discovery service with database persistence
+    """
+
+    def __init__(self):
+        self._current_scan_id: Optional[str] = None
+        self._db_client = FlexibleDatabaseClient(settings.database_url)
+
+    async def initialize(self):
+        """Initialize database connection"""
+        try:
+            await self._db_client.connect()
+            logger.info("Discovery service database initialized")
+        except Exception as e:
+            logger.error(f"Failed to initialize discovery service database: {e}")
+
+    def get_discovery_status(self) -> Dict[str, Any]:
+        """Get current discovery service status"""
+        try:
+            # Get recent scans from database
+            query = text("""
+                SELECT scan_id, status, scan_started_at, scan_completed_at
+                FROM discovery_results
+                ORDER BY scan_started_at DESC
+                LIMIT 5
+            """)
+
+            with self._db_client.engine.connect() as conn:
+                result = conn.execute(query)
+                recent_scans = [
+                    {
+                        'scan_id': row[0],
+                        'status': row[1],
+                        'scan_started_at': row[2].isoformat() if row[2] else None,
+                        'scan_completed_at': row[3].isoformat() if row[3] else None
+                    }
+                    for row in result
+                ]
+
+            # Get total discovered endpoints
+            query = text("""
+                SELECT COUNT(*)
+                FROM discovery_results dr,
+                     jsonb_array_elements(dr.discovered_endpoints) AS endpoint
+                WHERE dr.status = 'completed'
+            """)
+
+            with self._db_client.engine.connect() as conn:
+                result = conn.execute(query)
+                total_endpoints = result.scalar() or 0
+
+            return {
+                "current_scan_id": self._current_scan_id,
+                "recent_scans": recent_scans,
+                "total_discovered_endpoints": total_endpoints
+            }
+        except Exception as e:
+            logger.error(f"Error getting discovery status: {e}")
+            return {
+                "current_scan_id": None,
+                "recent_scans": [],
+                "total_discovered_endpoints": 0
+            }
+
+    def get_scan_result(self, scan_id: str) -> Optional[Dict[str, Any]]:
+        """Get result for a specific scan from database"""
+        try:
+            query = text("""
+                SELECT scan_id, status, discovered_endpoints,
+                       scan_started_at, scan_completed_at, error_message
+                FROM discovery_results
+                WHERE scan_id = :scan_id
+            """)
+
+            with self._db_client.engine.connect() as conn:
+                result = conn.execute(query, {"scan_id": scan_id})
+                row = result.fetchone()
+
+                if row:
+                    return {
+                        "scan_id": row[0],
+                        "status": row[1],
+                        "discovered_endpoints": row[2] if row[2] else [],
+                        "scan_started_at": row[3].isoformat() if row[3] else None,
+                        "scan_completed_at": row[4].isoformat() if row[4] else None,
+                        "error_message": row[5]
+                    }
+                return None
+        except Exception as e:
+            logger.error(f"Error getting scan result {scan_id}: {e}")
+            return None
+
+    async def discover_all_protocols(self, scan_id: str) -> None:
+        """
+        Discover all available protocols (simulated for now)
+        """
+        try:
+            # Store scan as started
+            await self._store_scan_result(
+                scan_id=scan_id,
+                status=DiscoveryStatus.RUNNING,
+                discovered_endpoints=[],
+                scan_started_at=datetime.now(),
+                scan_completed_at=None,
+                error_message=None
+            )
+
+            # Simulate discovery process
+            await asyncio.sleep(2)
+
+            # Create mock discovered endpoints
+            discovered_endpoints = [
+                {
+                    "protocol_type": "modbus_tcp",
+                    "address": "192.168.1.100",
+                    "port": 502,
+                    "device_id": "pump_controller_001",
+                    "device_name": "Main Pump Controller",
+                    "capabilities": ["read_coils", "read_holding_registers"],
+                    "response_time": 0.15,
+                    "discovered_at": datetime.now().isoformat()
+                },
+                {
+                    "protocol_type": "opc_ua",
+                    "address": "192.168.1.101",
+                    "port": 4840,
+                    "device_id": "scada_server_001",
+                    "device_name": "SCADA Server",
+                    "capabilities": ["browse", "read", "write"],
+                    "response_time": 0.25,
+                    "discovered_at": datetime.now().isoformat()
+                }
+            ]
+
+            # Store completed scan
+            await self._store_scan_result(
+                scan_id=scan_id,
+                status=DiscoveryStatus.COMPLETED,
+                discovered_endpoints=discovered_endpoints,
+                scan_started_at=datetime.now(),
+                scan_completed_at=datetime.now(),
+                error_message=None
+            )
+
+            logger.info(f"Discovery scan {scan_id} completed with {len(discovered_endpoints)} endpoints")
+
+        except Exception as e:
+            logger.error(f"Discovery scan {scan_id} failed: {e}")
+            await self._store_scan_result(
+                scan_id=scan_id,
+                status=DiscoveryStatus.FAILED,
+                discovered_endpoints=[],
+                scan_started_at=datetime.now(),
+                scan_completed_at=datetime.now(),
+                error_message=str(e)
+            )
+
+    async def _store_scan_result(
+        self,
+        scan_id: str,
+        status: DiscoveryStatus,
+        discovered_endpoints: List[Dict[str, Any]],
+        scan_started_at: datetime,
+        scan_completed_at: Optional[datetime] = None,
+        error_message: Optional[str] = None
+    ) -> None:
+        """Store scan result in database"""
+        try:
+            query = text("""
+                INSERT INTO discovery_results
+                    (scan_id, status, discovered_endpoints, scan_started_at, scan_completed_at, error_message)
+                VALUES (:scan_id, :status, :discovered_endpoints, :scan_started_at, :scan_completed_at, :error_message)
+                ON CONFLICT (scan_id) DO UPDATE SET
+                    status = EXCLUDED.status,
+                    discovered_endpoints = EXCLUDED.discovered_endpoints,
+                    scan_completed_at = EXCLUDED.scan_completed_at,
+                    error_message = EXCLUDED.error_message
+            """)
+
+            with self._db_client.engine.connect() as conn:
+                conn.execute(query, {
+                    "scan_id": scan_id,
+                    "status": status.value,
+                    "discovered_endpoints": json.dumps(discovered_endpoints),
+                    "scan_started_at": scan_started_at,
+                    "scan_completed_at": scan_completed_at,
+                    "error_message": error_message
+                })
+                conn.commit()
+
+        except Exception as e:
+            logger.error(f"Failed to store scan result {scan_id}: {e}")
+
+
+# Global instance
+persistent_discovery_service = PersistentProtocolDiscoveryService()
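A minimal usage sketch of the new module, assuming it runs with the database reachable and the `discovery_results` table created by the init script above:

```python
# Exercises the persistent service end to end using only APIs defined above.
import asyncio
from src.discovery.protocol_discovery_persistent import persistent_discovery_service

async def main():
    await persistent_discovery_service.initialize()
    await persistent_discovery_service.discover_all_protocols("scan_demo_001")
    print(persistent_discovery_service.get_discovery_status())
    print(persistent_discovery_service.get_scan_result("scan_demo_001"))

asyncio.run(main())
```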
@@ -177,6 +177,11 @@ class CalejoControlAdapter:
         await self.db_client.connect()
         logger.info("database_connected")
 
+        # Initialize persistent discovery service
+        from src.discovery.protocol_discovery_persistent import persistent_discovery_service
+        await persistent_discovery_service.initialize()
+        logger.info("persistent_discovery_service_initialized")
+
         # Load safety limits
         await self.safety_enforcer.load_safety_limits()
         logger.info("safety_limits_loaded")