logs
@@ -71,6 +71,7 @@ type FunctionExecution struct {
 	Error       string        `json:"error,omitempty" db:"error"`
 	Duration    time.Duration `json:"duration" db:"duration"`
 	MemoryUsed  int           `json:"memory_used" db:"memory_used"`
+	Logs        []string      `json:"logs,omitempty" db:"logs"`
 	ContainerID string        `json:"container_id,omitempty" db:"container_id"`
 	ExecutorID  string        `json:"executor_id" db:"executor_id"`
 	CreatedAt   time.Time     `json:"created_at" db:"created_at"`
@@ -8,6 +8,7 @@ import (
 	"time"

 	"github.com/google/uuid"
+	"github.com/lib/pq"
 	"go.uber.org/zap"

 	"github.com/RyanCopley/skybridge/faas/internal/domain"
@@ -97,7 +98,7 @@ func (r *executionRepository) Create(ctx context.Context, execution *domain.Func
 func (r *executionRepository) GetByID(ctx context.Context, id uuid.UUID) (*domain.FunctionExecution, error) {
 	query := `
 		SELECT id, function_id, status, input, output, error, duration, memory_used,
-		       container_id, executor_id, created_at, started_at, completed_at
+		       logs, container_id, executor_id, created_at, started_at, completed_at
 		FROM executions WHERE id = $1`

 	execution := &domain.FunctionExecution{}
@@ -106,7 +107,7 @@ func (r *executionRepository) GetByID(ctx context.Context, id uuid.UUID) (*domai
 	err := r.db.QueryRowContext(ctx, query, id).Scan(
 		&execution.ID, &execution.FunctionID, &execution.Status, &execution.Input,
 		&execution.Output, &execution.Error, &durationInterval, &execution.MemoryUsed,
-		&execution.ContainerID, &execution.ExecutorID, &execution.CreatedAt,
+		pq.Array(&execution.Logs), &execution.ContainerID, &execution.ExecutorID, &execution.CreatedAt,
 		&execution.StartedAt, &execution.CompletedAt,
 	)

@@ -135,12 +136,13 @@ func (r *executionRepository) Update(ctx context.Context, id uuid.UUID, executio
 	query := `
 		UPDATE executions
 		SET status = $2, output = $3, error = $4, duration = $5, memory_used = $6,
-		    container_id = $7, started_at = $8, completed_at = $9
+		    logs = $7, container_id = $8, started_at = $9, completed_at = $10
 		WHERE id = $1`

 	_, err := r.db.ExecContext(ctx, query,
 		id, execution.Status, jsonField(execution.Output), execution.Error,
-		durationToInterval(execution.Duration), execution.MemoryUsed, execution.ContainerID,
+		durationToInterval(execution.Duration), execution.MemoryUsed,
+		pq.Array(execution.Logs), execution.ContainerID,
 		execution.StartedAt, execution.CompletedAt,
 	)

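Note: the repository changes above all follow one pattern: the new `logs TEXT[]` column is written and read through `pq.Array`, which adapts a Go `[]string` to a Postgres array value. A minimal sketch of that round trip, assuming a reachable database and an `executions` table shaped like the one this commit's migration creates (the DSN and the zero UUID are placeholders, not project values):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq" // also registers the "postgres" driver via its init
)

func main() {
	// Assumed local DSN, for illustration only.
	db, err := sql.Open("postgres", "postgres://localhost/faas?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	id := "00000000-0000-0000-0000-000000000000" // placeholder execution id
	logs := []string{"Function execution started", "Function execution completed"}

	// Write a []string into the TEXT[] column via pq.Array.
	if _, err := db.Exec(`UPDATE executions SET logs = $2 WHERE id = $1`, id, pq.Array(logs)); err != nil {
		log.Fatal(err)
	}

	// Read it back the same way, mirroring the Scan calls in the repository.
	var got []string
	if err := db.QueryRow(`SELECT logs FROM executions WHERE id = $1`, id).Scan(pq.Array(&got)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(got)
}
```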
@@ -209,7 +211,7 @@ func (r *executionRepository) List(ctx context.Context, functionID *uuid.UUID, l
 		err := rows.Scan(
 			&execution.ID, &execution.FunctionID, &execution.Status, &execution.Input,
 			&execution.Output, &execution.Error, &durationInterval, &execution.MemoryUsed,
-			&execution.ContainerID, &execution.ExecutorID, &execution.CreatedAt,
+			pq.Array(&execution.Logs), &execution.ContainerID, &execution.ExecutorID, &execution.CreatedAt,
 			&execution.StartedAt, &execution.CompletedAt,
 		)

@@ -245,7 +247,7 @@ func (r *executionRepository) GetByFunctionID(ctx context.Context, functionID uu
 func (r *executionRepository) GetByStatus(ctx context.Context, status domain.ExecutionStatus, limit, offset int) ([]*domain.FunctionExecution, error) {
 	query := `
 		SELECT id, function_id, status, input, output, error, duration, memory_used,
-		       container_id, executor_id, created_at, started_at, completed_at
+		       logs, container_id, executor_id, created_at, started_at, completed_at
 		FROM executions WHERE status = $1
 		ORDER BY created_at DESC LIMIT $2 OFFSET $3`

@@ -264,7 +266,7 @@ func (r *executionRepository) GetByStatus(ctx context.Context, status domain.Exe
 		err := rows.Scan(
 			&execution.ID, &execution.FunctionID, &execution.Status, &execution.Input,
 			&execution.Output, &execution.Error, &durationInterval, &execution.MemoryUsed,
-			&execution.ContainerID, &execution.ExecutorID, &execution.CreatedAt,
+			pq.Array(&execution.Logs), &execution.ContainerID, &execution.ExecutorID, &execution.CreatedAt,
 			&execution.StartedAt, &execution.CompletedAt,
 		)

@@ -4,6 +4,8 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"io"
+	"regexp"
 	"strings"
 	"time"

@@ -341,7 +343,15 @@ func (s *SimpleDockerRuntime) createContainer(ctx context.Context, function *dom
			echo "const handler = require('/tmp/index.js').handler;
			const input = process.env.FUNCTION_INPUT ? JSON.parse(process.env.FUNCTION_INPUT) : {};
			const context = { functionName: '` + function.Name + `' };
-			handler(input, context).then(result => console.log(JSON.stringify(result))).catch(err => { console.error(err); process.exit(1); });" > /tmp/runner.js &&
+			console.log('<stdout>');
+			handler(input, context).then(result => {
+				console.log('</stdout>');
+				console.log('<result>' + JSON.stringify(result) + '</result>');
+			}).catch(err => {
+				console.log('</stdout>');
+				console.error('<result>{\"error\": \"' + err.message + '\"}</result>');
+				process.exit(1);
+			});" > /tmp/runner.js &&
			node /tmp/runner.js
		`}
	case "python", "python3", "python3.9", "python3.10", "python3.11":
@@ -350,8 +360,15 @@ func (s *SimpleDockerRuntime) createContainer(ctx context.Context, function *dom
			echo "import json, os, sys; sys.path.insert(0, '/tmp'); from handler import handler;
			input_data = json.loads(os.environ.get('FUNCTION_INPUT', '{}'));
			context = {'function_name': '` + function.Name + `'};
-			result = handler(input_data, context);
-			print(json.dumps(result))" > /tmp/runner.py &&
+			print('<stdout>');
+			try:
+			    result = handler(input_data, context);
+			    print('</stdout>');
+			    print('<result>' + json.dumps(result) + '</result>');
+			except Exception as e:
+			    print('</stdout>');
+			    print('<result>{\"error\": \"' + str(e) + '\"}</result>', file=sys.stderr);
+			    sys.exit(1);" > /tmp/runner.py &&
			python /tmp/runner.py
		`}
	default:
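Note: both runner scripts now wrap container output in a simple tag protocol: anything the handler prints lands between `<stdout>` and `</stdout>`, and the return value (or an error object) is emitted on its own `<result>...</result>` line. A hedged sketch of what the raw container output looks like and how the two parts separate, using the same regex approach as the `parseContainerOutput` helper added further down in this diff; the sample payload is illustrative, not captured from the project:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical raw output from a function that logged one line
	// and returned {"ok": true}.
	raw := `<stdout>
processing order 42
</stdout>
<result>{"ok":true}</result>`

	// (?s) lets . span newlines, as in the commit's regexes.
	stdoutRe := regexp.MustCompile(`(?s)<stdout>(.*?)</stdout>`)
	resultRe := regexp.MustCompile(`(?s)<result>(.*?)</result>`)

	fmt.Printf("logs:   %q\n", stdoutRe.FindStringSubmatch(raw)[1]) // log lines go to the logs column
	fmt.Printf("result: %q\n", resultRe.FindStringSubmatch(raw)[1]) // result goes to the output column
}
```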
@@ -386,20 +403,48 @@ func (s *SimpleDockerRuntime) getContainerLogs(ctx context.Context, containerID
 	logs, err := s.client.ContainerLogs(ctx, containerID, container.LogsOptions{
 		ShowStdout: true,
 		ShowStderr: true,
-		Tail:       "50", // Get last 50 lines
+		Tail:       "100", // Get last 100 lines
 	})
 	if err != nil {
 		return nil, fmt.Errorf("failed to get container logs: %w", err)
 	}
 	defer logs.Close()

-	// For simplicity, we'll return a placeholder
-	// In a real implementation, you'd parse the log output
-	return []string{
-		"Container logs would appear here",
-		"Function execution started",
-		"Function execution completed",
-	}, nil
+	// Read the actual logs content
+	logData, err := io.ReadAll(logs)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read log data: %w", err)
+	}
+
+	// Parse Docker logs to remove binary headers
+	rawOutput := parseDockerLogs(logData)
+
+	// Parse the XML-tagged output to extract logs
+	parsedLogs, _, err := s.parseContainerOutput(rawOutput)
+	if err != nil {
+		s.logger.Warn("Failed to parse container output for logs", zap.Error(err))
+		// Fallback to raw output split by lines
+		lines := strings.Split(strings.TrimSpace(rawOutput), "\n")
+		cleanLines := make([]string, 0, len(lines))
+		for _, line := range lines {
+			if trimmed := strings.TrimSpace(line); trimmed != "" {
+				cleanLines = append(cleanLines, trimmed)
+			}
+		}
+		return cleanLines, nil
+	}
+
+	// If no logs were parsed from <stdout> tags, fallback to basic parsing
+	if len(parsedLogs) == 0 {
+		lines := strings.Split(strings.TrimSpace(rawOutput), "\n")
+		for _, line := range lines {
+			if trimmed := strings.TrimSpace(line); trimmed != "" && !strings.Contains(trimmed, "<result>") && !strings.Contains(trimmed, "</result>") {
+				parsedLogs = append(parsedLogs, trimmed)
+			}
+		}
+	}
+
+	return parsedLogs, nil
 }

 func (s *SimpleDockerRuntime) getContainerOutput(ctx context.Context, containerID string) (json.RawMessage, error) {
@@ -415,36 +460,143 @@ func (s *SimpleDockerRuntime) getContainerOutput(ctx context.Context, containerI
 	defer logs.Close()

 	// Read the actual logs content
-	buf := make([]byte, 4096)
-	var output strings.Builder
-	for {
-		n, err := logs.Read(buf)
-		if n > 0 {
-			// Docker logs include 8-byte headers, skip them for stdout content
-			if n > 8 {
-				output.Write(buf[8:n])
-			}
-		}
-		if err != nil {
-			break
-		}
-	}
-
-	logContent := strings.TrimSpace(output.String())
-
-	// Try to parse as JSON first, if that fails, wrap in a JSON object
-	if json.Valid([]byte(logContent)) && logContent != "" {
-		return json.RawMessage(logContent), nil
-	} else {
-		// Return the output wrapped in a JSON object
-		result := map[string]interface{}{
-			"result":    "Function executed successfully",
-			"output":    logContent,
-			"timestamp": time.Now().UTC(),
-		}
-		resultJSON, _ := json.Marshal(result)
-		return json.RawMessage(resultJSON), nil
-	}
+	logData, err := io.ReadAll(logs)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read log data: %w", err)
+	}
+
+	// Parse Docker logs to remove binary headers
+	rawOutput := parseDockerLogs(logData)
+
+	// Parse the XML-tagged output to extract the result
+	_, result, err := s.parseContainerOutput(rawOutput)
+	if err != nil {
+		s.logger.Warn("Failed to parse container output for result", zap.Error(err))
+		// Fallback to legacy parsing
+		logContent := strings.TrimSpace(rawOutput)
+		if json.Valid([]byte(logContent)) && logContent != "" {
+			return json.RawMessage(logContent), nil
+		} else {
+			// Return the output wrapped in a JSON object
+			fallbackResult := map[string]interface{}{
+				"result":    "Function executed successfully",
+				"output":    logContent,
+				"timestamp": time.Now().UTC(),
+			}
+			resultJSON, _ := json.Marshal(fallbackResult)
+			return json.RawMessage(resultJSON), nil
+		}
+	}
+
+	// If no result was found in XML tags, provide a default success result
+	if result == nil {
+		defaultResult := map[string]interface{}{
+			"result":    "Function executed successfully",
+			"message":   "No result output found",
+			"timestamp": time.Now().UTC(),
+		}
+		resultJSON, _ := json.Marshal(defaultResult)
+		return json.RawMessage(resultJSON), nil
+	}
+
+	return result, nil
+}
+
+// parseDockerLogs parses Docker log output which includes 8-byte headers
+func parseDockerLogs(logData []byte) string {
+	var cleanOutput strings.Builder
+
+	for len(logData) > 8 {
+		// Docker log header: [STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4]
+		// Skip the first 8 bytes (header)
+		headerSize := 8
+		if len(logData) < headerSize {
+			break
+		}
+
+		// Extract size from bytes 4-7 (big endian)
+		size := int(logData[4])<<24 + int(logData[5])<<16 + int(logData[6])<<8 + int(logData[7])
+
+		if len(logData) < headerSize+size {
+			// If the remaining data is less than expected size, take what we have
+			size = len(logData) - headerSize
+		}
+
+		if size > 0 {
+			// Extract the actual log content
+			content := string(logData[headerSize : headerSize+size])
+			cleanOutput.WriteString(content)
+		}
+
+		// Move to next log entry
+		logData = logData[headerSize+size:]
+	}
+
+	return cleanOutput.String()
+}
+
+// parseContainerOutput parses container output that contains <stdout> and <result> XML tags
+func (s *SimpleDockerRuntime) parseContainerOutput(rawOutput string) (logs []string, result json.RawMessage, err error) {
+	// Extract stdout content (logs) - use DOTALL flag for multiline matching
+	stdoutRegex := regexp.MustCompile(`(?s)<stdout>(.*?)</stdout>`)
+	stdoutMatch := stdoutRegex.FindStringSubmatch(rawOutput)
+	if len(stdoutMatch) > 1 {
+		stdoutContent := strings.TrimSpace(stdoutMatch[1])
+		if stdoutContent != "" {
+			// Split stdout content into lines for logs
+			lines := strings.Split(stdoutContent, "\n")
+			// Clean up empty lines and trim whitespace
+			cleanLogs := make([]string, 0, len(lines))
+			for _, line := range lines {
+				if trimmed := strings.TrimSpace(line); trimmed != "" {
+					cleanLogs = append(cleanLogs, trimmed)
+				}
+			}
+			logs = cleanLogs
+		}
+	}
+
+	// Extract result content - use DOTALL flag for multiline matching
+	resultRegex := regexp.MustCompile(`(?s)<result>(.*?)</result>`)
+	resultMatch := resultRegex.FindStringSubmatch(rawOutput)
+	if len(resultMatch) > 1 {
+		resultContent := strings.TrimSpace(resultMatch[1])
+		if resultContent != "" {
+			// Validate JSON
+			if json.Valid([]byte(resultContent)) {
+				result = json.RawMessage(resultContent)
+			} else {
+				// If not valid JSON, wrap it
+				wrappedResult := map[string]interface{}{
+					"output": resultContent,
+				}
+				resultJSON, _ := json.Marshal(wrappedResult)
+				result = json.RawMessage(resultJSON)
+			}
+		}
+	}
+
+	// If no result tag found, treat entire output as result (fallback for non-tagged output)
+	if result == nil {
+		// Remove any XML tags from the output for fallback
+		cleanOutput := regexp.MustCompile(`(?s)<[^>]*>`).ReplaceAllString(rawOutput, "")
+		cleanOutput = strings.TrimSpace(cleanOutput)
+
+		if cleanOutput != "" {
+			if json.Valid([]byte(cleanOutput)) {
+				result = json.RawMessage(cleanOutput)
+			} else {
+				// Wrap non-JSON output
+				wrappedResult := map[string]interface{}{
+					"output": cleanOutput,
+				}
+				resultJSON, _ := json.Marshal(wrappedResult)
+				result = json.RawMessage(resultJSON)
+			}
+		}
+	}
+
+	return logs, result, nil
 }

 func (s *SimpleDockerRuntime) cleanupContainer(ctx context.Context, containerID string) {
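Note: `parseDockerLogs` undoes Docker's multiplexed log framing: when a container runs without a TTY, each chunk the daemon streams back is prefixed with an 8-byte header whose first byte is the stream type and whose last four bytes are a big-endian payload length. A self-contained sketch of that framing and the same stripping loop, using a hand-built frame instead of a live Docker connection (the payload strings are made up):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// buildFrame wraps a payload in the 8-byte header Docker uses for
// multiplexed (non-TTY) log streams: [stream, 0, 0, 0, len (big endian)].
func buildFrame(stream byte, payload string) []byte {
	header := make([]byte, 8)
	header[0] = stream // 1 = stdout, 2 = stderr
	binary.BigEndian.PutUint32(header[4:], uint32(len(payload)))
	return append(header, payload...)
}

func main() {
	logData := append(
		buildFrame(1, "<stdout>\nhello from the function\n</stdout>\n"),
		buildFrame(1, "<result>{\"ok\":true}</result>\n")...,
	)

	// Same stripping logic as parseDockerLogs in this commit.
	var clean []byte
	for len(logData) > 8 {
		size := int(binary.BigEndian.Uint32(logData[4:8]))
		if len(logData) < 8+size {
			size = len(logData) - 8
		}
		clean = append(clean, logData[8:8+size]...)
		logData = logData[8+size:]
	}
	fmt.Print(string(clean)) // headers removed, tags intact for parseContainerOutput
}
```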
@@ -142,6 +142,7 @@ func (s *executionService) executeSync(ctx context.Context, execution *domain.Fu
 	execution.Error = result.Error
 	execution.Duration = result.Duration
 	execution.MemoryUsed = result.MemoryUsed
+	execution.Logs = result.Logs

 	// Check if the result indicates a timeout
 	if result.Error != "" {
@@ -219,6 +220,7 @@ func (s *executionService) executeAsync(ctx context.Context, execution *domain.F
 	execution.Error = result.Error
 	execution.Duration = result.Duration
 	execution.MemoryUsed = result.MemoryUsed
+	execution.Logs = result.Logs

 	// Check if the result indicates a timeout
 	if result.Error != "" {
@@ -327,31 +329,18 @@ func (s *executionService) Cancel(ctx context.Context, id uuid.UUID, userID stri
 }

 func (s *executionService) GetLogs(ctx context.Context, id uuid.UUID) ([]string, error) {
-	// Get execution
+	// Get execution with logs from database
 	execution, err := s.executionRepo.GetByID(ctx, id)
 	if err != nil {
 		return nil, fmt.Errorf("execution not found: %w", err)
 	}

-	// Get function to determine runtime
-	function, err := s.functionRepo.GetByID(ctx, execution.FunctionID)
-	if err != nil {
-		return nil, fmt.Errorf("function not found: %w", err)
-	}
-
-	// Get runtime backend
-	backend, err := s.runtimeService.GetBackend(ctx, string(function.Runtime))
-	if err != nil {
-		return nil, fmt.Errorf("failed to get runtime backend: %w", err)
-	}
-
-	// Get logs from runtime
-	logs, err := backend.GetLogs(ctx, id)
-	if err != nil {
-		return nil, fmt.Errorf("failed to get logs: %w", err)
-	}
-
-	return logs, nil
+	// Return logs from execution record
+	if execution.Logs == nil {
+		return []string{}, nil
+	}
+
+	return execution.Logs, nil
 }

 func (s *executionService) GetRunningExecutions(ctx context.Context) ([]*domain.FunctionExecution, error) {
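Note: with this change `GetLogs` no longer round-trips to the runtime backend; it returns whatever the repository loaded into `execution.Logs`, normalizing a nil slice to an empty one so callers always get a JSON array. A hedged sketch of how a caller might consume it; the interface, response shape, and stub here are illustrative, not the project's actual HTTP layer, though the frontend below does read `response.data.logs`:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

// LogsGetter is the slice of the execution service this sketch relies on (assumed).
type LogsGetter interface {
	GetLogs(ctx context.Context, id uuid.UUID) ([]string, error)
}

// logsResponse mirrors the {"logs": [...]} shape the modal reads (assumed).
type logsResponse struct {
	Logs []string `json:"logs"`
}

func renderLogs(ctx context.Context, svc LogsGetter, id uuid.UUID) (string, error) {
	logs, err := svc.GetLogs(ctx, id)
	if err != nil {
		return "", fmt.Errorf("fetch logs: %w", err)
	}
	body, err := json.Marshal(logsResponse{Logs: logs})
	if err != nil {
		return "", err
	}
	return string(body), nil
}

// stubService stands in for the real executionService in this sketch.
type stubService struct{ logs []string }

func (s stubService) GetLogs(ctx context.Context, id uuid.UUID) ([]string, error) {
	if s.logs == nil {
		return []string{}, nil // same nil-to-empty normalization as the service change above
	}
	return s.logs, nil
}

func main() {
	out, _ := renderLogs(context.Background(), stubService{logs: []string{"started", "completed"}}, uuid.New())
	fmt.Println(out) // {"logs":["started","completed"]}
}
```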
faas/migrations/002_add_execution_logs.down.sql (new file)
@@ -0,0 +1,2 @@
+-- Remove logs column from executions table
+ALTER TABLE executions DROP COLUMN IF EXISTS logs;

faas/migrations/002_add_execution_logs.up.sql (new file)
@@ -0,0 +1,2 @@
+-- Add logs column to executions table to store function execution logs
+ALTER TABLE executions ADD COLUMN logs TEXT[];
@@ -1,4 +1,4 @@
-import React, { useState } from 'react';
+import React, { useState, useEffect, useRef } from 'react';
 import {
   Modal,
   Button,
@@ -39,6 +39,40 @@ export const ExecutionModal: React.FC<ExecutionModalProps> = ({
   const [execution, setExecution] = useState<FunctionExecution | null>(null);
   const [logs, setLogs] = useState<string[]>([]);
   const [loadingLogs, setLoadingLogs] = useState(false);
+  const [autoRefreshLogs, setAutoRefreshLogs] = useState(false);
+  const pollIntervalRef = useRef<NodeJS.Timeout | null>(null);
+  const logsPollIntervalRef = useRef<NodeJS.Timeout | null>(null);
+
+  const stopLogsAutoRefresh = () => {
+    if (logsPollIntervalRef.current) {
+      clearInterval(logsPollIntervalRef.current);
+      logsPollIntervalRef.current = null;
+    }
+    setAutoRefreshLogs(false);
+  };
+
+  // Cleanup intervals on unmount or when modal closes
+  useEffect(() => {
+    if (!opened) {
+      // Stop auto-refresh when modal closes
+      stopLogsAutoRefresh();
+      if (pollIntervalRef.current) {
+        clearTimeout(pollIntervalRef.current);
+      }
+    }
+  }, [opened]);
+
+  // Cleanup intervals on unmount
+  useEffect(() => {
+    return () => {
+      if (pollIntervalRef.current) {
+        clearTimeout(pollIntervalRef.current);
+      }
+      if (logsPollIntervalRef.current) {
+        clearInterval(logsPollIntervalRef.current);
+      }
+    };
+  }, []);

   if (!func) return null;

@@ -69,8 +103,13 @@ export const ExecutionModal: React.FC<ExecutionModalProps> = ({
       setResult(response.data);

       if (async) {
-        // Poll for execution status
+        // Poll for execution status and start auto-refreshing logs
         pollExecution(response.data.execution_id);
+      } else {
+        // For synchronous executions, load logs immediately
+        if (response.data.execution_id) {
+          loadLogs(response.data.execution_id);
+        }
       }

       notifications.show({
@@ -91,19 +130,24 @@ export const ExecutionModal: React.FC<ExecutionModalProps> = ({
   };

   const pollExecution = async (executionId: string) => {
+    // Start auto-refreshing logs immediately for async executions
+    startLogsAutoRefresh(executionId);
+
     const poll = async () => {
       try {
         const response = await executionApi.getById(executionId);
         setExecution(response.data);

         if (response.data.status === 'running' || response.data.status === 'pending') {
-          setTimeout(poll, 2000); // Poll every 2 seconds
+          pollIntervalRef.current = setTimeout(poll, 2000); // Poll every 2 seconds
         } else {
-          // Execution completed, get logs
+          // Execution completed, stop auto-refresh and load final logs
+          stopLogsAutoRefresh();
           loadLogs(executionId);
         }
       } catch (error) {
         console.error('Error polling execution:', error);
+        stopLogsAutoRefresh();
       }
     };

@@ -122,6 +166,28 @@ export const ExecutionModal: React.FC<ExecutionModalProps> = ({
     }
   };

+  const startLogsAutoRefresh = (executionId: string) => {
+    // Clear any existing interval
+    if (logsPollIntervalRef.current) {
+      clearInterval(logsPollIntervalRef.current);
+    }
+
+    setAutoRefreshLogs(true);
+
+    // Load logs immediately
+    loadLogs(executionId);
+
+    // Set up auto-refresh every 2 seconds
+    logsPollIntervalRef.current = setInterval(async () => {
+      try {
+        const response = await executionApi.getLogs(executionId);
+        setLogs(response.data.logs || []);
+      } catch (error) {
+        console.error('Error auto-refreshing logs:', error);
+      }
+    }, 2000);
+  };
+
   const handleCancel = async () => {
     if (result && async) {
       try {
@@ -285,35 +351,60 @@ export const ExecutionModal: React.FC<ExecutionModalProps> = ({
             )}

             {/* Logs */}
-            {async && (
-              <div style={{ marginTop: '1rem' }}>
-                <Group justify="space-between" mb="xs">
-                  <Text size="sm" fw={500}>Logs:</Text>
-                  <Button
-                    size="xs"
-                    variant="light"
-                    leftSection={<IconRefresh size={12} />}
-                    onClick={() => result.execution_id && loadLogs(result.execution_id)}
-                    loading={loadingLogs}
-                  >
-                    Refresh
-                  </Button>
-                </Group>
-                <Paper bg="gray.9" p="sm" mah={200} style={{ overflow: 'auto' }}>
-                  {loadingLogs ? (
-                    <Group justify="center">
-                      <Loader size="sm" />
-                    </Group>
-                  ) : logs.length > 0 ? (
-                    <Text size="xs" c="white" component="pre">
-                      {logs.join('\n')}
-                    </Text>
-                  ) : (
-                    <Text size="xs" c="gray.5">No logs available</Text>
-                  )}
-                </Paper>
-              </div>
-            )}
+            <div style={{ marginTop: '1rem' }}>
+              <Group justify="space-between" mb="xs">
+                <Group gap="xs">
+                  <Text size="sm" fw={500}>Logs:</Text>
+                  {autoRefreshLogs && (
+                    <Badge size="xs" color="blue" variant="light">
+                      Auto-refreshing
+                    </Badge>
+                  )}
+                </Group>
+                <Group gap="xs">
+                  {result.execution_id && (
+                    <Button
+                      size="xs"
+                      variant={autoRefreshLogs ? "filled" : "light"}
+                      color={autoRefreshLogs ? "red" : "blue"}
+                      leftSection={<IconRefresh size={12} />}
+                      onClick={() => {
+                        if (autoRefreshLogs) {
+                          stopLogsAutoRefresh();
+                        } else {
+                          startLogsAutoRefresh(result.execution_id);
+                        }
+                      }}
+                    >
+                      {autoRefreshLogs ? 'Stop Auto-refresh' : 'Auto-refresh'}
+                    </Button>
+                  )}
+                  <Button
+                    size="xs"
+                    variant="light"
+                    leftSection={<IconRefresh size={12} />}
+                    onClick={() => result.execution_id && loadLogs(result.execution_id)}
+                    loading={loadingLogs}
+                    disabled={autoRefreshLogs}
+                  >
+                    Manual Refresh
+                  </Button>
+                </Group>
+              </Group>
+              <Paper bg="gray.9" p="sm" mah={200} style={{ overflow: 'auto' }}>
+                {loadingLogs ? (
+                  <Group justify="center">
+                    <Loader size="sm" />
+                  </Group>
+                ) : (logs.length > 0 || (execution?.logs && execution.logs.length > 0)) ? (
+                  <Text size="xs" c="white" component="pre">
+                    {(execution?.logs || logs).join('\n')}
+                  </Text>
+                ) : (
+                  <Text size="xs" c="gray.5">No logs available</Text>
+                )}
+              </Paper>
+            </div>
           </Paper>
         </>
       )}
@@ -35,6 +35,7 @@ export interface FunctionExecution {
   error?: string;
   duration?: number;
   memory_used?: number;
+  logs?: string[];
   container_id?: string;
   executor_id: string;
   created_at: string;