fix: unify on hardcoded /data mount path

This commit is contained in:
Ran
2026-02-24 03:25:35 +08:00
committed by 晨苒
parent 6cb80d30be
commit 5e12b5a53f
23 changed files with 96 additions and 96 deletions
+2 -9
View File
@@ -457,11 +457,7 @@ func provideToolGatewayService(log *slog.Logger, cfg config.Config, channelManag
memoryExec := mcpmemory.NewExecutor(log, memoryService, chatService, accountService)
webExec := mcpweb.NewExecutor(log, settingsService, searchProviderService)
inboxExec := mcpinbox.NewExecutor(log, inboxService)
execWorkDir := cfg.MCP.DataMount
if strings.TrimSpace(execWorkDir) == "" {
execWorkDir = config.DefaultDataMount
}
fsExec := mcpcontainer.NewExecutor(log, manager, execWorkDir)
fsExec := mcpcontainer.NewExecutor(log, manager, config.DefaultDataMount)
fedGateway := handlers.NewMCPFederationGateway(log, containerdHandler)
fedSource := mcpfederation.NewSource(log, fedGateway, mcpConnService)
@@ -482,10 +478,7 @@ func provideToolGatewayService(log *slog.Logger, cfg config.Config, channelManag
func provideMemoryHandler(log *slog.Logger, service *memory.Service, chatService *conversation.Service, accountService *accounts.Service, cfg config.Config, manager *mcp.Manager) *handlers.MemoryHandler {
h := handlers.NewMemoryHandler(log, service, chatService, accountService)
if manager != nil {
execWorkDir := cfg.MCP.DataMount
if strings.TrimSpace(execWorkDir) == "" {
execWorkDir = config.DefaultDataMount
}
execWorkDir := config.DefaultDataMount
h.SetMemoryFS(memory.NewMemoryFS(log, manager, execWorkDir))
}
return h
-1
View File
@@ -29,7 +29,6 @@ jwt_expires_in = "168h"
[mcp]
image = "memohai/mcp:latest"
data_root = "data"
data_mount = "/data"
[postgres]
host = "127.0.0.1"
-1
View File
@@ -27,7 +27,6 @@ jwt_expires_in = "168h"
# image = "docker.io/library/memoh-mcp:dev"
# snapshotter = "overlayfs"
# data_root = "data"
# data_mount = "/data"
[postgres]
host = "127.0.0.1"
-1
View File
@@ -26,7 +26,6 @@ namespace = "default"
image = "docker.io/library/memoh-mcp:latest"
snapshotter = "overlayfs"
data_root = "/opt/memoh/data"
data_mount = "/data"
## Postgres configuration
[postgres]
-1
View File
@@ -26,7 +26,6 @@ namespace = "default"
image = "docker.io/library/memoh-mcp:dev"
snapshotter = "overlayfs"
data_root = "data"
data_mount = "/data"
cni_bin_dir = "/opt/cni/bin"
cni_conf_dir = "/etc/cni/net.d"
-2
View File
@@ -29,7 +29,6 @@ namespace = "default"
image = "docker.io/library/memoh-mcp:latest"
snapshotter = "overlayfs"
data_root = "data"
data_mount = "/data"
[postgres]
host = "127.0.0.1"
@@ -104,7 +103,6 @@ MCP (Model Context Protocol) container configuration. Each bot runs in a contain
| `image` | string | `"docker.io/library/memoh-mcp:latest"` | MCP container image |
| `snapshotter` | string | `"overlayfs"` | Containerd snapshotter |
| `data_root` | string | `"data"` | Host path for bot data (Docker: `/opt/memoh/data`) |
| `data_mount` | string | `"/data"` | Path inside container where data is mounted |
### `[postgres]`
+15 -11
View File
@@ -44,7 +44,7 @@ func (d *fakeDBTX) QueryRow(ctx context.Context, sql string, args ...any) pgx.Ro
func makeBotRow(botID, ownerUserID pgtype.UUID, botType string, allowGuest bool) *fakeRow {
return &fakeRow{
scanFunc: func(dest ...any) error {
if len(dest) < 17 {
if len(dest) < 21 {
return pgx.ErrNoRows
}
*dest[0].(*pgtype.UUID) = botID
@@ -54,16 +54,20 @@ func makeBotRow(botID, ownerUserID pgtype.UUID, botType string, allowGuest bool)
*dest[4].(*pgtype.Text) = pgtype.Text{}
*dest[5].(*bool) = true
*dest[6].(*string) = BotStatusReady
*dest[7].(*int32) = 30
*dest[8].(*string) = "en"
*dest[9].(*bool) = allowGuest
*dest[10].(*pgtype.UUID) = pgtype.UUID{}
*dest[11].(*pgtype.UUID) = pgtype.UUID{}
*dest[12].(*pgtype.UUID) = pgtype.UUID{}
*dest[13].(*pgtype.UUID) = pgtype.UUID{}
*dest[14].(*[]byte) = []byte(`{}`)
*dest[15].(*pgtype.Timestamptz) = pgtype.Timestamptz{}
*dest[16].(*pgtype.Timestamptz) = pgtype.Timestamptz{}
*dest[7].(*int32) = 30 // MaxContextLoadTime
*dest[8].(*int32) = 4096 // MaxContextTokens
*dest[9].(*int32) = 10 // MaxInboxItems
*dest[10].(*string) = "en"
*dest[11].(*bool) = allowGuest
*dest[12].(*bool) = false // ReasoningEnabled
*dest[13].(*string) = "medium" // ReasoningEffort
*dest[14].(*pgtype.UUID) = pgtype.UUID{}
*dest[15].(*pgtype.UUID) = pgtype.UUID{}
*dest[16].(*pgtype.UUID) = pgtype.UUID{}
*dest[17].(*pgtype.UUID) = pgtype.UUID{}
*dest[18].(*[]byte) = []byte(`{}`)
*dest[19].(*pgtype.Timestamptz) = pgtype.Timestamptz{}
*dest[20].(*pgtype.Timestamptz) = pgtype.Timestamptz{}
return nil
},
}
+18 -7
View File
@@ -7,6 +7,7 @@ import (
"io"
"log/slog"
"net/http"
"path/filepath"
"regexp"
"strings"
"sync"
@@ -224,6 +225,9 @@ func (p *ChannelInboundProcessor) HandleInbound(ctx context.Context, cfg channel
slog.Int("attachments", len(attachments)),
)
}
if !strings.EqualFold(identity.BotType, "personal") {
p.persistInboundUser(ctx, resolved.RouteID, identity, msg, text, attachments, "passive_sync")
}
p.createInboxItem(ctx, identity, msg, text, attachments, resolved.RouteID)
return nil
}
@@ -1658,10 +1662,10 @@ func isHTTPURL(raw string) bool {
// resolveContainerPathAsset attempts to match a container-internal file path
// to an existing media asset by extracting the storage key from the path.
// For non-/data/media/ paths, it ingests the file into the media store first.
// For non-media-marker paths, it ingests the file into the media store first.
// Returns true if the asset was resolved and item was updated.
func (p *ChannelInboundProcessor) resolveContainerPathAsset(ctx context.Context, botID, accessPath string, item *channel.Attachment) bool {
// Try /data/media/ lookup first.
// Try media marker lookup first.
storageKey := extractStorageKey(accessPath, botID)
if storageKey != "" {
asset, err := p.mediaService.GetByStorageKey(ctx, botID, storageKey)
@@ -1671,8 +1675,12 @@ func (p *ChannelInboundProcessor) resolveContainerPathAsset(ctx context.Context,
}
}
// For any /data/ path, ingest the file into media store.
if strings.HasPrefix(accessPath, "/data/") {
// For any path starting with data mount, ingest the file into media store.
dataPrefix := "/data"
if !strings.HasSuffix(dataPrefix, "/") {
dataPrefix += "/"
}
if strings.HasPrefix(accessPath, dataPrefix) {
asset, err := p.mediaService.IngestContainerFile(ctx, botID, accessPath)
if err != nil {
if p.logger != nil {
@@ -1705,8 +1713,11 @@ func applyAssetToAttachment(asset media.Asset, botID string, item *channel.Attac
// extractStorageKey derives the media storage key from a container-internal
// access path. The expected path format is /data/media/<storage_key>.
func extractStorageKey(accessPath, _ string) string {
const marker = "/data/media/"
func extractStorageKey(accessPath string, botID string) string {
marker := filepath.Join("/data", "media")
if !strings.HasSuffix(marker, "/") {
marker += "/"
}
idx := strings.Index(accessPath, marker)
if idx < 0 {
return ""
@@ -1826,7 +1837,7 @@ func mapChannelToChatAttachments(attachments []channel.Attachment) []conversatio
ca := conversation.ChatAttachment{
Type: string(att.Type),
PlatformKey: att.PlatformKey,
ContentHash: att.ContentHash,
ContentHash: att.ContentHash,
Name: att.Name,
Mime: attachment.NormalizeMime(att.Mime),
Size: att.Size,
+2
View File
@@ -29,6 +29,7 @@ type InboundIdentity struct {
UserID string
DisplayName string
AvatarURL string
BotType string
ForceReply bool
}
@@ -212,6 +213,7 @@ func (r *IdentityResolver) Resolve(ctx context.Context, cfg channel.ChannelConfi
if err != nil {
return state, err
}
state.Identity.BotType = botType
if strings.EqualFold(strings.TrimSpace(botType), "personal") {
ownerUserID, err := r.policy.BotOwnerUserID(ctx, botID)
if err != nil {
-2
View File
@@ -74,7 +74,6 @@ type MCPConfig struct {
Image string `toml:"image"`
Snapshotter string `toml:"snapshotter"`
DataRoot string `toml:"data_root"`
DataMount string `toml:"data_mount"`
CNIBinaryDir string `toml:"cni_bin_dir"`
CNIConfigDir string `toml:"cni_conf_dir"`
}
@@ -136,7 +135,6 @@ func Load(path string) (Config, error) {
MCP: MCPConfig{
Image: DefaultMCPImage,
DataRoot: DefaultDataRoot,
DataMount: DefaultDataMount,
CNIBinaryDir: DefaultCNIBinaryDir,
CNIConfigDir: DefaultCNIConfigDir,
},
+2 -8
View File
@@ -181,10 +181,7 @@ func (h *ContainerdHandler) CreateContainer(c echo.Context) error {
if err != nil {
h.logger.Warn("filepath.Abs failed", slog.Any("error", err))
}
dataMount := strings.TrimSpace(h.cfg.DataMount)
if dataMount == "" {
dataMount = config.DefaultDataMount
}
dataMount := config.DefaultDataMount
dataDir := filepath.Join(dataRoot, "bots", botID)
if err := os.MkdirAll(dataDir, 0o755); err != nil {
return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
@@ -816,10 +813,7 @@ func (h *ContainerdHandler) SetupBotContainer(ctx context.Context, botID string)
} else {
dataRoot = absRoot
}
dataMount := strings.TrimSpace(h.cfg.DataMount)
if dataMount == "" {
dataMount = config.DefaultDataMount
}
dataMount := config.DefaultDataMount
dataDir := filepath.Join(dataRoot, "bots", botID)
if err := os.MkdirAll(dataDir, 0o755); err != nil {
return err
+2 -8
View File
@@ -103,10 +103,7 @@ func (h *ContainerdHandler) resolveContainerPath(botID, rawPath string) (fsPathC
containerPath = "/"
}
dataMount := strings.TrimSpace(h.cfg.DataMount)
if dataMount == "" {
dataMount = config.DefaultDataMount
}
dataMount := config.DefaultDataMount
dataMount = filepath.Clean(dataMount)
// Check whether the requested path falls under the data mount.
@@ -609,10 +606,7 @@ func (h *ContainerdHandler) FSDelete(c echo.Context) error {
}
// Prevent deleting the data mount root itself.
dataMount := strings.TrimSpace(h.cfg.DataMount)
if dataMount == "" {
dataMount = config.DefaultDataMount
}
dataMount := config.DefaultDataMount
if filepath.Clean(pc.containerPath) == filepath.Clean(dataMount) {
return echo.NewHTTPError(http.StatusForbidden, "cannot delete the data root directory")
}
+1 -4
View File
@@ -334,10 +334,7 @@ func (m *Manager) dataRoot() string {
}
func (m *Manager) dataMount() string {
if m.cfg.DataMount == "" {
return config.DefaultDataMount
}
return m.cfg.DataMount
return config.DefaultDataMount
}
func (m *Manager) imageRef() string {
+20 -12
View File
@@ -2,6 +2,7 @@ package container
import (
"context"
"fmt"
"log/slog"
"strings"
@@ -54,6 +55,10 @@ func NewExecutor(log *slog.Logger, execRunner ExecRunner, execWorkDir string) *E
// ListTools returns read, write, list, edit, and exec tool descriptors.
func (p *Executor) ListTools(ctx context.Context, session mcpgw.ToolSessionContext) ([]mcpgw.ToolDescriptor, error) {
wd := p.execWorkDir
if wd == "" {
wd = defaultExecWorkDir
}
return []mcpgw.ToolDescriptor{
{
Name: toolRead,
@@ -61,7 +66,7 @@ func (p *Executor) ListTools(ctx context.Context, session mcpgw.ToolSessionConte
InputSchema: map[string]any{
"type": "object",
"properties": map[string]any{
"path": map[string]any{"type": "string", "description": "file path (relative to /data or absolute inside container)"},
"path": map[string]any{"type": "string", "description": fmt.Sprintf("file path (relative to %s or absolute inside container)", wd)},
},
"required": []string{"path"},
},
@@ -72,7 +77,7 @@ func (p *Executor) ListTools(ctx context.Context, session mcpgw.ToolSessionConte
InputSchema: map[string]any{
"type": "object",
"properties": map[string]any{
"path": map[string]any{"type": "string", "description": "file path (relative to /data or absolute inside container)"},
"path": map[string]any{"type": "string", "description": fmt.Sprintf("file path (relative to %s or absolute inside container)", wd)},
"content": map[string]any{"type": "string", "description": "file content"},
},
"required": []string{"path", "content"},
@@ -84,7 +89,7 @@ func (p *Executor) ListTools(ctx context.Context, session mcpgw.ToolSessionConte
InputSchema: map[string]any{
"type": "object",
"properties": map[string]any{
"path": map[string]any{"type": "string", "description": "directory path (relative to /data or absolute inside container)"},
"path": map[string]any{"type": "string", "description": fmt.Sprintf("directory path (relative to %s or absolute inside container)", wd)},
"recursive": map[string]any{"type": "boolean", "description": "list recursively"},
},
"required": []string{"path"},
@@ -96,7 +101,7 @@ func (p *Executor) ListTools(ctx context.Context, session mcpgw.ToolSessionConte
InputSchema: map[string]any{
"type": "object",
"properties": map[string]any{
"path": map[string]any{"type": "string", "description": "file path (relative to /data or absolute inside container)"},
"path": map[string]any{"type": "string", "description": fmt.Sprintf("file path (relative to %s or absolute inside container)", wd)},
"old_text": map[string]any{"type": "string", "description": "exact text to find"},
"new_text": map[string]any{"type": "string", "description": "replacement text"},
},
@@ -105,7 +110,7 @@ func (p *Executor) ListTools(ctx context.Context, session mcpgw.ToolSessionConte
},
{
Name: toolExec,
Description: "Execute a command in the bot container. Runs in the bot's data directory (/data) by default.",
Description: fmt.Sprintf("Execute a command in the bot container. Runs in the bot's data directory (%s) by default.", wd),
InputSchema: map[string]any{
"type": "object",
"properties": map[string]any{
@@ -115,7 +120,7 @@ func (p *Executor) ListTools(ctx context.Context, session mcpgw.ToolSessionConte
},
"work_dir": map[string]any{
"type": "string",
"description": "Working directory inside the container (default: /data)",
"description": fmt.Sprintf("Working directory inside the container (default: %s)", wd),
},
},
"required": []string{"command"},
@@ -126,12 +131,15 @@ func (p *Executor) ListTools(ctx context.Context, session mcpgw.ToolSessionConte
// normalizePath converts paths that the LLM may send as /data/... into relative
// paths under the working directory. e.g. /data/test.txt -> test.txt, /data -> .
func normalizePath(path string) string {
func (p *Executor) normalizePath(path string) string {
path = strings.TrimSpace(path)
if path == "" {
return path
}
const prefix = "/data"
prefix := p.execWorkDir
if prefix == "" {
prefix = defaultExecWorkDir
}
if path == prefix {
return "."
}
@@ -150,7 +158,7 @@ func (p *Executor) CallTool(ctx context.Context, session mcpgw.ToolSessionContex
switch toolName {
case toolRead:
filePath := normalizePath(mcpgw.StringArg(arguments, "path"))
filePath := p.normalizePath(mcpgw.StringArg(arguments, "path"))
if filePath == "" {
return mcpgw.BuildToolErrorResult("path is required"), nil
}
@@ -163,7 +171,7 @@ func (p *Executor) CallTool(ctx context.Context, session mcpgw.ToolSessionContex
}), nil
case toolWrite:
filePath := normalizePath(mcpgw.StringArg(arguments, "path"))
filePath := p.normalizePath(mcpgw.StringArg(arguments, "path"))
content := mcpgw.StringArg(arguments, "content")
if filePath == "" {
return mcpgw.BuildToolErrorResult("path is required"), nil
@@ -174,7 +182,7 @@ func (p *Executor) CallTool(ctx context.Context, session mcpgw.ToolSessionContex
return mcpgw.BuildToolSuccessResult(map[string]any{"ok": true}), nil
case toolList:
dirPath := normalizePath(mcpgw.StringArg(arguments, "path"))
dirPath := p.normalizePath(mcpgw.StringArg(arguments, "path"))
if dirPath == "" {
dirPath = "."
}
@@ -196,7 +204,7 @@ func (p *Executor) CallTool(ctx context.Context, session mcpgw.ToolSessionContex
return mcpgw.BuildToolSuccessResult(map[string]any{"path": dirPath, "entries": entriesMaps}), nil
case toolEdit:
filePath := normalizePath(mcpgw.StringArg(arguments, "path"))
filePath := p.normalizePath(mcpgw.StringArg(arguments, "path"))
oldText := mcpgw.StringArg(arguments, "old_text")
newText := mcpgw.StringArg(arguments, "new_text")
if filePath == "" || oldText == "" {
@@ -227,8 +227,9 @@ func TestNormalizePath(t *testing.T) {
{"", ""},
{".", "."},
}
exec := &Executor{execWorkDir: "/data"}
for _, tt := range tests {
got := normalizePath(tt.in)
got := exec.normalizePath(tt.in)
if got != tt.want {
t.Errorf("normalizePath(%q) = %q, want %q", tt.in, got, tt.want)
}
+9 -3
View File
@@ -392,7 +392,10 @@ func (p *Executor) resolveAttachmentRef(ctx context.Context, botID, ref, attType
}
// Container media path — resolve via asset storage.
const mediaMarker = "/data/media/"
mediaMarker := filepath.Join("/data", "media")
if !strings.HasSuffix(mediaMarker, "/") {
mediaMarker += "/"
}
if idx := strings.Index(ref, mediaMarker); idx >= 0 && p.assetResolver != nil {
storageKey := ref[idx+len(mediaMarker):]
asset, err := p.assetResolver.GetByStorageKey(ctx, botID, storageKey)
@@ -404,8 +407,11 @@ func (p *Executor) resolveAttachmentRef(ctx context.Context, botID, ref, attType
}
}
// Other container /data/ path — ingest into media store first.
const dataPrefix = "/data/"
// Other container data mount path — ingest into media store first.
dataPrefix := "/data"
if !strings.HasSuffix(dataPrefix, "/") {
dataPrefix += "/"
}
if strings.HasPrefix(ref, dataPrefix) && p.assetResolver != nil {
asset, err := p.assetResolver.IngestContainerFile(ctx, botID, ref)
if err == nil {
@@ -85,7 +85,7 @@ func TestSourceListToolsIncludesSSETools(t *testing.T) {
if len(tools) != 1 {
t.Fatalf("expected 1 tool, got %d", len(tools))
}
if tools[0].Name != "remote_sse.search" {
if tools[0].Name != "remote_sse_search" {
t.Fatalf("unexpected tool alias: %s", tools[0].Name)
}
}
@@ -113,7 +113,7 @@ func TestSourceCallToolRoutesToSSEConnection(t *testing.T) {
}
source := NewSource(slog.Default(), gateway, lister)
result, err := source.CallTool(context.Background(), mcpgw.ToolSessionContext{BotID: "bot-1"}, "remote_sse.search", map[string]any{"query": "hello"})
result, err := source.CallTool(context.Background(), mcpgw.ToolSessionContext{BotID: "bot-1"}, "remote_sse_search", map[string]any{"query": "hello"})
if err != nil {
t.Fatalf("call tool failed: %v", err)
}
+2 -9
View File
@@ -294,10 +294,7 @@ func (m *Manager) buildVersionSpec(botID string) (ctr.ContainerSpec, error) {
if err != nil {
return ctr.ContainerSpec{}, err
}
dataMount := m.cfg.DataMount
if dataMount == "" {
dataMount = config.DefaultDataMount
}
dataMount := config.DefaultDataMount
resolvPath, err := ctr.ResolveConfSource(dataDir)
if err != nil {
return ctr.ContainerSpec{}, err
@@ -351,11 +348,7 @@ func (m *Manager) ensureDBRecords(ctx context.Context, botID, containerID, runti
return pgtype.UUID{}, err
}
containerPath := m.cfg.DataMount
if containerPath == "" {
containerPath = config.DefaultDataMount
}
containerPath := config.DefaultDataMount
if err := m.queries.UpsertContainer(ctx, dbsqlc.UpsertContainerParams{
BotID: botUUID,
ContainerID: containerID,
+2 -1
View File
@@ -9,6 +9,7 @@ import (
"sync"
"time"
"github.com/memohai/memoh/internal/config"
mcpgw "github.com/memohai/memoh/internal/mcp"
"github.com/memohai/memoh/internal/mcp/providers/container"
)
@@ -48,7 +49,7 @@ func NewMemoryFS(log *slog.Logger, runner container.ExecRunner, workDir string)
log = slog.Default()
}
if strings.TrimSpace(workDir) == "" {
workDir = "/data"
workDir = config.DefaultDataMount
}
return &MemoryFS{
execRunner: runner,
@@ -17,7 +17,7 @@ const containerMediaRoot = "/data/media"
// Provider stores media assets via the host-side bind mount path
// that maps to /data inside bot containers.
type Provider struct {
dataRoot string
dataRoot string
}
// New creates a container-based storage provider.
@@ -79,7 +79,7 @@ func (p *Provider) Delete(_ context.Context, key string) error {
// Routing key format: "<bot_id>/<storage_key>" → "/data/media/<storage_key>".
func (p *Provider) AccessPath(key string) string {
_, sub := splitRoutingKey(key)
return containerMediaRoot + "/" + sub
return filepath.Join("/data", "media", sub)
}
// hostPath converts a routing key into the host-side file path.
@@ -104,11 +104,14 @@ func (p *Provider) hostPath(key string) (string, error) {
}
// OpenContainerFile opens a file from a bot's /data/ directory on the host.
// containerPath must start with "/data/".
// containerPath must start with the data mount path.
func (p *Provider) OpenContainerFile(botID, containerPath string) (io.ReadCloser, error) {
const dataPrefix = "/data/"
dataPrefix := "/data"
if !strings.HasSuffix(dataPrefix, "/") {
dataPrefix += "/"
}
if !strings.HasPrefix(containerPath, dataPrefix) {
return nil, fmt.Errorf("path must start with /data/")
return nil, fmt.Errorf("path must start with %s", dataPrefix)
}
subPath := containerPath[len(dataPrefix):]
if subPath == "" || strings.Contains(subPath, "..") {
+4 -3
View File
@@ -113,10 +113,11 @@ export const createAgent = (
}
const loadSystemFiles = async () => {
const home = '/data'
const [identityContent, soulContent, toolsContent] = await Promise.all([
fs.readText('/data/IDENTITY.md'),
fs.readText('/data/SOUL.md'),
fs.readText('/data/TOOLS.md'),
fs.readText(`${home}/IDENTITY.md`),
fs.readText(`${home}/SOUL.md`),
fs.readText(`${home}/TOOLS.md`),
]).catch((error) => {
console.error(error)
return ['', '', '']
+5 -4
View File
@@ -55,6 +55,7 @@ export const system = ({
toolsContent,
inbox = [],
}: SystemParams) => {
const home = '/data'
// ── Static section (stable prefix for LLM prompt caching) ──────────
const staticHeaders = {
'language': language,
@@ -76,7 +77,7 @@ You are just woke up.
**Your text output IS your reply.** Whatever you write goes directly back to the person who messaged you. You do not need any tool to reply just write.
${quote('/data')} is your HOME you can read and write files there freely.
${quote(home)} is your HOME you can read and write files there freely.
## Basic Tools
- ${quote('read')}: read file content
@@ -133,14 +134,14 @@ Guidelines:
**Receiving**: Uploaded files are saved to your workspace; the file path appears in the message header.
**Sending via ${quote('send')} tool**: Pass file paths or URLs in the ${quote('attachments')} parameter. Example: ${quote('attachments: ["/data/media/ab/file.jpg", "https://example.com/img.png"]')}
**Sending via ${quote('send')} tool**: Pass file paths or URLs in the ${quote('attachments')} parameter. Example: ${quote('attachments: ["' + home + '/media/ab/file.jpg", "https://example.com/img.png"]')}
**Sending in direct responses**: Use this format:
${block([
'<attachments>',
'- /path/to/file.pdf',
'- /path/to/video.mp4',
`- ${home}/path/to/file.pdf`,
`- ${home}/path/to/video.mp4`,
'- https://example.com/image.png',
'</attachments>',
].join('\n'))}
-1
View File
@@ -40,7 +40,6 @@ export interface McpConfig {
image: string;
snapshotter: string;
data_root: string;
data_mount: string;
}
export interface PostgresConfig {