139 changes: 62 additions & 77 deletions src/index.ts
@@ -2,18 +2,12 @@

import dotenv from "dotenv";
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
CallToolRequestSchema,
ListResourcesRequestSchema,
ListToolsRequestSchema,
ReadResourceRequestSchema,
ListPromptsRequestSchema,
GetPromptRequestSchema,
} from "@modelcontextprotocol/sdk/types.js";
import { JSONRPCMessage } from "@modelcontextprotocol/sdk/types.js";
import { RequestOptions } from "@modelcontextprotocol/sdk/shared/protocol.js";

// Deps to run as a client
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";


@@ -26,7 +20,7 @@ const NAME = process.env.MCP_NAME || 'mcp-stdio-to-streamable-http-adapter';
if (!URI) {
throw new Error("URI is required")
}
// console.log("starting...")
// log("starting...")
//
let transportOptions = {};
if (BEARER_TOKEN !== undefined) {
@@ -39,24 +33,7 @@ if (BEARER_TOKEN !== undefined) {
}
}
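
The collapsed hunk above hides how `transportOptions` is populated when `BEARER_TOKEN` is set. A minimal sketch of one way to attach such a token, assuming the transport's `requestInit` option; this is an illustration, not a copy of the elided lines:

```ts
// Hypothetical sketch: send the bearer token as an Authorization header on
// every request made by the Streamable HTTP client transport. The actual
// assignment lives in the collapsed portion of the diff.
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";

const URI = process.env.URI;
const BEARER_TOKEN = process.env.BEARER_TOKEN;
if (!URI) {
  throw new Error("URI is required");
}

let transportOptions = {};
if (BEARER_TOKEN !== undefined) {
  transportOptions = {
    requestInit: {
      headers: { Authorization: `Bearer ${BEARER_TOKEN}` },
    },
  };
}

const transport = new StreamableHTTPClientTransport(new URL(URI), transportOptions);
```

Keeping the header in `requestInit` applies it to every HTTP request the transport issues, rather than embedding credentials in the URL.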

const transport = new StreamableHTTPClientTransport(
new URL(`${URI}`),
transportOptions
);
// console.log("making client...")

const client = new Client(
{
name: NAME,
version: "0.1.0"
}
);

// console.log("connecting to transport...")
await client.connect(transport);


const server = new Server(
const mcp_server = new McpServer(
{
name: NAME,
version: "0.1.0",
@@ -65,71 +42,79 @@ const server = new Server(
// TODO - check capabilities
capabilities: {
resources: {},
tools: {},
tools: {
listChanged: true
},
prompts: {},
logging: {},
},
}
);

/**
* Handler for listing resources.
*/
server.setRequestHandler(ListResourcesRequestSchema, async () => {
return await client.listResources();
});
const server: Server = mcp_server.server

/**
* Handler for reading the contents of a specific resource.
*/
server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
return await client.readResource({
uri: request.params.uri,
})

});
function log(message: string | object) {
  server.sendLoggingMessage({ level: "info", data: JSON.stringify(message) });
}

/**
* Handler that lists available tools.
* Exposes a single "chat" tool that lets clients chat with another AI.
*/
server.setRequestHandler(ListToolsRequestSchema, async () => {
return await client.listTools();
});
class CustomClientStreamableHTTPClientTransport extends StreamableHTTPClientTransport {
// @ts-ignore
onmessage = (message: JSONRPCMessage, extra: RequestOptions) => {
// log("[client] onmessage fires")
// log(message)
server.transport?.send(message, extra)
}
onerror = (error: Error) => {
// log("[client] onerror fires")
// log(error)
const error_func = server.transport?.onerror
if (error_func !== undefined){
error_func(error)
}
}
onclose = () => {
// log("[client] close trigger fires")
// server.transport?.close()
server.sendLoggingMessage({ level: "critical", data: "Client closed" })
}
}

/**
* Handler for the chat tool.
* Connects to an OpenAI SDK compatible AI Integration.
*/
server.setRequestHandler(CallToolRequestSchema, async (request) => {
return await client.callTool({
name: request.params.name,
arguments: request.params.arguments || {},
});
});
const client_transport = new CustomClientStreamableHTTPClientTransport(
new URL(`${URI}`),
transportOptions
);

/**
* Handler that lists available prompts.
*/
server.setRequestHandler(ListPromptsRequestSchema, async () => {
return await client.listPrompts();
});
class CustomServerStdioServerTransport extends StdioServerTransport {
// @ts-ignore
onmessage: ((message: JSONRPCMessage, extra: RequestOptions) => void) = (message: JSONRPCMessage, extra: RequestOptions) => {
// log("[server] onmessage fires")
// log(message)
client_transport.send(message, extra)
};
// onerror: ((error: Error) => void) = (error: Error) => {
// // log("[server] onerror fires")
// // log(error)
// client_transport.onerror(error)
// };
// onclose: (() => void) = () => {
// // log("[server] close trigger fires")
// client_transport.onclose()
// }
}

/**
* Handler for the get prompt.
*/
server.setRequestHandler(GetPromptRequestSchema, async (request) => {
return await client.getPrompt({
name: request.params.name,
});
});
await client_transport.start()

/**
* Start the server using stdio transport.
* This allows the server to communicate via standard input/output streams.
*/
async function main() {
const transport = new StdioServerTransport();
await server.connect(transport);
// const server_transport = new StdioServerTransport()
// await mcp_server.connect(server_transport)
const server_transport = new CustomServerStdioServerTransport()
// @ts-ignore
server._transport = server_transport
await server_transport.start()
}

main().catch((error) => {
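
Taken together, the new code replaces the old per-request proxy handlers with a raw message bridge: whatever arrives on stdio is forwarded to the Streamable HTTP transport, and whatever comes back is written to stdio. A condensed sketch of that pattern with illustrative names, not the PR's exact wiring (the PR also keeps an `McpServer` instance so it can emit logging notifications):

```ts
// Sketch of the stdio <-> Streamable HTTP bridge, assuming the transport
// interfaces (start, send, onmessage) from @modelcontextprotocol/sdk.
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
import type { JSONRPCMessage } from "@modelcontextprotocol/sdk/types.js";

async function bridge(uri: string): Promise<void> {
  const http = new StreamableHTTPClientTransport(new URL(uri));
  const stdio = new StdioServerTransport();

  // Local stdio client -> remote Streamable HTTP server.
  stdio.onmessage = (message: JSONRPCMessage) => {
    void http.send(message);
  };
  // Remote Streamable HTTP server -> local stdio client.
  http.onmessage = (message: JSONRPCMessage) => {
    void stdio.send(message);
  };

  await http.start();
  await stdio.start();
}

const URI = process.env.URI;
if (!URI) {
  throw new Error("URI is required");
}
await bridge(URI);
```

The custom transport subclasses in the diff implement the same forwarding, and additionally route client errors into the server transport's `onerror` handler and report a client-side close as a critical log message.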