HeliumTS
Note:

HeliumTS is in pre-beta and under active development. Expect bugs and breaking changes. If you find any issues, please report them on our GitHub repository.

A stable release is planned for early December 2025.

Using the OpenAI API

This guide shows how to integrate OpenAI's API with HeliumTS for both streaming and non-streaming completions.

Setup

First, install the OpenAI SDK:

npm install openai

Add your API key to your environment variables:

OPENAI_API_KEY=sk-...
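
If the key is missing, OpenAI calls will fail at request time with an opaque error. A minimal fail-fast check at server startup (illustrative; not a HeliumTS API) could look like:

// Fail fast if the key is missing (illustrative startup check).
if (!process.env.OPENAI_API_KEY) {
  throw new Error("OPENAI_API_KEY is not set; add it to your environment or .env file");
}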

Non-Streaming Example (RPC)

For simple request-response patterns, use RPC:

import { defineRPC } from "heliumts/server";
import OpenAI from "openai";

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

export const chatCompletion = defineRPC(async (data: { message: string }, ctx) => {
  if (!data.message) {
    throw new Error("Message is required");
  }

  try {
    const completion = await openai.chat.completions.create({
      model: "gpt-4",
      messages: [{ role: "user", content: data.message }],
    });

    // The API returns an array of choices; take the first one.
    const response = completion.choices[0]?.message?.content || "";

    return {
      response,
      usage: completion.usage,
    };
  } catch (error) {
    console.error("OpenAI API error:", error);
    throw new Error("Failed to get completion");
  }
});

Client-side usage:

import { useCall } from "heliumts/client";
// Import the RPC from the module where it is defined (path shown is illustrative).
import { chatCompletion } from "../server/chat";

export default function ChatPage() {
  const { call, data, loading, error } = useCall(chatCompletion);

  const handleSendMessage = async (message: string) => {
    const result = await call({ message });
    console.log(result.response);
  };

  return (
    <div>
      <button onClick={() => handleSendMessage("Hello!")}>Send</button>
      {loading && <p>Loading...</p>}
      {error && <p>Error: {error.message}</p>}
      {data && <p>{data.response}</p>}
    </div>
  );
}

Streaming Example (HTTP)

For real-time chat interfaces with token-by-token streaming, use HTTP handlers:

import { defineHTTPRequest } from "heliumts/server";
import OpenAI from "openai";

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

export const chatCompletionStream = defineHTTPRequest("POST", "/api/chat/stream", async (req, ctx) => {
  const { message } = (await req.json()) as { message: string };

  if (!message) {
    return new Response(JSON.stringify({ error: "Message is required" }), {
      status: 400,
      headers: { "Content-Type": "application/json" },
    });
  }

  try {
    const stream = await openai.chat.completions.create({
      model: "gpt-4",
      messages: [{ role: "user", content: message }],
      stream: true,
    });

    // Create a ReadableStream from the OpenAI stream
    const encoder = new TextEncoder();
    const readableStream = new ReadableStream({
      async start(controller) {
        try {
          for await (const chunk of stream) {
            const content = chunk.choices[0]?.delta?.content || "";
            if (content) {
              // Send as SSE format
              controller.enqueue(encoder.encode(`data: ${JSON.stringify({ content })}\n\n`));
            }
          }
          controller.enqueue(encoder.encode("data: [DONE]\n\n"));
          controller.close();
        } catch (error) {
          controller.error(error);
        }
      },
    });

    return new Response(readableStream, {
      status: 200,
      headers: {
        "Content-Type": "text/event-stream",
        "Cache-Control": "no-cache",
        Connection: "keep-alive",
      },
    });
  } catch (error) {
    console.error("OpenAI API error:", error);

    return new Response(
      JSON.stringify({ error: "Failed to get completion" }),
      {
        status: 500,
        headers: { "Content-Type": "application/json" },
      }
    );
  }
});
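
On the wire, the body this handler produces is a sequence of SSE frames, one per token, terminated by a [DONE] sentinel:

data: {"content":"Hello"}

data: {"content":" world"}

data: [DONE]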

Client-Side Usage

Example of consuming the streaming endpoint from the client:

async function streamChat(message: string) {
  const response = await fetch("/api/chat/stream", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
    },
    body: JSON.stringify({ message }),
  });

  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error("Response has no body");
  }
  const decoder = new TextDecoder();

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    const chunk = decoder.decode(value, { stream: true });
    const lines = chunk.split("\n");

    for (const line of lines) {
      if (line.startsWith("data: ")) {
        const data = line.slice(6);
        if (data === "[DONE]") {
          console.log("Stream complete");
          return;
        }

        const parsed = JSON.parse(data);
        console.log(parsed.content); // Display token
      }
    }
  }
}
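
Note that read() is not guaranteed to return complete lines; a chunk can end in the middle of an SSE frame, which would break the JSON.parse above. A buffered variant handles split frames (a sketch; streamChatBuffered and onToken are illustrative names, not HeliumTS APIs):

async function streamChatBuffered(message: string, onToken: (token: string) => void) {
  const response = await fetch("/api/chat/stream", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ message }),
  });
  if (!response.ok || !response.body) {
    throw new Error(`Request failed: ${response.status}`);
  }

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = "";

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    // Accumulate bytes and only process complete lines; a chunk may end mid-line.
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split("\n");
    buffer = lines.pop() ?? ""; // keep the trailing partial line for the next read

    for (const line of lines) {
      if (!line.startsWith("data: ")) continue;
      const data = line.slice(6);
      if (data === "[DONE]") return;
      onToken(JSON.parse(data).content);
    }
  }
}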

Best Practices

  • Always validate user input before sending to OpenAI
  • Implement rate limiting to prevent API abuse (see the sketch after this list)
  • Use environment variables for API keys
  • Handle errors gracefully and provide user feedback
  • Consider adding authentication to protect your endpoints
  • Monitor API usage and costs
  • Use streaming for better user experience in chat interfaces
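
As a starting point for rate limiting, a naive fixed-window limiter might look like the sketch below. This is illustrative only: rateLimit is a hypothetical helper, and the in-memory Map assumes a single-process server.

// Naive fixed-window rate limiter: at most `limit` calls per `windowMs` per key.
// Sketch only: in-memory state works for a single process; use Redis or
// similar shared storage in production.
const hits = new Map<string, { count: number; resetAt: number }>();

function rateLimit(key: string, limit = 10, windowMs = 60_000): boolean {
  const now = Date.now();
  const entry = hits.get(key);
  if (!entry || now >= entry.resetAt) {
    hits.set(key, { count: 1, resetAt: now + windowMs });
    return true;
  }
  if (entry.count >= limit) return false;
  entry.count += 1;
  return true;
}

Inside a handler you would call rateLimit with some per-client key (for example a user ID or client IP) before contacting OpenAI, and return a 429 response when it returns false.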