60 changes: 60 additions & 0 deletions test/e2e/conformance/tests/go-wasm-ai-proxy.go
@@ -153,6 +153,66 @@ data: {"id":"chatcmpl-llm-mock","choices":[{"index":0,"delta":{"content":"?"},

data: [DONE]

`),
},
},
},
{
Meta: http.AssertionMeta{
TestCaseName: "azure case 1: non-streaming request",
CompareTarget: http.CompareTargetResponse,
},
Request: http.AssertionRequest{
ActualRequest: http.Request{
Host: "your-apim-instance.azure-api.net",
Path: "/v1/chat/completions",
Method: "POST",
ContentType: http.ContentTypeApplicationJson,
Body: []byte(`{"model":"gpt-3","messages":[{"role":"user","content":"你好,你是谁?"}],"stream":false}`),
},
},
Response: http.AssertionResponse{
ExpectedResponse: http.Response{
StatusCode: 200,
ContentType: http.ContentTypeApplicationJson,
Body: []byte(`{"id":"chatcmpl-llm-mock","choices":[{"index":0,"message":{"role":"assistant","content":"你好,你是谁?"},"finish_reason":"stop","logprobs":null}],"created":10,"model":"gpt-3","object":"chat.completion","usage":{"prompt_tokens":9,"completion_tokens":1,"total_tokens":10}}`),
},
},
},
{
Meta: http.AssertionMeta{
TestCaseName: "azure case 2: streaming request",
CompareTarget: http.CompareTargetResponse,
},
Request: http.AssertionRequest{
ActualRequest: http.Request{
Host: "your-apim-instance.azure-api.net",
Path: "/v1/chat/completions",
Method: "POST",
ContentType: http.ContentTypeApplicationJson,
Body: []byte(`{"model":"gpt-3","messages":[{"role":"user","content":"你好,你是谁?"}],"stream":true}`),
},
},
Response: http.AssertionResponse{
ExpectedResponse: http.Response{
StatusCode: 200,
ContentType: http.ContentTypeTextEventStream,
Body: []byte(`data: {"id":"chatcmpl-llm-mock","choices":[{"index":0,"delta":{"content":"你"},"finish_reason":null,"logprobs":null}],"created":10,"model":"gpt-3","object":"chat.completion.chunk","usage":null}

data: {"id":"chatcmpl-llm-mock","choices":[{"index":0,"delta":{"content":"好"},"finish_reason":null,"logprobs":null}],"created":10,"model":"gpt-3","object":"chat.completion.chunk","usage":null}

data: {"id":"chatcmpl-llm-mock","choices":[{"index":0,"delta":{"content":","},"finish_reason":null,"logprobs":null}],"created":10,"model":"gpt-3","object":"chat.completion.chunk","usage":null}

data: {"id":"chatcmpl-llm-mock","choices":[{"index":0,"delta":{"content":"你"},"finish_reason":null,"logprobs":null}],"created":10,"model":"gpt-3","object":"chat.completion.chunk","usage":null}

data: {"id":"chatcmpl-llm-mock","choices":[{"index":0,"delta":{"content":"是"},"finish_reason":null,"logprobs":null}],"created":10,"model":"gpt-3","object":"chat.completion.chunk","usage":null}

data: {"id":"chatcmpl-llm-mock","choices":[{"index":0,"delta":{"content":"谁"},"finish_reason":null,"logprobs":null}],"created":10,"model":"gpt-3","object":"chat.completion.chunk","usage":null}

data: {"id":"chatcmpl-llm-mock","choices":[{"index":0,"delta":{"content":"?"},"finish_reason":"stop","logprobs":null}],"created":10,"model":"gpt-3","object":"chat.completion.chunk","usage":null}

data: [DONE]

`),
},
},
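For context, the two new assertions above send OpenAI-style chat-completions requests through the gateway with the Azure APIM host (`your-apim-instance.azure-api.net`) and compare the responses against the llm-mock-service output. Below is a minimal standalone sketch of the equivalent client call for the non-streaming case; the gateway address (`127.0.0.1:80`) is an assumption for illustration and is not part of this PR.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Request body mirrors the "azure case 1: non-streaming request" assertion above.
	body := []byte(`{"model":"gpt-3","messages":[{"role":"user","content":"你好,你是谁?"}],"stream":false}`)

	// Assumed gateway address; adjust to your environment.
	gatewayURL := "http://127.0.0.1:80/v1/chat/completions"

	req, err := http.NewRequest(http.MethodPost, gatewayURL, bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	// The Host header selects the azure route defined by the
	// wasmplugin-ai-proxy-azure Ingress in the YAML below.
	req.Host = "your-apim-instance.azure-api.net"
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	respBody, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(respBody))
}
```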
30 changes: 30 additions & 0 deletions test/e2e/conformance/tests/go-wasm-ai-proxy.yaml
@@ -51,6 +51,25 @@ spec:
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: wasmplugin-ai-proxy-azure
namespace: higress-conformance-ai-backend
spec:
ingressClassName: higress
rules:
- host: "your-apim-instance.azure-api.net"
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: llm-mock-service
port:
number: 3000
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: wasmplugin-ai-proxy-baidu
namespace: higress-conformance-ai-backend
@@ -411,6 +430,17 @@ spec:
type: ai360
ingress:
- higress-conformance-ai-backend/wasmplugin-ai-proxy-ai360
- config:
provider:
apiTokens:
- fake_token
modelMapping:
"gpt-3": gpt-35-turbo-16k
"*": gpt-35-turbo-16k
type: azure
azureServiceUrl: "https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME/chat/completions?api-version=2024-02-15-preview"
ingress:
- higress-conformance-ai-backend/wasmplugin-ai-proxy-azure
- config:
provider:
apiTokens:
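The azure provider entry above maps the incoming `gpt-3` model name to the `gpt-35-turbo-16k` deployment and uses `"*"` as a catch-all, with `azureServiceUrl` pointing at the deployment endpoint. A rough sketch of that lookup semantics (exact key first, then wildcard fallback) is given below; it only illustrates how a `modelMapping` table of this shape is typically read and is not the actual ai-proxy plugin code.

```go
package main

import "fmt"

// mapModel resolves a requested model name against a modelMapping table:
// an exact entry wins, otherwise the "*" wildcard entry (if any) applies,
// and an unmatched name falls through unchanged.
func mapModel(mapping map[string]string, requested string) string {
	if target, ok := mapping[requested]; ok {
		return target
	}
	if target, ok := mapping["*"]; ok {
		return target
	}
	return requested
}

func main() {
	mapping := map[string]string{
		"gpt-3": "gpt-35-turbo-16k",
		"*":     "gpt-35-turbo-16k",
	}
	fmt.Println(mapModel(mapping, "gpt-3"))  // gpt-35-turbo-16k (exact match)
	fmt.Println(mapModel(mapping, "gpt-4o")) // gpt-35-turbo-16k (wildcard fallback)
}
```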