| code (stringlengths 533–14.9k) | apis (sequencelengths 1–6) | extract_api (stringlengths 79–3.33k) |
|---|---|---|
package kr.jm.openai.token;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingType;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
public class GptTokenAnalyzer {
private final Pattern pattern;
private final Encoding encoding;
public GptTokenAnalyzer(EncodingType encodingType) {
this.encoding = Encodings.newLazyEncodingRegistry().getEncoding(encodingType);
this.pattern = Pattern.compile("�|ு|്|்");
}
public List<Integer> getTokenIds(String prompt) {
return encoding.encodeOrdinary(prompt);
}
public int getTokenCount(String prompt) {
return getTokenIds(prompt).size();
}
private List<String> getTokenStrings(List<Integer> tokenIds) {
return tokenIds.stream().map(List::of).map(encoding::decode).collect(Collectors.toList());
}
public TokenAnalysis analysis(String prompt) {
String subPrompt = prompt;
List<Integer> tokenIds = getTokenIds(prompt);
List<String> readableParts = new ArrayList<>();
List<Integer> partTokenCounts = new ArrayList<>();
List<String> tokenStrings = getTokenStrings(tokenIds);
int tempTokenCount = 0;
for (String tokenString : tokenStrings) {
if (pattern.matcher(tokenString).find() || !subPrompt.contains(tokenString)) {
tempTokenCount++;
} else {
int endIndex = subPrompt.indexOf(tokenString);
if (tempTokenCount > 0) {
partTokenCounts.add(tempTokenCount);
readableParts.add(subPrompt.substring(0, endIndex));
tempTokenCount = 0;
}
partTokenCounts.add(1);
readableParts.add(tokenString);
subPrompt = subPrompt.substring(endIndex + tokenString.length());
}
}
if (tempTokenCount > 0) {
partTokenCounts.add(tempTokenCount);
readableParts.add(subPrompt);
}
return new TokenAnalysis(prompt, tokenIds, readableParts, partTokenCounts);
}
}
|
[
"com.knuddels.jtokkit.Encodings.newLazyEncodingRegistry().getEncoding"
] |
[((464, 525), 'com.knuddels.jtokkit.Encodings.newLazyEncodingRegistry().getEncoding')]
|
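A minimal usage sketch for the analyzer above. `TokenAnalysis` is not shown in this row, so the accessor names (`getReadableParts`, `getPartTokenCounts`) are assumptions; everything else follows the class as written.

```java
package kr.jm.openai.token;

import com.knuddels.jtokkit.api.EncodingType;

public class GptTokenAnalyzerExample {

    public static void main(String[] args) {
        // cl100k_base is the encoding used by gpt-3.5-turbo and gpt-4
        GptTokenAnalyzer analyzer = new GptTokenAnalyzer(EncodingType.CL100K_BASE);

        // Multi-byte text often splits into tokens that decode to partial
        // characters, which is exactly what the regex above detects
        String prompt = "안녕하세요, GPT!";
        System.out.println(analyzer.getTokenCount(prompt) + " tokens: " + analyzer.getTokenIds(prompt));

        // Hypothetical accessors on TokenAnalysis (not shown in this row)
        TokenAnalysis analysis = analyzer.analysis(prompt);
        System.out.println(analysis.getReadableParts());
        System.out.println(analysis.getPartTokenCounts());
    }
}
```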
package dev.langchain4j.model.openai;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.IntArrayList;
import dev.langchain4j.agent.tool.ToolExecutionRequest;
import dev.langchain4j.agent.tool.ToolParameters;
import dev.langchain4j.agent.tool.ToolSpecification;
import dev.langchain4j.data.message.*;
import dev.langchain4j.model.Tokenizer;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Supplier;
import static dev.langchain4j.internal.Exceptions.illegalArgument;
import static dev.langchain4j.internal.Json.fromJson;
import static dev.langchain4j.internal.Utils.isNullOrBlank;
import static dev.langchain4j.internal.ValidationUtils.ensureNotBlank;
import static dev.langchain4j.model.openai.OpenAiChatModelName.*;
import static java.util.Collections.singletonList;
/**
* This class can be used to estimate the cost (in tokens) before calling OpenAI or when using streaming.
* Magic numbers present in this class were found empirically while testing.
* Integration tests are in place to make sure that the calculations here stay very close to OpenAI's.
*/
public class OpenAiTokenizer implements Tokenizer {
private final String modelName;
private final Optional<Encoding> encoding;
/**
* Creates an instance of the {@code OpenAiTokenizer} for the "gpt-3.5-turbo" model.
* It should be suitable for all current OpenAI models, as they all use the same cl100k_base encoding.
*/
public OpenAiTokenizer() {
this(GPT_3_5_TURBO.toString());
}
/**
* Creates an instance of the {@code OpenAiTokenizer} for a given {@link OpenAiChatModelName}.
*/
public OpenAiTokenizer(OpenAiChatModelName modelName) {
this(modelName.toString());
}
/**
* Creates an instance of the {@code OpenAiTokenizer} for a given {@link OpenAiEmbeddingModelName}.
*/
public OpenAiTokenizer(OpenAiEmbeddingModelName modelName) {
this(modelName.toString());
}
/**
* Creates an instance of the {@code OpenAiTokenizer} for a given {@link OpenAiLanguageModelName}.
*/
public OpenAiTokenizer(OpenAiLanguageModelName modelName) {
this(modelName.toString());
}
/**
* Creates an instance of the {@code OpenAiTokenizer} for a given model name.
*/
public OpenAiTokenizer(String modelName) {
this.modelName = ensureNotBlank(modelName, "modelName");
// If the model is unknown, we should NOT fail fast during the creation of OpenAiTokenizer.
// Doing so would cause the failure of every OpenAI***Model that uses this tokenizer.
// This is done to account for situations when a new OpenAI model is available,
// but JTokkit does not yet support it.
this.encoding = Encodings.newLazyEncodingRegistry().getEncodingForModel(modelName);
}
public int estimateTokenCountInText(String text) {
return encoding.orElseThrow(unknownModelException())
.countTokensOrdinary(text);
}
@Override
public int estimateTokenCountInMessage(ChatMessage message) {
int tokenCount = 1; // 1 token for role
tokenCount += extraTokensPerMessage();
if (message instanceof SystemMessage) {
tokenCount += estimateTokenCountIn((SystemMessage) message);
} else if (message instanceof UserMessage) {
tokenCount += estimateTokenCountIn((UserMessage) message);
} else if (message instanceof AiMessage) {
tokenCount += estimateTokenCountIn((AiMessage) message);
} else if (message instanceof ToolExecutionResultMessage) {
tokenCount += estimateTokenCountIn((ToolExecutionResultMessage) message);
} else {
throw new IllegalArgumentException("Unknown message type: " + message);
}
return tokenCount;
}
private int estimateTokenCountIn(SystemMessage systemMessage) {
return estimateTokenCountInText(systemMessage.text());
}
private int estimateTokenCountIn(UserMessage userMessage) {
int tokenCount = 0;
for (Content content : userMessage.contents()) {
if (content instanceof TextContent) {
tokenCount += estimateTokenCountInText(((TextContent) content).text());
} else if (content instanceof ImageContent) {
tokenCount += 85; // TODO implement for HIGH/AUTO detail level
} else {
throw illegalArgument("Unknown content type: " + content);
}
}
if (userMessage.name() != null && !modelName.equals(GPT_4_VISION_PREVIEW.toString())) {
tokenCount += extraTokensPerName();
tokenCount += estimateTokenCountInText(userMessage.name());
}
return tokenCount;
}
private int estimateTokenCountIn(AiMessage aiMessage) {
int tokenCount = 0;
if (aiMessage.text() != null) {
tokenCount += estimateTokenCountInText(aiMessage.text());
}
if (aiMessage.toolExecutionRequests() != null) {
if (isOneOfLatestModels()) {
tokenCount += 6;
} else {
tokenCount += 3;
}
if (aiMessage.toolExecutionRequests().size() == 1) {
tokenCount -= 1;
ToolExecutionRequest toolExecutionRequest = aiMessage.toolExecutionRequests().get(0);
tokenCount += estimateTokenCountInText(toolExecutionRequest.name()) * 2;
tokenCount += estimateTokenCountInText(toolExecutionRequest.arguments());
} else {
tokenCount += 15;
for (ToolExecutionRequest toolExecutionRequest : aiMessage.toolExecutionRequests()) {
tokenCount += 7;
tokenCount += estimateTokenCountInText(toolExecutionRequest.name());
Map<?, ?> arguments = fromJson(toolExecutionRequest.arguments(), Map.class);
for (Map.Entry<?, ?> argument : arguments.entrySet()) {
tokenCount += 2;
tokenCount += estimateTokenCountInText(argument.getKey().toString());
tokenCount += estimateTokenCountInText(argument.getValue().toString());
}
}
}
}
return tokenCount;
}
private int estimateTokenCountIn(ToolExecutionResultMessage toolExecutionResultMessage) {
return estimateTokenCountInText(toolExecutionResultMessage.text());
}
private int extraTokensPerMessage() {
if (modelName.equals("gpt-3.5-turbo-0301")) {
return 4;
} else {
return 3;
}
}
private int extraTokensPerName() {
if (modelName.equals("gpt-3.5-turbo-0301")) {
return -1; // if there's a name, the role is omitted
} else {
return 1;
}
}
@Override
public int estimateTokenCountInMessages(Iterable<ChatMessage> messages) {
// see https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
int tokenCount = 3; // every reply is primed with <|start|>assistant<|message|>
for (ChatMessage message : messages) {
tokenCount += estimateTokenCountInMessage(message);
}
return tokenCount;
}
@Override
public int estimateTokenCountInToolSpecifications(Iterable<ToolSpecification> toolSpecifications) {
int tokenCount = 16;
for (ToolSpecification toolSpecification : toolSpecifications) {
tokenCount += 6;
tokenCount += estimateTokenCountInText(toolSpecification.name());
if (toolSpecification.description() != null) {
tokenCount += 2;
tokenCount += estimateTokenCountInText(toolSpecification.description());
}
tokenCount += estimateTokenCountInToolParameters(toolSpecification.parameters());
}
return tokenCount;
}
private int estimateTokenCountInToolParameters(ToolParameters parameters) {
if (parameters == null) {
return 0;
}
int tokenCount = 3;
Map<String, Map<String, Object>> properties = parameters.properties();
if (isOneOfLatestModels()) {
tokenCount += properties.size() - 1;
}
for (String property : properties.keySet()) {
if (isOneOfLatestModels()) {
tokenCount += 2;
} else {
tokenCount += 3;
}
tokenCount += estimateTokenCountInText(property);
for (Map.Entry<String, Object> entry : properties.get(property).entrySet()) {
if ("type".equals(entry.getKey())) {
if ("array".equals(entry.getValue()) && isOneOfLatestModels()) {
tokenCount += 1;
}
// TODO object
} else if ("description".equals(entry.getKey())) {
tokenCount += 2;
tokenCount += estimateTokenCountInText(entry.getValue().toString());
if (isOneOfLatestModels() && parameters.required().contains(property)) {
tokenCount += 1;
}
} else if ("enum".equals(entry.getKey())) {
if (isOneOfLatestModels()) {
tokenCount -= 2;
} else {
tokenCount -= 3;
}
for (Object enumValue : (Object[]) entry.getValue()) {
tokenCount += 3;
tokenCount += estimateTokenCountInText(enumValue.toString());
}
}
}
}
return tokenCount;
}
@Override
public int estimateTokenCountInForcefulToolSpecification(ToolSpecification toolSpecification) {
int tokenCount = estimateTokenCountInToolSpecifications(singletonList(toolSpecification));
tokenCount += 4;
tokenCount += estimateTokenCountInText(toolSpecification.name());
if (isOneOfLatestModels()) {
tokenCount += 3;
}
return tokenCount;
}
public List<Integer> encode(String text) {
return encoding.orElseThrow(unknownModelException())
.encodeOrdinary(text).boxed();
}
public List<Integer> encode(String text, int maxTokensToEncode) {
return encoding.orElseThrow(unknownModelException())
.encodeOrdinary(text, maxTokensToEncode).getTokens().boxed();
}
public String decode(List<Integer> tokens) {
IntArrayList intArrayList = new IntArrayList();
for (Integer token : tokens) {
intArrayList.add(token);
}
return encoding.orElseThrow(unknownModelException())
.decode(intArrayList);
}
private Supplier<IllegalArgumentException> unknownModelException() {
return () -> illegalArgument("Model '%s' is unknown to jtokkit", modelName);
}
@Override
public int estimateTokenCountInToolExecutionRequests(Iterable<ToolExecutionRequest> toolExecutionRequests) {
int tokenCount = 0;
int toolsCount = 0;
int toolsWithArgumentsCount = 0;
int toolsWithoutArgumentsCount = 0;
int totalArgumentsCount = 0;
for (ToolExecutionRequest toolExecutionRequest : toolExecutionRequests) {
tokenCount += 4;
tokenCount += estimateTokenCountInText(toolExecutionRequest.name());
tokenCount += estimateTokenCountInText(toolExecutionRequest.arguments());
int argumentCount = countArguments(toolExecutionRequest.arguments());
if (argumentCount == 0) {
toolsWithoutArgumentsCount++;
} else {
toolsWithArgumentsCount++;
}
totalArgumentsCount += argumentCount;
toolsCount++;
}
if (modelName.equals(GPT_3_5_TURBO_1106.toString()) || isOneOfLatestGpt4Models()) {
tokenCount += 16;
tokenCount += 3 * toolsWithoutArgumentsCount;
tokenCount += toolsCount;
if (totalArgumentsCount > 0) {
tokenCount -= 1;
tokenCount -= 2 * totalArgumentsCount;
tokenCount += 2 * toolsWithArgumentsCount;
tokenCount += toolsCount;
}
}
if (modelName.equals(GPT_4_1106_PREVIEW.toString())) {
tokenCount += 3;
if (toolsCount > 1) {
tokenCount += 18;
tokenCount += 15 * toolsCount;
tokenCount += totalArgumentsCount;
tokenCount -= 3 * toolsWithoutArgumentsCount;
}
}
return tokenCount;
}
@Override
public int estimateTokenCountInForcefulToolExecutionRequest(ToolExecutionRequest toolExecutionRequest) {
if (isOneOfLatestGpt4Models()) {
int argumentsCount = countArguments(toolExecutionRequest.arguments());
if (argumentsCount == 0) {
return 1;
} else {
return estimateTokenCountInText(toolExecutionRequest.arguments());
}
}
int tokenCount = estimateTokenCountInToolExecutionRequests(singletonList(toolExecutionRequest));
tokenCount -= 4;
tokenCount -= estimateTokenCountInText(toolExecutionRequest.name());
if (modelName.equals(GPT_3_5_TURBO_1106.toString())) {
int argumentsCount = countArguments(toolExecutionRequest.arguments());
if (argumentsCount == 0) {
return 1;
}
tokenCount -= 19;
tokenCount += 2 * argumentsCount;
}
return tokenCount;
}
static int countArguments(String arguments) {
if (isNullOrBlank(arguments)) {
return 0;
}
Map<?, ?> argumentsMap = fromJson(arguments, Map.class);
return argumentsMap.size();
}
private boolean isOneOfLatestModels() {
return isOneOfLatestGpt3Models() || isOneOfLatestGpt4Models();
}
private boolean isOneOfLatestGpt3Models() {
// TODO add GPT_3_5_TURBO once it points to GPT_3_5_TURBO_1106
return modelName.equals(GPT_3_5_TURBO_1106.toString())
|| modelName.equals(GPT_3_5_TURBO_0125.toString());
}
private boolean isOneOfLatestGpt4Models() {
return modelName.equals(GPT_4_TURBO_PREVIEW.toString())
|| modelName.equals(GPT_4_1106_PREVIEW.toString())
|| modelName.equals(GPT_4_0125_PREVIEW.toString());
}
}
|
[
"com.knuddels.jtokkit.Encodings.newLazyEncodingRegistry().getEncodingForModel"
] |
[((2860, 2926), 'com.knuddels.jtokkit.Encodings.newLazyEncodingRegistry().getEncodingForModel')]
|
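A short usage sketch of the tokenizer above for sizing a chat prompt before sending it, assuming the langchain4j message factories `SystemMessage.from` and `UserMessage.from`:

```java
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.SystemMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.openai.OpenAiTokenizer;

import java.util.List;

public class OpenAiTokenizerExample {

    public static void main(String[] args) {
        // All current OpenAI chat models share the cl100k_base encoding
        OpenAiTokenizer tokenizer = new OpenAiTokenizer("gpt-3.5-turbo");

        List<ChatMessage> messages = List.of(
                SystemMessage.from("You are a helpful assistant."),
                UserMessage.from("How many tokens will this conversation cost?"));

        // Includes the per-message overhead and the 3 tokens priming the reply
        System.out.println(tokenizer.estimateTokenCountInMessages(messages));
    }
}
```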
package com.ashin.util;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingRegistry;
import com.knuddels.jtokkit.api.ModelType;
import com.theokanning.openai.completion.chat.ChatMessage;
import lombok.var;
import org.springframework.stereotype.Component;
import java.util.List;
/**
* Tokenizer
*
* @author ashinnotfound
* @date 2023/08/06
*/
@Component
public class Tokenizer {
private final EncodingRegistry registry = Encodings.newDefaultEncodingRegistry();
/**
* Count the tokens used by a list of chat messages
* via https://jtokkit.knuddels.de/docs/getting-started/recipes/chatml
*
* @param model    the model type
* @param messages the chat messages
* @return int
*/
public int countMessageTokens(ModelType model, List<ChatMessage> messages) {
Encoding encoding = registry.getEncodingForModel(model);
int tokensPerMessage = 0;
if (model.getName().startsWith("gpt-4")) {
tokensPerMessage = 3;
} else if (model.getName().startsWith("gpt-3.5-turbo")) {
tokensPerMessage = 4; // every message follows <|start|>{role/name}\n{content}<|end|>\n
}
int sum = 0;
for (final var message : messages) {
sum += tokensPerMessage;
sum += encoding.countTokens(message.getContent());
sum += encoding.countTokens(message.getRole());
}
sum += 3; // every reply is primed with <|start|>assistant<|message|>
return sum;
}
public int countMessageTokens(String modelName, List<ChatMessage> messages) {
return countMessageTokens(getModelTypeByName(modelName), messages);
}
/**
* Get the model type by its name
*
* @param modelName the model name
* @return {@code ModelType}
*/
public ModelType getModelTypeByName(String modelName){
if (ModelType.GPT_4.getName().equals(modelName)){
return ModelType.GPT_4;
} else if (ModelType.GPT_4_32K.getName().equals(modelName)){
return ModelType.GPT_4_32K;
} else if (ModelType.GPT_3_5_TURBO_16K.getName().equals(modelName)){
return ModelType.GPT_3_5_TURBO_16K;
} else {
return ModelType.GPT_3_5_TURBO;
}
}
}
|
[
"com.knuddels.jtokkit.api.ModelType.GPT_3_5_TURBO_16K.getName().equals",
"com.knuddels.jtokkit.api.ModelType.GPT_4.getName",
"com.knuddels.jtokkit.api.ModelType.GPT_4_32K.getName",
"com.knuddels.jtokkit.api.ModelType.GPT_4.getName().equals",
"com.knuddels.jtokkit.api.ModelType.GPT_3_5_TURBO_16K.getName",
"com.knuddels.jtokkit.api.ModelType.GPT_4_32K.getName().equals"
] |
[((1898, 1941), 'com.knuddels.jtokkit.api.ModelType.GPT_4.getName().equals'), ((1898, 1923), 'com.knuddels.jtokkit.api.ModelType.GPT_4.getName'), ((1999, 2046), 'com.knuddels.jtokkit.api.ModelType.GPT_4_32K.getName().equals'), ((1999, 2028), 'com.knuddels.jtokkit.api.ModelType.GPT_4_32K.getName'), ((2108, 2163), 'com.knuddels.jtokkit.api.ModelType.GPT_3_5_TURBO_16K.getName().equals'), ((2108, 2145), 'com.knuddels.jtokkit.api.ModelType.GPT_3_5_TURBO_16K.getName')]
|
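The same chatml recipe can be reproduced without Spring or the openai-java DTOs; a self-contained sketch using only jtokkit, with plain role/content pairs standing in for `ChatMessage`:

```java
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.ModelType;

import java.util.List;
import java.util.Map;

public class ChatMlTokenCount {

    public static void main(String[] args) {
        Encoding encoding = Encodings.newDefaultEncodingRegistry()
                .getEncodingForModel(ModelType.GPT_3_5_TURBO);

        List<Map.Entry<String, String>> messages = List.of(
                Map.entry("system", "You are a helpful assistant."),
                Map.entry("user", "Hello!"));

        int tokensPerMessage = 4; // every message follows <|start|>{role/name}\n{content}<|end|>\n
        int sum = 0;
        for (Map.Entry<String, String> message : messages) {
            sum += tokensPerMessage;
            sum += encoding.countTokens(message.getKey());   // role
            sum += encoding.countTokens(message.getValue()); // content
        }
        sum += 3; // every reply is primed with <|start|>assistant<|message|>
        System.out.println(sum + " prompt tokens");
    }
}
```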
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hw.langchain.embeddings.openai;
import com.google.common.primitives.Doubles;
import com.google.common.primitives.Floats;
import com.hw.langchain.embeddings.base.Embeddings;
import com.hw.langchain.exception.LangChainException;
import com.hw.openai.OpenAiClient;
import com.hw.openai.common.OpenaiApiType;
import com.hw.openai.entity.embeddings.Embedding;
import com.hw.openai.entity.embeddings.EmbeddingResp;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import lombok.AllArgsConstructor;
import lombok.Builder;
import java.util.*;
import java.util.stream.IntStream;
import static com.hw.langchain.utils.Utils.getOrEnvOrDefault;
/**
* Wrapper around OpenAI embedding models.
* <p>
* To use, you should have the environment variable OPENAI_API_KEY set with your API key or pass it as a
* named parameter to the constructor.
*
* @author HamaWhite
*/
@Builder
@AllArgsConstructor
public class OpenAIEmbeddings implements Embeddings {
private OpenAiClient client;
@Builder.Default
private String model = "text-embedding-ada-002";
private String openaiApiBase;
/**
* To support explicit proxy for OpenAI.
*/
private String openaiProxy;
@Builder.Default
private int embeddingCtxLength = 8191;
private String openaiApiKey;
@Builder.Default
private OpenaiApiType openaiApiType = OpenaiApiType.OPENAI;
private String openaiApiVersion;
protected String openaiOrganization;
/**
* Maximum number of texts to embed in each batch
*/
@Builder.Default
private int chunkSize = 1000;
/**
* Maximum number of retries to make when generating.
*/
@Builder.Default
private int maxRetries = 6;
/**
* Timeout for requests to OpenAI completion API. Default is 16 seconds.
*/
@Builder.Default
protected long requestTimeout = 16;
/**
* Validate parameters and init client
*/
public OpenAIEmbeddings init() {
openaiApiKey = getOrEnvOrDefault(openaiApiKey, "OPENAI_API_KEY");
openaiApiBase = getOrEnvOrDefault(openaiApiBase, "OPENAI_API_BASE", "");
openaiProxy = getOrEnvOrDefault(openaiProxy, "OPENAI_PROXY", "");
openaiOrganization = getOrEnvOrDefault(openaiOrganization, "OPENAI_ORGANIZATION", "");
openaiApiVersion = getOrEnvOrDefault(openaiApiVersion, "OPENAI_API_VERSION", "");
this.client = OpenAiClient.builder()
.openaiApiBase(openaiApiBase)
.openaiApiKey(openaiApiKey)
.openaiApiVersion(openaiApiVersion)
.openaiApiType(openaiApiType)
.openaiOrganization(openaiOrganization)
.openaiProxy(openaiProxy)
.requestTimeout(requestTimeout)
.build()
.init();
return this;
}
/**
* <a href="https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb">Embedding texts that are longer than the model's maximum context length</a>
*/
private List<List<Float>> getLenSafeEmbeddings(List<String> texts) {
List<List<Float>> embeddings = new ArrayList<>(texts.size());
List<List<Integer>> tokens = new ArrayList<>();
List<Integer> indices = new ArrayList<>();
Encoding encoding = Encodings.newDefaultEncodingRegistry()
.getEncodingForModel(model)
.orElseThrow(() -> new LangChainException("Encoding not found."));
for (int i = 0; i < texts.size(); i++) {
String text = texts.get(i);
if (model.endsWith("001")) {
// See https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
// replace newlines, which can negatively affect performance.
text = text.replace("\n", " ");
}
List<Integer> token = encoding.encode(text);
for (int j = 0; j < token.size(); j += embeddingCtxLength) {
tokens.add(token.subList(j, Math.min(j + embeddingCtxLength, token.size())));
indices.add(i);
}
}
List<List<Float>> batchedEmbeddings = new ArrayList<>();
for (int i = 0; i < tokens.size(); i += chunkSize) {
List<?> input = tokens.subList(i, Math.min(i + chunkSize, tokens.size()));
var response = embedWithRetry(input);
response.getData().forEach(result -> batchedEmbeddings.add(result.getEmbedding()));
}
List<? extends List<List<Float>>> results = IntStream.range(0, texts.size())
.mapToObj(i -> new ArrayList<List<Float>>())
.toList();
List<? extends List<Integer>> numTokensInBatch = IntStream.range(0, texts.size())
.mapToObj(i -> new ArrayList<Integer>())
.toList();
for (int i = 0; i < indices.size(); i++) {
int index = indices.get(i);
results.get(index).add(batchedEmbeddings.get(i));
numTokensInBatch.get(index).add(tokens.get(i).size());
}
for (int i = 0; i < texts.size(); i++) {
INDArray average;
try (INDArray resultArray =
Nd4j.create(results.get(i).stream().map(Floats::toArray).toArray(float[][]::new))) {
INDArray weightsArray = Nd4j.create(Doubles.toArray(numTokensInBatch.get(i)));
average = resultArray.mulRowVector(weightsArray).sum(0).div(weightsArray.sum(0));
}
INDArray normalizedAverage = average.div(average.norm2Number());
embeddings.add(Floats.asList(normalizedAverage.toFloatVector()));
}
return embeddings;
}
/**
* Call out to OpenAI's embedding endpoint.
*/
public List<Float> embeddingFunc(String text) {
if (text.length() > embeddingCtxLength) {
return getLenSafeEmbeddings(List.of(text)).get(0);
} else {
if (model.endsWith("001")) {
// See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
// replace newlines, which can negatively affect performance.
text = text.replace("\n", " ");
}
return embedWithRetry(List.of(text)).getData().get(0).getEmbedding();
}
}
/**
* Call out to OpenAI's embedding endpoint for embedding search docs.
*
* @param texts The list of texts to embed.
* @return List of embeddings, one for each text.
*/
@Override
public List<List<Float>> embedDocuments(List<String> texts) {
// NOTE: to keep things simple, we assume the list may contain texts longer
// than the maximum context and always use the length-safe embedding function.
return this.getLenSafeEmbeddings(texts);
}
/**
* Call out to OpenAI's embedding endpoint for embedding query text.
*
* @param text The text to embed.
* @return Embedding for the text.
*/
@Override
public List<Float> embedQuery(String text) {
return embeddingFunc(text);
}
public EmbeddingResp embedWithRetry(List<?> input) {
var embedding = Embedding.builder()
.model(model)
.input(input)
.build();
return client.createEmbedding(embedding);
}
}
|
[
"com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncodingForModel",
"com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry()\n .getEncodingForModel(model).orElseThrow"
] |
[((3326, 3731), 'com.hw.openai.OpenAiClient.builder()\n .openaiApiBase(openaiApiBase)\n .openaiApiKey(openaiApiKey)\n .openaiApiVersion(openaiApiVersion)\n .openaiApiType(openaiApiType)\n .openaiOrganization(openaiOrganization)\n .openaiProxy(openaiProxy)\n .requestTimeout(requestTimeout)\n .build().init'), ((3326, 3707), 'com.hw.openai.OpenAiClient.builder()\n .openaiApiBase(openaiApiBase)\n .openaiApiKey(openaiApiKey)\n .openaiApiVersion(openaiApiVersion)\n .openaiApiType(openaiApiType)\n .openaiOrganization(openaiOrganization)\n .openaiProxy(openaiProxy)\n .requestTimeout(requestTimeout).build'), ((3326, 3682), 'com.hw.openai.OpenAiClient.builder()\n .openaiApiBase(openaiApiBase)\n .openaiApiKey(openaiApiKey)\n .openaiApiVersion(openaiApiVersion)\n .openaiApiType(openaiApiType)\n .openaiOrganization(openaiOrganization)\n .openaiProxy(openaiProxy).requestTimeout'), ((3326, 3634), 'com.hw.openai.OpenAiClient.builder()\n .openaiApiBase(openaiApiBase)\n .openaiApiKey(openaiApiKey)\n .openaiApiVersion(openaiApiVersion)\n .openaiApiType(openaiApiType)\n .openaiOrganization(openaiOrganization).openaiProxy'), ((3326, 3592), 'com.hw.openai.OpenAiClient.builder()\n .openaiApiBase(openaiApiBase)\n .openaiApiKey(openaiApiKey)\n .openaiApiVersion(openaiApiVersion)\n .openaiApiType(openaiApiType).openaiOrganization'), ((3326, 3536), 'com.hw.openai.OpenAiClient.builder()\n .openaiApiBase(openaiApiBase)\n .openaiApiKey(openaiApiKey)\n .openaiApiVersion(openaiApiVersion).openaiApiType'), ((3326, 3490), 'com.hw.openai.OpenAiClient.builder()\n .openaiApiBase(openaiApiBase)\n .openaiApiKey(openaiApiKey).openaiApiVersion'), ((3326, 3438), 'com.hw.openai.OpenAiClient.builder()\n .openaiApiBase(openaiApiBase).openaiApiKey'), ((3326, 3394), 'com.hw.openai.OpenAiClient.builder().openaiApiBase'), ((4238, 4402), 'com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry()\n .getEncodingForModel(model).orElseThrow'), ((4238, 4320), 'com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncodingForModel'), ((5476, 5595), 'java.util.stream.IntStream.range(0, texts.size())\n .mapToObj(i -> new ArrayList<List<Float>>()).toList'), ((5476, 5569), 'java.util.stream.IntStream.range(0, texts.size()).mapToObj'), ((5654, 5769), 'java.util.stream.IntStream.range(0, texts.size())\n .mapToObj(i -> new ArrayList<Integer>()).toList'), ((5654, 5743), 'java.util.stream.IntStream.range(0, texts.size()).mapToObj'), ((8112, 8216), 'com.hw.openai.entity.embeddings.Embedding.builder()\n .model(model)\n .input(input).build'), ((8112, 8191), 'com.hw.openai.entity.embeddings.Embedding.builder()\n .model(model).input'), ((8112, 8161), 'com.hw.openai.entity.embeddings.Embedding.builder().model')]
|
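The core of `getLenSafeEmbeddings` is its token-windowing loop. A sketch of just that step, assuming jtokkit 0.6+ where `encode` returns an `IntArrayList` with a `boxed()` view (the row above targets an older API where `encode` already returns `List<Integer>`):

```java
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingType;

import java.util.ArrayList;
import java.util.List;

public class LengthSafeWindows {

    // Split the token ids of a long text into windows of at most ctxLength
    // tokens; each window is embedded separately and the vectors are later
    // averaged, weighted by window length
    static List<List<Integer>> tokenWindows(String text, int ctxLength) {
        Encoding encoding = Encodings.newDefaultEncodingRegistry().getEncoding(EncodingType.CL100K_BASE);
        List<Integer> tokens = encoding.encode(text).boxed();
        List<List<Integer>> windows = new ArrayList<>();
        for (int i = 0; i < tokens.size(); i += ctxLength) {
            windows.add(tokens.subList(i, Math.min(i + ctxLength, tokens.size())));
        }
        return windows;
    }

    public static void main(String[] args) {
        String longText = "a fairly long input ".repeat(2000);
        System.out.println(tokenWindows(longText, 8191).size() + " windows");
    }
}
```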
package com.hugai.chatsdk.common.utils;
import cn.hutool.core.collection.CollUtil;
import cn.hutool.core.util.StrUtil;
import com.alibaba.fastjson2.JSONArray;
import com.alibaba.fastjson2.JSONObject;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingType;
import com.org.bebas.core.function.OR;
import java.util.List;
import java.util.Objects;
/**
* Token estimation utility class
*
* @author WuHao
* @since 2023/6/1 14:32
*/
public class TokenCalculateUtil {
private static Encoding encoding;
static {
encoding = Encodings.newDefaultEncodingRegistry().getEncoding(EncodingType.CL100K_BASE);
}
/**
* Get the number of tokens consumed by the given content
*
* @param content the text to count
* @return the token count
*/
public static int getTokenNumOfContent(String content) {
if (StrUtil.isEmpty(content))
return 0;
return encoding.encode(content).size();
}
/**
* Get the number of tokens consumed by a single message object
*
* @param t the message object
* @return the token count
*/
public static <T> int getTokenNumOfContents(T t) {
return getTokenNumOfContents(CollUtil.newArrayList(t));
}
/**
* Multi-turn conversation
*
* @param tList the list of message objects
* @return the token count
*/
public static <T> int getTokenNumOfContents(List<T> tList) {
if (CollUtil.isEmpty(tList))
return 0;
StringBuilder strBuilder = new StringBuilder();
OR.run(JSONArray.parseArray(JSONArray.toJSONString(tList), JSONObject.class), CollUtil::isNotEmpty, jsonArrays -> {
jsonArrays.forEach(item -> {
if (Objects.nonNull(item)) {
strBuilder.append("{")
.append("message:").append(item.get("content"))
.append("}");
}
});
});
return encoding.encode(strBuilder.toString()).size();
}
}
|
[
"com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncoding"
] |
[((601, 677), 'com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncoding')]
|
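A small usage sketch for the utility above (assumed to be on the classpath). Any bean or map with a `content` field should work, since `getTokenNumOfContents` serializes each item with fastjson2 and reads its `content` key:

```java
import com.hugai.chatsdk.common.utils.TokenCalculateUtil;

import java.util.List;
import java.util.Map;

public class TokenCalculateExample {

    public static void main(String[] args) {
        // Maps stand in for chat-message beans here
        List<Map<String, String>> history = List.of(
                Map.of("role", "user", "content", "Hello"),
                Map.of("role", "assistant", "content", "Hi, how can I help?"));

        System.out.println(TokenCalculateUtil.getTokenNumOfContent("Hello"));
        System.out.println(TokenCalculateUtil.getTokenNumOfContents(history));
    }
}
```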
package kr.jm.gpt;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.EncodingType;
import kr.jm.openai.OpenAiChatCompletions;
import kr.jm.openai.dto.Message;
import kr.jm.openai.dto.OpenAiChatCompletionsRequest;
import kr.jm.openai.dto.Role;
import kr.jm.openai.sse.OpenAiSseChatCompletionsPartConsumer;
import kr.jm.utils.JMArrays;
import kr.jm.utils.JMOptional;
import kr.jm.utils.JMResources;
import kr.jm.utils.enums.OS;
import kr.jm.utils.helper.JMFile;
import kr.jm.utils.helper.JMPath;
import java.awt.*;
import java.awt.datatransfer.StringSelection;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.function.Function;
public class CliGenie {
private final Message systemMessage;
private final String userPromptFormat;
public CliGenie() {
String lineSeparator = OS.getLineSeparator();
this.systemMessage =
new Message(Role.system,
"Act as CLI Assistant for " + OS.getOsName() + " " + OS.getOsVersion() + lineSeparator +
"- Do Not: explanations and code blocks(```)\n- Response: in user's language");
this.userPromptFormat = "Generate a shell command or recommendation to %s";
}
public String spell(String prmpter, Function<String, String> spellFunction) {
return JMOptional.getOptional(prmpter).map(spellFunction).map(String::trim).orElseThrow();
}
List<Message> buildPromptMessageList(String userPrompt) {
return List.of(systemMessage, new Message(Role.user, userPrompt));
}
public static void main(String... args) {
CliGenieCommandLine cliGenieCommandLine = new CliGenieCommandLine();
if (JMArrays.isNullOrEmpty(args))
cliGenieCommandLine.printHelp();
else
Optional.ofNullable(cliGenieCommandLine.buildCliOptionsPrompt(args))
.ifPresent(CliGenie::handleOptionAndSpell);
}
private static void handleOptionAndSpell(CliOptionsPrompt cliOptionsPrompt) {
System.out.println();
handlePostOptions(handleGptPromptOption(cliOptionsPrompt.getOptions(), cliOptionsPrompt.getPrompt()),
cliOptionsPrompt.getOptions());
}
private static String handleGptPromptOption(Set<String> cliOptions, String prompt) {
return cliOptions.contains("tc") ?
handleTokenCounterOption(Encodings.newLazyEncodingRegistry().getEncoding(EncodingType.CL100K_BASE)
.encodeOrdinary(prompt), prompt.length())
: handleOptionAndSpell(new CliGenie(), cliOptions, prompt, new OpenAiChatCompletions(getOpenaiApiKey()),
new OpenAiSseChatCompletionsPartConsumer(System.out::print));
}
private static String handleTokenCounterOption(List<Integer> tokenIds, int characterLength) {
String tokenCounterString = String.format("%-8s%-10s%s", "Tokens", "Character", "TOKEN IDS\n") +
String.format("%-8d%-10d%s", tokenIds.size(), characterLength, tokenIds);
System.out.println(tokenCounterString);
return tokenCounterString;
}
private static String handleOptionAndSpell(CliGenie cliGenie, Set<String> cliOptions, String prompt,
OpenAiChatCompletions openAiChatCompletions,
OpenAiSseChatCompletionsPartConsumer openAiSseChatCompletionsPartConsumer) {
return cliOptions.contains("g") ? cliGenie.spell(prompt,
spell -> requestWithSse(openAiChatCompletions, openAiSseChatCompletionsPartConsumer,
cliGenie.buildPromptMessageList(spell), 1D)) : cliGenie.spell(prompt,
spell -> requestWithSse(openAiChatCompletions, openAiSseChatCompletionsPartConsumer,
cliGenie.buildPromptMessageList(String.format(cliGenie.userPromptFormat, spell)), 0D));
}
private static String requestWithSse(OpenAiChatCompletions openAiChatCompletions,
OpenAiSseChatCompletionsPartConsumer openAiSseChatCompletionsPartConsumer, List<Message> messages,
Double temperature) {
try {
return openAiChatCompletions.requestWithSse(
new OpenAiChatCompletionsRequest().setModel("gpt-3.5-turbo").setMaxTokens(3000)
.setTemperature(temperature).setStream(true).setMessages(messages),
() -> openAiSseChatCompletionsPartConsumer).get().getChoices().get(0).getMessage().getContent();
} catch (InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
}
private static void handlePostOptions(String result, Set<String> options) {
if (Objects.isNull(options) || !options.contains("no")) {
String osName = System.getProperty("os.name").toLowerCase();
OS.addShutdownHook(() -> copyToClipboard(osName, result));
Optional.ofNullable(getCopyAndPasteInfo(osName)).ifPresent(copyAndPasteInfo ->
System.out.println(OS.getLineSeparator() + OS.getLineSeparator() + copyAndPasteInfo));
}
}
private static void copyToClipboard(String osName, String result) {
try {
if (System.getenv("SSH_CLIENT") != null || System.getProperty("java.awt.headless") != null)
copyWithCliToClipboard(result, osName);
else
Toolkit.getDefaultToolkit().getSystemClipboard().setContents(new StringSelection(result), null);
} catch (IOException e) {
throw new UnsupportedOperationException("Unsupported OS: " + osName);
}
}
private static String getCopyAndPasteInfo(String osName) {
if (osName.contains("linux"))
return "Outputs copied, please paste it: Ctrl + Shift + V (Linux).";
else if (osName.contains("mac"))
return "Outputs copied, please paste it: Command + V (MacOS).";
else if (osName.contains("windows"))
return "Outputs copied, please paste it: Ctrl + V (Windows).";
else return null;
}
private static void copyWithCliToClipboard(String result, String osName) throws IOException {
if (osName.contains("linux")) {
Runtime.getRuntime().exec("echo '" + result + "' | xclip -selection clipboard");
} else if (osName.contains("mac")) {
Runtime.getRuntime().exec("echo '" + result + "' | pbcopy");
} else if (osName.contains("windows")) {
Runtime.getRuntime().exec("cmd.exe /c echo " + result + " | clip");
} else {
System.out.println("Unsupported OS: " + osName);
}
}
private static String getOpenaiApiKey() {
Path openAiApiKeyFilePath = Paths.get(System.getProperty("user.home"), ".cg", "openai-api-key");
return JMOptional.getOptional(System.getenv("OPENAI_API_KEY"))
.map(openAiKey -> saveOpenApiKey(openAiApiKeyFilePath, openAiKey))
.or(() -> JMResources.getStringOptionalWithFilePath(openAiApiKeyFilePath.toString()))
.orElseThrow(() -> new RuntimeException("The OPENAI_API_KEY environment variable is not set."));
}
private static String saveOpenApiKey(Path openAiApiKeyFilePath, String openAiApiKey) {
if (!JMPath.getInstance().exists(openAiApiKeyFilePath))
OS.addShutdownHook(() -> {
JMPath.getInstance().createDirectory(openAiApiKeyFilePath.getParent());
JMFile.getInstance().writeString(openAiApiKey, openAiApiKeyFilePath.toFile());
});
return openAiApiKey;
}
}
|
[
"com.knuddels.jtokkit.Encodings.newLazyEncodingRegistry().getEncoding",
"com.knuddels.jtokkit.Encodings.newLazyEncodingRegistry().getEncoding(EncodingType.CL100K_BASE).encodeOrdinary"
] |
[((1518, 1600), 'kr.jm.utils.JMOptional.getOptional(prmpter).map(spellFunction).map(String::trim).orElseThrow'), ((1518, 1586), 'kr.jm.utils.JMOptional.getOptional(prmpter).map(spellFunction).map'), ((1518, 1568), 'kr.jm.utils.JMOptional.getOptional(prmpter).map'), ((1988, 2119), 'java.util.Optional.ofNullable(cliGenieCommandLine.buildCliOptionsPrompt(args)).ifPresent'), ((2578, 2699), 'com.knuddels.jtokkit.Encodings.newLazyEncodingRegistry().getEncoding(EncodingType.CL100K_BASE).encodeOrdinary'), ((2578, 2651), 'com.knuddels.jtokkit.Encodings.newLazyEncodingRegistry().getEncoding'), ((5080, 5264), 'java.util.Optional.ofNullable(getCopyAndPasteInfo(osName)).ifPresent'), ((6983, 7335), 'kr.jm.utils.JMOptional.getOptional(System.getenv("OPENAI_API_KEY"))\n .map(openAiKey -> saveOpenApiKey(openAiApiKeyFilePath, openAiKey))\n .or(() -> JMResources.getStringOptionalWithFilePath(openAiApiKeyFilePath.toString())).orElseThrow'), ((6983, 7223), 'kr.jm.utils.JMOptional.getOptional(System.getenv("OPENAI_API_KEY"))\n .map(openAiKey -> saveOpenApiKey(openAiApiKeyFilePath, openAiKey)).or'), ((6983, 7121), 'kr.jm.utils.JMOptional.getOptional(System.getenv("OPENAI_API_KEY")).map'), ((7448, 7497), 'kr.jm.utils.helper.JMPath.getInstance().exists'), ((7554, 7624), 'kr.jm.utils.helper.JMPath.getInstance().createDirectory'), ((7642, 7719), 'kr.jm.utils.helper.JMFile.getInstance().writeString')]
|
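The `tc` option above boils down to a few lines of jtokkit. An isolated sketch, assuming a jtokkit version where `encodeOrdinary` returns an `IntArrayList` (the row above targets an older API where it is already a `List<Integer>`):

```java
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.EncodingType;

import java.util.List;

public class TokenCounterOption {

    public static void main(String[] args) {
        String prompt = "list files modified in the last hour";
        List<Integer> tokenIds = Encodings.newLazyEncodingRegistry()
                .getEncoding(EncodingType.CL100K_BASE)
                .encodeOrdinary(prompt)
                .boxed();

        // Same table layout as handleTokenCounterOption above
        System.out.printf("%-8s%-10s%s%n", "Tokens", "Character", "TOKEN IDS");
        System.out.printf("%-8d%-10d%s%n", tokenIds.size(), prompt.length(), tokenIds);
    }
}
```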
package com.example.springai.aoai.service;
import com.azure.ai.openai.OpenAIClient;
import com.azure.ai.openai.OpenAIClientBuilder;
import com.azure.core.credential.AzureKeyCredential;
import com.example.springai.aoai.dto.ChatMessageDto;
import com.example.springai.aoai.exception.TooManyRequestsException;
import com.example.springai.aoai.mapper.ChatMessageMapper;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.ModelType;
import jakarta.annotation.PostConstruct;
import lombok.extern.slf4j.Slf4j;
import org.springframework.ai.azure.openai.AzureOpenAiChatClient;
import org.springframework.ai.azure.openai.AzureOpenAiChatOptions;
import org.springframework.ai.chat.ChatClient;
import org.springframework.ai.chat.Generation;
import org.springframework.ai.chat.messages.Message;
import org.springframework.ai.chat.messages.SystemMessage;
import org.springframework.ai.chat.prompt.Prompt;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.http.HttpStatus;
import org.springframework.retry.annotation.Backoff;
import org.springframework.retry.annotation.Retryable;
import org.springframework.stereotype.Service;
import org.springframework.web.client.HttpClientErrorException;
import reactor.core.publisher.Flux;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
/**
* Chatbot service
*/
@Service
@Slf4j
public class ChatbotService {
@Autowired
private final ChatClient chatClient;
@Autowired
private final SystemMessage systemMessage;
@Autowired
private final ChatMessageMapper chatMessageMapper;
@Value("${spring.ai.azure.openai.endpoint}")
private String azureOpenAiEndpoint;
@Value("${spring.ai.azure.openai.api-key}")
private String azureOpenAiApiKey;
@Value("${spring.ai.azure.openai.chat.options.model}")
private String model;
@Value("${spring.ai.azure.openai.chat.options.max-tokens}")
private Integer maxTokens;
@Value("${spring.ai.azure.openai.chat.options.temperature}")
private Float temperature;
private Encoding encoding;
private Optional<ModelType> modelType;
/**
* Constructor
*
* @param chatClient the chatClient
* @param systemMessage the system message
* @param chatMessageMapper the ChatMessage Mapper
*/
public ChatbotService(ChatClient chatClient, SystemMessage systemMessage, ChatMessageMapper chatMessageMapper) {
this.chatClient = chatClient;
this.systemMessage = systemMessage;
this.chatMessageMapper = chatMessageMapper;
}
/**
* Calls OpenAI chat completion API and returns a Generation object
* Retry with exponential backoff on 429 Too Many Requests status code
*
* @param chatMessageDtos list of all user and assistant messages
* @return a Generation object
*/
@Retryable(
retryFor = {TooManyRequestsException.class},
maxAttempts = 3,
backoff = @Backoff(delay = 1000, maxDelay = 5000, multiplier = 2))
public Optional<Generation> completion(List<ChatMessageDto> chatMessageDtos) {
List<Message> messages = new ArrayList<>(chatMessageMapper.toMessage(chatMessageDtos));
messages.add(0, systemMessage);
Prompt prompt = new Prompt(messages);
Generation gen = null;
try {
gen = chatClient.call(prompt).getResults().get(0);
} catch (RuntimeException e) {
log.error("Caught a RuntimeException: " + e.getMessage());
if (e instanceof HttpClientErrorException) {
if (((HttpClientErrorException) e).getStatusCode().equals(HttpStatus.TOO_MANY_REQUESTS)) {
throw new TooManyRequestsException(e.getMessage());
}
}
}
return Optional.ofNullable(gen);
}
/**
* Calls OpenAI chat completion API in Stream mode and returns a Flux object
*
* @param chatMessageDtos list of all user and assistant messages
* @return a Generation object
*/
@Retryable(
retryFor = {TooManyRequestsException.class},
maxAttempts = 3,
backoff = @Backoff(delay = 1000, maxDelay = 5000, multiplier = 2))
public Flux<Generation> completionWithStream(List<ChatMessageDto> chatMessageDtos) {
List<Message> messages = new ArrayList<>(chatMessageMapper.toMessage(chatMessageDtos));
messages.add(0, systemMessage);
Prompt prompt = new Prompt(messages);
OpenAIClientBuilder openAIClientBuilder = new OpenAIClientBuilder();
OpenAIClient openAIClient = openAIClientBuilder.credential(new AzureKeyCredential(azureOpenAiApiKey)).endpoint(azureOpenAiEndpoint).buildClient();
var azureOpenAiChatClient = new AzureOpenAiChatClient(openAIClient).withDefaultOptions(AzureOpenAiChatOptions.builder().withModel(model).withTemperature(temperature).withMaxTokens(maxTokens).build());
return azureOpenAiChatClient.stream(prompt)
.onErrorResume(e -> {
log.error("Caught a RuntimeException: " + e.getMessage());
if (e instanceof HttpClientErrorException) {
if (((HttpClientErrorException) e).getStatusCode().equals(HttpStatus.TOO_MANY_REQUESTS)) {
throw new TooManyRequestsException(e.getMessage());
}
}
return Flux.empty();
})
.flatMap(s -> Flux.fromIterable(s.getResults()));
}
/**
* Initializing encoding and model type after bean creation
*/
@PostConstruct
public void postConstructInit() {
//Hack because jtokkit's model name has a dot
this.modelType = ModelType.fromName(model.replace("35", "3.5"));
if (this.modelType.isEmpty()) {
log.error("Could not get model from name");
throw new IllegalStateException();
}
this.encoding = Encodings.newDefaultEncodingRegistry().getEncodingForModel(this.modelType.get());
}
/**
* Checking if the context window of the model is big enough
*
* @param chatMessageDtos list of all user and assistant messages
* @return true if the context window of the model is big enough, false if not
*/
public boolean isContextLengthValid(List<ChatMessageDto> chatMessageDtos) {
String contextWindow = systemMessage.getContent() + chatMessageDtos.stream()
.map(ChatMessageDto::getContent)
.collect(Collectors.joining());
int currentContextLength = this.encoding.countTokens(contextWindow);
if (this.modelType.isPresent()) {
return (this.modelType.get().getMaxContextLength() > (currentContextLength + maxTokens));
}
return false;
}
/**
* Adjusting the number of messages in the context window to fit the model's max context length
*
* @param chatMessageDtos list of all user and assistant messages
* @return same list if model's max context length has not been reached, smaller list if it has
*/
public List<ChatMessageDto> adjustContextWindow(List<ChatMessageDto> chatMessageDtos) {
List<ChatMessageDto> chatMessagesDtosAdjusted = new ArrayList<>(chatMessageDtos);
for (int i = 0; i < chatMessageDtos.size(); i++) {
if (!isContextLengthValid(chatMessagesDtosAdjusted)) {
chatMessagesDtosAdjusted.remove(0);
} else {
break;
}
}
return chatMessagesDtosAdjusted;
}
}
|
[
"com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncodingForModel"
] |
[((4999, 5110), 'org.springframework.ai.azure.openai.AzureOpenAiChatOptions.builder().withModel(model).withTemperature(temperature).withMaxTokens(maxTokens).build'), ((4999, 5102), 'org.springframework.ai.azure.openai.AzureOpenAiChatOptions.builder().withModel(model).withTemperature(temperature).withMaxTokens'), ((4999, 5077), 'org.springframework.ai.azure.openai.AzureOpenAiChatOptions.builder().withModel(model).withTemperature'), ((4999, 5048), 'org.springframework.ai.azure.openai.AzureOpenAiChatOptions.builder().withModel'), ((6165, 6245), 'com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncodingForModel')]
|
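`adjustContextWindow` above evicts the oldest messages until the prompt plus the reserved completion budget fits the model. The same idea in isolation, using jtokkit's `ModelType#getMaxContextLength`, with message DTOs replaced by plain strings for brevity:

```java
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.ModelType;

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

public class ContextWindowTrim {

    // Drop the oldest messages until prompt tokens + reserved completion
    // tokens fit the model's maximum context length
    static List<String> trim(List<String> messages, ModelType model, int maxCompletionTokens) {
        Encoding encoding = Encodings.newDefaultEncodingRegistry().getEncodingForModel(model);
        Deque<String> window = new ArrayDeque<>(messages);
        while (window.size() > 1
                && encoding.countTokens(String.join("", window)) + maxCompletionTokens > model.getMaxContextLength()) {
            window.removeFirst(); // evict the oldest message
        }
        return List.copyOf(window);
    }

    public static void main(String[] args) {
        List<String> history = List.of("first message", "second message", "latest question");
        System.out.println(trim(history, ModelType.GPT_3_5_TURBO, 256));
    }
}
```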
package top.verytouch.vkit.chat.gpt;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingType;
import lombok.extern.slf4j.Slf4j;
import top.verytouch.vkit.chat.gpt.pojo.*;
import top.verytouch.vkit.common.util.HttpUtils;
import top.verytouch.vkit.common.util.JsonUtils;
import top.verytouch.vkit.common.util.MapUtils;
import top.verytouch.vkit.common.util.StringUtils;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
/**
* ChatGPT API client
*
* @author verytouch
* @since 2023/7/3 10:49
*/
@Slf4j
public class ChatGPTService {
private final ChatGPTProperties properties;
private final Encoding encoding;
public ChatGPTService(ChatGPTProperties properties) {
this.properties = properties;
this.encoding = Encodings.newLazyEncodingRegistry().getEncoding(EncodingType.fromName(properties.getTokenEncodingType()).orElse(EncodingType.CL100K_BASE));
}
/**
* Count the number of tokens
*/
public int tokens(String content) {
return encoding.countTokens(content);
}
/**
* Chat with gpt-3.5-turbo
*
* @param messages to keep context, include the previous conversation messages in this array
*/
public ChatCompletionsResponse chatCompletions(List<Message> messages) {
Map<String, Object> body = MapUtils.Builder.hashMap(String.class, Object.class)
.put("model", ChatGPTProperties.MODEL_TURBO)
.put("messages", messages)
.build();
String responseString = post(JsonUtils.toJson(body), ChatGPTApiEnum.CHAT_COMPLETIONS);
return JsonUtils.fromJson(responseString, ChatCompletionsResponse.class);
}
/**
* Chat with gpt-3.5-turbo via SSE
*
* @param messages to keep context, include the previous conversation messages in this array
* @param consumer receives each chunk of returned data
* @param finisher receives the ChunkResponse assembled from all chunks; usage is computed from the assembled response
*/
public void chatCompletions(List<Message> messages, Consumer<String> consumer, BiConsumer<ChunkResponse, Usage> finisher) {
Map<String, Object> body = MapUtils.Builder.hashMap(String.class, Object.class)
.put("model", ChatGPTProperties.MODEL_TURBO)
.put("messages", messages)
.put("stream", true)
.build();
Consumer<List<String>> myFinisher = list -> {
ChunkResponse response = list.stream().map(this::parseChunk)
.filter(Objects::nonNull)
.reduce(ChunkResponse::mergeMessage)
.orElse(null);
int promptTokens = messages.stream()
.map(Message::getContent)
.map(this::tokens)
.reduce(0, Math::addExact);
int completionTokens = 0;
try {
completionTokens = response == null ? 0 : this.tokens(response.getChoices().get(0).getDelta().getContent());
} catch (Exception e) {
log.error("计算completionTokens异常", e);
}
Usage usage = Usage.of(promptTokens, completionTokens);
finisher.accept(response, usage);
log.info("chat-gpt-chunk组装后的数据 = {}, usage = {}", response, usage);
};
post(JsonUtils.toJson(body), ChatGPTApiEnum.CHAT_COMPLETIONS, consumer, myFinisher);
}
/**
* Completions with text-davinci-003
*/
public CompletionsResponse completions(CompletionsRequest request) {
request.setModel(ChatGPTProperties.MODEL_DAVINCI3);
String responseString = post(JsonUtils.toJson(request), ChatGPTApiEnum.COMPLETIONS);
return JsonUtils.fromJson(responseString, CompletionsResponse.class);
}
/**
* Generate images
*/
public CreateImageResponse createImage(CreateImageRequest request) {
String responseString = post(JsonUtils.toJson(request), ChatGPTApiEnum.CREATE_IMAGE);
CreateImageResponse imageResponse = JsonUtils.fromJson(responseString, CreateImageResponse.class);
Integer completionTokens = imageResponse.getData().stream()
.map(CreateImageResponse.Data::getUrl)
.map(this::tokens)
.reduce(0, Math::addExact);
imageResponse.setUsage(Usage.of(this.tokens(request.getPrompt()), completionTokens));
return imageResponse;
}
/**
* Parse a chunk string into a bean
*
* @param json json or sse-prefixed json
*/
public ChunkResponse parseChunk(String json) {
String prefix = "data: ";
if (StringUtils.isBlank(json)) {
return null;
}
if (json.startsWith(prefix)) {
json = json.substring(prefix.length());
}
json = json.trim();
if (!json.startsWith("{") && !json.endsWith("}")) {
return null;
}
return JsonUtils.fromJson(json, ChunkResponse.class);
}
public String post(String body, ChatGPTApiEnum apiEnum) {
String responseString;
try {
responseString = prepare(body, apiEnum).request().getString();
} catch (Exception e) {
log.error("请求chat-gpt失败", e);
throw new RuntimeException(e);
}
log.info("请求chat-gpt完成, api={}, response={}", apiEnum.getPath(), responseString);
return responseString;
}
public void post(String body, ChatGPTApiEnum apiEnum, Consumer<String> consumer, Consumer<List<String>> finisher) {
List<String> msgList = new LinkedList<>();
try {
prepare(body, apiEnum).sseRequest(msg -> {
log.debug("请求chat-gpt-chunk返回, api={}, chunk={}", apiEnum.getPath(), msg);
consumer.accept(msg);
msgList.add(msg);
});
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
log.info("请求chat-gpt-chunk完成, api={}", apiEnum.getPath());
finisher.accept(msgList);
}
}
private HttpUtils prepare(String body, ChatGPTApiEnum apiEnum) {
log.info("请求chat-gpt开始, api={}, params={}", apiEnum.getPath(), body);
return new HttpUtils(properties.getHost() + apiEnum.getPath())
.addHeader("Authorization", "Bearer " + properties.getApikey())
.addHeader("Content-Type", "application/json")
.body(body.getBytes(StandardCharsets.UTF_8))
.method("POST")
.connectTimeout(Duration.ofSeconds(30))
.readTimeout(Duration.ofSeconds(120));
}
}
|
[
"com.knuddels.jtokkit.Encodings.newLazyEncodingRegistry().getEncoding",
"com.knuddels.jtokkit.api.EncodingType.fromName(properties.getTokenEncodingType()).orElse"
] |
[((1003, 1141), 'com.knuddels.jtokkit.Encodings.newLazyEncodingRegistry().getEncoding'), ((1051, 1140), 'com.knuddels.jtokkit.api.EncodingType.fromName(properties.getTokenEncodingType()).orElse'), ((1545, 1726), 'top.verytouch.vkit.common.util.MapUtils.Builder.hashMap(String.class, Object.class)\n .put("model", ChatGPTProperties.MODEL_TURBO)\n .put("messages", messages).build'), ((1545, 1701), 'top.verytouch.vkit.common.util.MapUtils.Builder.hashMap(String.class, Object.class)\n .put("model", ChatGPTProperties.MODEL_TURBO).put'), ((1545, 1658), 'top.verytouch.vkit.common.util.MapUtils.Builder.hashMap(String.class, Object.class).put'), ((1545, 1597), 'top.verytouch.vkit.common.util.MapUtils.Builder.hashMap'), ((2372, 2590), 'top.verytouch.vkit.common.util.MapUtils.Builder.hashMap(String.class, Object.class)\n .put("model", ChatGPTProperties.MODEL_TURBO)\n .put("messages", messages)\n .put("stream", true).build'), ((2372, 2565), 'top.verytouch.vkit.common.util.MapUtils.Builder.hashMap(String.class, Object.class)\n .put("model", ChatGPTProperties.MODEL_TURBO)\n .put("messages", messages).put'), ((2372, 2528), 'top.verytouch.vkit.common.util.MapUtils.Builder.hashMap(String.class, Object.class)\n .put("model", ChatGPTProperties.MODEL_TURBO).put'), ((2372, 2485), 'top.verytouch.vkit.common.util.MapUtils.Builder.hashMap(String.class, Object.class).put'), ((2372, 2424), 'top.verytouch.vkit.common.util.MapUtils.Builder.hashMap')]
|
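The `finisher` above reconstructs token usage from the streamed chunks, since the SSE response carries no usage figures of its own. The same bookkeeping in isolation:

```java
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingType;

import java.util.List;

public class StreamUsageTally {

    public static void main(String[] args) {
        Encoding encoding = Encodings.newLazyEncodingRegistry().getEncoding(EncodingType.CL100K_BASE);

        // Prompt side: count every message that was sent
        List<String> promptContents = List.of("You are a helpful assistant.", "Hello!");
        int promptTokens = promptContents.stream().mapToInt(encoding::countTokens).sum();

        // Completion side: concatenate the streamed deltas, then count once
        List<String> deltas = List.of("Hi", " there", "!");
        int completionTokens = encoding.countTokens(String.join("", deltas));

        System.out.printf("prompt=%d completion=%d total=%d%n",
                promptTokens, completionTokens, promptTokens + completionTokens);
    }
}
```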
package com.xiaohai.spider.chatgpt.azure;
import cn.hutool.core.text.CharSequenceUtil;
import com.azure.ai.openai.models.ChatMessage;
import com.azure.ai.openai.models.ChatRole;
import com.azure.ai.openai.models.FunctionCall;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingRegistry;
import com.knuddels.jtokkit.api.ModelType;
import com.xiaohai.common.exception.ServiceException;
import lombok.extern.slf4j.Slf4j;
import javax.validation.constraints.NotNull;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
/**
* Token counting utility class
*
* @author wangchenghai
* @date 2023/08/22 16:00:18
*/
@Slf4j
public class TikTokensUtil {
public static final String NOT_MODEL = "The model is unknown; tokens cannot be counted";
/**
* The registry instance
*/
private static final EncodingRegistry REGISTRY = Encodings.newDefaultEncodingRegistry();
/**
* Count the tokens used by a list of messages for the given model name.
* Follows the official counting logic:
* <a href=https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb>https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb</a>
*
* @param modelName model name
* @param messages  message list
* @return number of tokens
*/
public static int tokens(@NotNull String modelName, @NotNull List<ChatMessage> messages) throws ServiceException {
Encoding encoding = REGISTRY.getEncodingForModel(modelName).orElseThrow(() -> new ServiceException(NOT_MODEL));
int tokensPerMessage = 0;
int tokensPerName = 0;
if (modelName.equals("gpt-3.5-turbo-0613") ||
modelName.equals("gpt-3.5-turbo-16k-0613") ||
modelName.equals("gpt-4-0314") ||
modelName.equals("gpt-4-32k-0314") ||
modelName.equals("gpt-4-0613") ||
modelName.equals("gpt-4-32k-0613")
) {
tokensPerMessage = 3;
tokensPerName = 1;
} else if (modelName.equals("gpt-3.5-turbo-0301")) {
tokensPerMessage = 4;
tokensPerName = -1;
} else if (modelName.contains("gpt-3.5-turbo")) {
//"gpt-3.5-turbo" in model:
log.warn("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.");
tokensPerMessage = 3;
tokensPerName = 1;
} else if (modelName.contains("gpt-4")) {
log.warn("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.");
tokensPerMessage = 3;
tokensPerName = 1;
} else {
log.warn("不支持的model {}. See https://github.com/openai/openai-python/blob/main/chatml.md 更多信息.", modelName);
}
int sum = 0;
for (ChatMessage msg : messages) {
sum += tokensPerMessage;
sum += encoding.countTokens(msg.getContent());
sum += encoding.countTokens(msg.getRole().toString());
sum += encoding.countTokens(msg.getName());
FunctionCall functionCall = msg.getFunctionCall();
sum += Objects.isNull(functionCall) ? 0 : encoding.countTokens(functionCall.toString());
if (CharSequenceUtil.isNotBlank(msg.getName())) {
sum += tokensPerName;
}
}
//every reply is primed with <|start|>assistant<|message|>
sum += 3;
return sum;
}
public static int tokenCount(@NotNull String modelName, @NotNull String text) throws ServiceException {
Encoding encoding = REGISTRY.getEncodingForModel(modelName).orElseThrow(() -> new ServiceException(NOT_MODEL));
return encoding.countTokens(text);
}
public static void main(String[] args) throws ServiceException {
List<ChatMessage> chatMessage = new ArrayList<>();
chatMessage.add(new ChatMessage(ChatRole.USER, "请介绍一下你自己"));
System.out.println(tokens(ModelType.GPT_3_5_TURBO_16K.getName(), chatMessage));
List<ChatMessage> chatMessages = new ArrayList<>();
chatMessages.add(new ChatMessage(ChatRole.SYSTEM, "你是一个乐于助人的助手.你会像海盗一样说话."));
chatMessages.add(new ChatMessage(ChatRole.USER, "你可以帮我吗?"));
chatMessages.add(new ChatMessage(ChatRole.ASSISTANT, "当然,亲爱的!我能为你做什么?"));
chatMessages.add(new ChatMessage(ChatRole.USER, "训练一只鹦鹉最好的方法是什么?"));
System.out.println(tokens(ModelType.GPT_3_5_TURBO_16K.getName(), chatMessages));
String text="嗯,听起来你想要训练一只鹦鹉啊!啊哈哈,这可正是我的长项之一,就像航海中的掌舵一样。让我来为你指引一下正确的航线吧!\n" +
"\n" +
"首先,要记住,训练鹦鹉需要耐心和恒心。鹦鹉是聪明的鸟类,但训练可能需要一些时间。你愿意与我一同探索吗?\n" +
"\n" +
"1. 建立良好的关系:与鹦鹉建立互信和强烈的联系非常重要。花时间与它相处,让它逐渐熟悉你的声音和存在。\n" +
"\n" +
"2. 基本指令训练:从基本指令开始,如“坐下”、“飞起来”等。使用手势和声音命令来帮助鹦鹉理解你的意图。\n" +
"\n" +
"3. 奖励系统:表扬和奖励是鹦鹉训练的关键。使用喜欢的零食或鹦鹉喜欢的食物来奖励它,以积极正面的方式加强它的行为。\n" +
"\n" +
"4. 重复训练:鹦鹉需要不断的重复和强化来巩固训练成果。每天进行几次短时间的训练会带来更好的效果。\n" +
"\n" +
"5. 社交训练:让鹦鹉与人和其他动物进行互动有助于增加它的社交能力和适应性。\n" +
"\n" +
"6. 避免惩罚:避免使用过度惩罚或伤害鹦鹉的方法,这对于建立良好的训练关系不利。\n" +
"\n" +
"记住,每只鹦鹉都有自己的个性和学习速度,所以要根据鹦鹉的需求和进展调整训练计划。希望这些建议能帮你顺利驾驭你的鹦鹉船,啊哈哈!";
int tokenCount = tokenCount(ModelType.GPT_3_5_TURBO_16K.getName(), text);
System.out.println(tokenCount);
}
}
|
[
"com.knuddels.jtokkit.api.ModelType.GPT_3_5_TURBO_16K.getName"
] |
[((4098, 4135), 'com.knuddels.jtokkit.api.ModelType.GPT_3_5_TURBO_16K.getName'), ((4666, 4703), 'com.knuddels.jtokkit.api.ModelType.GPT_3_5_TURBO_16K.getName'), ((6482, 6519), 'com.knuddels.jtokkit.api.ModelType.GPT_3_5_TURBO_16K.getName')]
|
package tw.xserver.gpt;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingType;
public class TokenCounter {
// private static final Encoding ENC =Encodings.newDefaultEncodingRegistry().getEncodingForModel(ModelType.GPT_3_5_TURBO);
private static final Encoding ENC = Encodings.newDefaultEncodingRegistry().getEncoding(EncodingType.P50K_BASE);
public static int getToken(String content) {
return ENC.encode(content).size() + 7;
}
}
|
[
"com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncoding"
] |
[((352, 426), 'com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncoding')]
|
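`TokenCounter` above pins `P50K_BASE` and adds a flat overhead of 7 tokens; counts for the same text vary noticeably across encodings, which this sketch makes visible:

```java
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.EncodingRegistry;
import com.knuddels.jtokkit.api.EncodingType;

public class EncodingComparison {

    public static void main(String[] args) {
        EncodingRegistry registry = Encodings.newDefaultEncodingRegistry();
        String content = "Token counts differ between encodings.";
        // Print the token count of the same text under every registered encoding
        for (EncodingType type : EncodingType.values()) {
            System.out.printf("%-12s %d tokens%n", type.getName(),
                    registry.getEncoding(type).countTokens(content));
        }
    }
}
```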
/*
* Copyright 2023 lzhpo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.lzhpo.chatgpt.utils;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingRegistry;
import com.knuddels.jtokkit.api.ModelType;
import com.lzhpo.chatgpt.entity.chat.ChatCompletionMessage;
import com.lzhpo.chatgpt.exception.OpenAiException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.ToIntFunction;
import lombok.experimental.UtilityClass;
import lombok.extern.slf4j.Slf4j;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
/**
* <a href="https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb">How_to_count_tokens_with_tiktoken</a>
* <pre>
* {@code
* def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301"):
* """Returns the number of tokens used by a list of messages."""
* try:
* encoding = tiktoken.encoding_for_model(model)
* except KeyError:
* print("Warning: model not found. Using cl100k_base encoding.")
* encoding = tiktoken.get_encoding("cl100k_base")
* if model == "gpt-3.5-turbo":
* print("Warning: gpt-3.5-turbo may change over time. Returning num tokens assuming gpt-3.5-turbo-0301.")
* return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301")
* elif model == "gpt-4":
* print("Warning: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.")
* return num_tokens_from_messages(messages, model="gpt-4-0314")
* elif model == "gpt-3.5-turbo-0301":
* tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
* tokens_per_name = -1 # if there's a name, the role is omitted
* elif model == "gpt-4-0314":
* tokens_per_message = 3
* tokens_per_name = 1
* else:
* raise NotImplementedError(f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
* num_tokens = 0
* for message in messages:
* num_tokens += tokens_per_message
* for key, value in message.items():
* num_tokens += len(encoding.encode(value))
* if key == "name":
* num_tokens += tokens_per_name
* num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
* return num_tokens
* }
* </pre>
*
*
* @author lzhpo
*/
@Slf4j
@UtilityClass
public class TokenUtils {
private static final int REPLY_PRIMED_NUM = 3;
private static final EncodingRegistry REGISTRY = Encodings.newDefaultEncodingRegistry();
private static final Map<String, ToIntFunction<String>> PER_MODEL_MAP = new HashMap<>(4);
static {
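        // Mirrors the cookbook numbers quoted above: gpt-3.5-turbo* counts 4 tokens per message
        // and -1 when a name is present (the role is omitted); gpt-4* counts 3 and +1.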
PER_MODEL_MAP.put("gpt-3.5-turbo", name -> 4 + (StringUtils.hasText(name) ? -1 : 0));
PER_MODEL_MAP.put("gpt-3.5-turbo-0301", name -> 4 + (StringUtils.hasText(name) ? -1 : 0));
PER_MODEL_MAP.put("gpt-4", name -> 3 + (StringUtils.hasText(name) ? 1 : 0));
PER_MODEL_MAP.put("gpt-4-0314", name -> 3 + (StringUtils.hasText(name) ? 1 : 0));
}
/**
* Get the {@link #REGISTRY}.
*
* @return {@link EncodingRegistry}
*/
public static EncodingRegistry getRegistry() {
return REGISTRY;
}
/**
* Returns the encoding that is used for the given model type.
*
* @param modelType {@link ModelType}
* @return the encoding
*/
public static Encoding getEncoding(ModelType modelType) {
return getRegistry().getEncodingForModel(modelType);
}
/**
* Encodes the {@code content} into a list of token ids and returns the amount of tokens.
*
* @param modelType {@link ModelType}
* @param content content
* @return the tokens
*/
public static Long tokens(ModelType modelType, String content) {
Encoding encoding = getEncoding(modelType);
return (long) encoding.countTokens(content);
}
/**
* Encodes the {@code content} into a list of token ids and returns the amount of tokens.
*
* @param modelTypeName {@link ModelType} name
* @param content content
* @return the tokens
*/
public static Long tokens(String modelTypeName, String content) {
ModelType modelType = ModelType.fromName(modelTypeName)
.orElseThrow(() -> new OpenAiException("Unknown model " + modelTypeName));
return tokens(modelType, content);
}
/**
* Encodes the {@code messages} into a list of token ids and returns the amount of tokens.
*
* @param model model
* @param messages messages
* @return tokens
*/
public static Long tokens(String model, List<ChatCompletionMessage> messages) {
Assert.hasText(model, "model cannot empty.");
Assert.notEmpty(messages, "messages cannot empty.");
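        // REPLY_PRIMED_NUM = 3: every reply is primed with <|start|>assistant<|message|>,
        // as in the cookbook snippet quoted above.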
return REPLY_PRIMED_NUM
+ messages.stream()
.map(message -> {
String name = message.getName();
ToIntFunction<String> handler = PER_MODEL_MAP.getOrDefault(model, x -> 0);
return handler.applyAsInt(name)
+ tokens(model, name)
+ tokens(model, message.getRole())
+ tokens(model, message.getContent());
})
.mapToLong(Long::longValue)
.sum();
}
}
|
[
"com.knuddels.jtokkit.api.ModelType.fromName(modelTypeName).orElseThrow"
] |
[((5020, 5143), 'com.knuddels.jtokkit.api.ModelType.fromName(modelTypeName).orElseThrow')]
|
package com.gzhu.funai.api.openai;
import cn.hutool.http.ContentType;
import cn.hutool.json.JSONUtil;
import com.alibaba.fastjson.JSON;
import com.gzhu.funai.api.openai.constant.OpenAIConst;
import com.gzhu.funai.api.openai.enums.OpenAiRespError;
import com.gzhu.funai.api.openai.req.ChatGPTReq;
import com.gzhu.funai.api.openai.req.EmbeddingReq;
import com.gzhu.funai.api.openai.resp.BillingUsage;
import com.gzhu.funai.api.openai.resp.ChatGPTResp;
import com.gzhu.funai.api.openai.resp.EmbeddingResp;
import com.gzhu.funai.api.openai.resp.CreditGrantsResp;
import com.gzhu.funai.exception.BaseException;
import com.gzhu.funai.global.constant.GlobalConstant;
import com.gzhu.funai.utils.DateTimeFormatterUtil;
import com.gzhu.funai.utils.OkHttpClientUtil;
import com.gzhu.funai.utils.ResultCode;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingType;
import lombok.extern.slf4j.Slf4j;
import okhttp3.*;
import okhttp3.sse.EventSource;
import okhttp3.sse.EventSourceListener;
import okhttp3.sse.EventSources;
import java.io.IOException;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneId;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
* @Author: huangpenglong
* @Date: 2023/3/9 10:57
*/
@Slf4j
public class ChatGPTApi{
private static final String AUTHORIZATION_STR = "Authorization";
private static Encoding enc;
static {
enc = Encodings.newDefaultEncodingRegistry().getEncoding(EncodingType.CL100K_BASE);
}
    /**
     * A single one-shot conversation.
     * @param gpt
     * @param apiKey
     * @return
     */
public static ChatGPTResp oneShotReq(ChatGPTReq gpt, String apiKey){
return sessionReq(gpt, apiKey);
}
    /**
     * Conversation with context.
     * Note: requests used to be written with hutool's HttpRequest, but that hit handshake_failure errors, so OkHttp is used now.
     * @param gpt
     * @param apiKey
     * @return
     */
public static ChatGPTResp sessionReq(ChatGPTReq gpt, String apiKey) {
Request request = new Request.Builder()
.url(OpenAIConst.HOST + OpenAIConst.CHATGPT_MAPPING)
.post(RequestBody.create(MediaType.parse(ContentType.JSON.getValue()), JSONUtil.parseObj(gpt).toString()))
.header(AUTHORIZATION_STR, "Bearer " + apiKey)
.build();
Response response = null;
try {
response = OkHttpClientUtil.getClient().newCall(request).execute();
if(!response.isSuccessful()){
OpenAiRespError openAiRespError = OpenAiRespError.get(response.code());
log.error("请求ChatGPT异常! {}", openAiRespError.msg);
throw new BaseException(openAiRespError.msg);
}
String body = response.body().string();
return JSONUtil.toBean(body, ChatGPTResp.class);
}
catch (IOException e) {
log.error("okHttpClient异常! {}", e.getMessage());
}
finally {
if(response != null){
response.close();
}
}
return null;
}
    /**
     * Query the remaining balance of an apiKey.
     * Note: requests used to be written with hutool's HttpRequest, but that hit handshake_failure errors, so OkHttp is used now.
     * @param apiKey
     * @return
     */
public static CreditGrantsResp creditGrants(String apiKey){
Request request = new Request.Builder()
.url(OpenAIConst.HOST + OpenAIConst.CREDIT_GRANTS_MAPPING)
.get()
.header(AUTHORIZATION_STR, "Bearer " + apiKey)
.build();
Response response = null;
try {
response = OkHttpClientUtil.getClient().newCall(request).execute();
if(!response.isSuccessful()){
OpenAiRespError openAiRespError = OpenAiRespError.get(response.code());
log.error("请求ChatGPT异常! {}", openAiRespError.msg);
throw new BaseException(openAiRespError.msg);
}
String body = response.body().string();
log.info("{}调用查询余额请求,返回值:{}",apiKey, body);
return JSONUtil.toBean(body, CreditGrantsResp.class);
}
catch (IOException e) {
log.error("okHttpClient异常! {}", e.getMessage());
}
finally {
if(response != null){
response.close();
}
}
return null;
}
    /**
     * Run a multi-turn conversation with streamed output.
     * @param chatGPTReq
     * @param apiKey
     * @param eventSourceListener
     */
public static void streamSessionReq(ChatGPTReq chatGPTReq, String apiKey, EventSourceListener eventSourceListener){
if (Objects.isNull(eventSourceListener)) {
log.error("参数异常:EventSourceListener不能为空");
throw new BaseException(ResultCode.EMPTY_PARAM.msg);
}
try {
EventSource.Factory factory = EventSources.createFactory(OkHttpClientUtil.getClient());
String requestBody = JSONUtil.parseObj(chatGPTReq).toString();
Request request = new Request.Builder()
.url(OpenAIConst.HOST + OpenAIConst.CHATGPT_MAPPING)
.post(RequestBody.create(MediaType.parse(ContentType.JSON.getValue()), requestBody))
.header(AUTHORIZATION_STR, "Bearer " + apiKey)
.build();
            // Bind the request to the event source listener
factory.newEventSource(request, eventSourceListener);
}
catch (Exception e) {
log.error("请求参数解析异常:{}", e);
}
}
    /**
     * Text embedding.
     * @param input
     * @param apiKey
     * @return
     */
public static EmbeddingResp embeddings(List<String> input, String apiKey){
EmbeddingReq embeddingReq = EmbeddingReq.builder().input(input).build();
Request request = new Request.Builder()
.url(OpenAIConst.HOST + OpenAIConst.EMBEDDING_MAPPING)
.post(RequestBody.create(MediaType.parse(ContentType.JSON.getValue()), JSONUtil.parseObj(embeddingReq).toString()))
.header(AUTHORIZATION_STR, "Bearer " + apiKey)
.build();
Response response = null;
try {
response = OkHttpClientUtil.getClient().newCall(request).execute();
if(!response.isSuccessful()){
OpenAiRespError openAiRespError = OpenAiRespError.get(response.code());
log.error("Embedding异常! {}", openAiRespError.msg);
throw new BaseException(openAiRespError.msg);
}
String body = response.body().string();
return JSONUtil.toBean(body, EmbeddingResp.class);
}
catch (IOException e) {
log.error("okHttpClient异常! {}", e.getMessage());
}
finally {
if(response != null){
response.close();
}
}
return null;
}
    /**
     * Estimate how many tokens a string occupies.
     * @param message
     * @return
     */
public static int getTokenNum(String message){
return enc.encode(message).size();
}
    /**
     * Estimate how many tokens one round of contextual conversation occupies.
     * @param message
     * @return
     */
public static int getMessageTokenNum(String message){
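        // Wrapping the message in a role template presumably approximates the per-message
        // formatting overhead of the chat API instead of counting the bare string.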
return enc.encode("role: {user}, message: {" + message + "}").size();
}
    /**
     * Get the quota information of an apiKey.
     * @param apiKey
     * @return
     */
public static BillingUsage getBillingUsage(String apiKey){
Response subResponse = null;
Response usageResponse = null;
try {
subResponse = OkHttpClientUtil.getClient()
.newCall(
new Request.Builder()
.url(OpenAIConst.HOST + OpenAIConst.SUBSCRIPTION_MAPPING)
.get()
.header(AUTHORIZATION_STR, "Bearer " + apiKey)
.build())
.execute();
            // OpenAI request error
if(!subResponse.isSuccessful()){
OpenAiRespError openAiRespError = OpenAiRespError.get(subResponse.code());
log.error("请求ChatGPT异常! {}", openAiRespError.msg);
throw new BaseException(openAiRespError.msg);
}
            // Check whether the account has expired
Map subMap = JSON.parseObject(subResponse.body().string(), Map.class);
long accessUntil = Long.parseLong(String.valueOf(subMap.get("access_until")));
if(accessUntil * GlobalConstant.TEN_K < System.currentTimeMillis()){
log.warn("检查到apiKey:{}过期,过期时间{}", apiKey,
Instant.ofEpochMilli(accessUntil * GlobalConstant.TEN_K).atZone(ZoneId.systemDefault()).toLocalDate());
                // Not throwing here: some special apiKeys can still be used after they expire
                // throw new BaseException(OpenAiRespError.OPENAI_APIKEY_EXPIRED.code, OpenAiRespError.OPENAI_APIKEY_EXPIRED.msg);
}
            // Get the total quota
BigDecimal totalAmount = BigDecimal.valueOf(Double.parseDouble(String.valueOf(subMap.get("hard_limit_usd"))));
            // Get the used quota (fetched through a sliding date window, because OpenAI only returns 100 days of usage per request)
BigDecimal totalUsage = new BigDecimal(0);
LocalDate startDate = LocalDate.now().minusDays(95);
LocalDate endDate = LocalDate.now().plusDays(1);
while(true){
                // Query the usage within this date range
String usageUrl = OpenAIConst.HOST + String.format(
OpenAIConst.USAGE_MAPPING,
DateTimeFormatterUtil.DFT.format(startDate),
DateTimeFormatterUtil.DFT.format(endDate));
usageResponse = OkHttpClientUtil.getClient()
.newCall(new Request.Builder()
.url(usageUrl)
.get()
.header(AUTHORIZATION_STR, "Bearer " + apiKey)
.build())
.execute();
Map usageMap = JSON.parseObject(usageResponse.body().string(), Map.class);
BigDecimal curUsage = BigDecimal.valueOf(Double.parseDouble(String.valueOf(usageMap.get("total_usage"))));
                // If the usage found in a window is 0, there was no usage for a long time before it
if(curUsage.compareTo(BigDecimal.ZERO) <= 0){
break;
}
                // Accumulate the usage
totalUsage = totalUsage.add(curUsage);
                // Slide the statistics window back one period
endDate = startDate;
startDate = endDate.minusDays(95);
}
return new BillingUsage(
totalAmount,
totalUsage.divide(new BigDecimal("100"), 2, RoundingMode.HALF_UP),
Instant.ofEpochMilli(accessUntil * GlobalConstant.TEN_K).atZone(ZoneId.systemDefault()).toLocalDate());
}
catch (IOException e) {
log.error("okHttpClient异常! {}", e.getMessage());
}
finally {
if(subResponse != null){
subResponse.close();
}
if(usageResponse != null){
usageResponse.close();
}
}
return null;
}
private ChatGPTApi(){}
}
|
[
"com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncoding"
] |
[((1543, 1619), 'com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncoding'), ((2303, 2330), 'cn.hutool.http.ContentType.JSON.getValue'), ((2333, 2366), 'cn.hutool.json.JSONUtil.parseObj(gpt).toString'), ((2529, 2584), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient().newCall(request).execute'), ((2529, 2574), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient().newCall'), ((3809, 3864), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient().newCall(request).execute'), ((3809, 3854), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient().newCall'), ((5218, 5258), 'cn.hutool.json.JSONUtil.parseObj(chatGPTReq).toString'), ((5446, 5473), 'cn.hutool.http.ContentType.JSON.getValue'), ((6027, 6070), 'com.gzhu.funai.api.openai.req.EmbeddingReq.builder().input(input).build'), ((6027, 6062), 'com.gzhu.funai.api.openai.req.EmbeddingReq.builder().input'), ((6248, 6275), 'cn.hutool.http.ContentType.JSON.getValue'), ((6278, 6320), 'cn.hutool.json.JSONUtil.parseObj(embeddingReq).toString'), ((6483, 6538), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient().newCall(request).execute'), ((6483, 6528), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient().newCall'), ((7899, 8252), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient()\n .newCall(\n new Request.Builder()\n .url(OpenAIConst.HOST + OpenAIConst.SUBSCRIPTION_MAPPING)\n .get()\n .header(AUTHORIZATION_STR, "Bearer " + apiKey)\n .build()).execute'), ((7899, 8221), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient().newCall'), ((8976, 9077), 'java.time.Instant.ofEpochMilli(accessUntil * GlobalConstant.TEN_K).atZone(ZoneId.systemDefault()).toLocalDate'), ((8976, 9063), 'java.time.Instant.ofEpochMilli(accessUntil * GlobalConstant.TEN_K).atZone'), ((9673, 9702), 'java.time.LocalDate.now().minusDays'), ((9736, 9763), 'java.time.LocalDate.now().plusDays'), ((9989, 10032), 'com.gzhu.funai.utils.DateTimeFormatterUtil.DFT.format'), ((10058, 10099), 'com.gzhu.funai.utils.DateTimeFormatterUtil.DFT.format'), ((10134, 10459), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient()\n .newCall(new Request.Builder()\n .url(usageUrl)\n .get()\n .header(AUTHORIZATION_STR, "Bearer " + apiKey)\n .build()).execute'), ((10134, 10424), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient().newCall'), ((11310, 11411), 'java.time.Instant.ofEpochMilli(accessUntil * GlobalConstant.TEN_K).atZone(ZoneId.systemDefault()).toLocalDate'), ((11310, 11397), 'java.time.Instant.ofEpochMilli(accessUntil * GlobalConstant.TEN_K).atZone')]
|
/*
*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
package com.meta.cp4m.llm;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.meta.cp4m.message.Message;
import com.meta.cp4m.message.Message.Role;
import com.meta.cp4m.message.ThreadState;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.time.Instant;
import java.util.Optional;
import org.apache.hc.client5.http.fluent.Request;
import org.apache.hc.client5.http.fluent.Response;
import org.apache.hc.core5.http.ContentType;
import org.checkerframework.checker.nullness.qual.Nullable;
import org.checkerframework.common.returnsreceiver.qual.This;
import org.jetbrains.annotations.TestOnly;
public class OpenAIPlugin<T extends Message> implements LLMPlugin<T> {
private static final ObjectMapper MAPPER = new ObjectMapper();
private static final String ENDPOINT = "https://api.openai.com/v1/chat/completions";
private final OpenAIConfig config;
private final Encoding tokenEncoding;
private final int tokensPerMessage;
private final int tokensPerName;
private URI endpoint;
public OpenAIPlugin(OpenAIConfig config) {
this.config = config;
try {
this.endpoint = new URI(ENDPOINT);
} catch (URISyntaxException e) {
throw new RuntimeException(e); // this should be impossible
}
tokenEncoding =
Encodings.newDefaultEncodingRegistry()
.getEncodingForModel(config.model().properties().jtokkinModel());
switch (config.model()) {
case GPT4, GPT432K -> {
tokensPerMessage = 3;
tokensPerName = 1;
}
case GPT35TURBO, GPT35TURBO16K -> {
tokensPerMessage = 4; // every message follows <|start|>{role/name}\n{content}<|end|>\n
tokensPerName = -1; // if there's a name, the role is omitted
}
default -> throw new IllegalArgumentException("Unsupported model: " + config.model());
}
}
@TestOnly
public @This OpenAIPlugin<T> endpoint(URI endpoint) {
this.endpoint = endpoint;
return this;
}
private int tokenCount(JsonNode message) {
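    // Follows the OpenAI cookbook counting scheme: a fixed per-message overhead plus
    // the tokens of content, role, and (if present) name.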
int tokenCount = tokensPerMessage;
tokenCount += tokenEncoding.countTokens(message.get("content").textValue());
tokenCount += tokenEncoding.countTokens(message.get("role").textValue());
@Nullable JsonNode name = message.get("name");
if (name != null) {
tokenCount += tokenEncoding.countTokens(name.textValue());
tokenCount += tokensPerName;
}
return tokenCount;
}
private Optional<ArrayNode> pruneMessages(ArrayNode messages, @Nullable JsonNode functions)
throws JsonProcessingException {
int functionTokens = 0;
if (functions != null) {
// This is honestly a guess, it's undocumented
functionTokens = tokenEncoding.countTokens(MAPPER.writeValueAsString(functions));
}
ArrayNode output = MAPPER.createArrayNode();
int totalTokens = functionTokens;
totalTokens += 3; // every reply is primed with <|start|>assistant<|message|>
JsonNode systemMessage = messages.get(0);
boolean hasSystemMessage = systemMessage.get("role").textValue().equals("system");
if (hasSystemMessage) {
// if the system message is present it's required
totalTokens += tokenCount(messages.get(0));
}
for (int i = messages.size() - 1; i >= 0; i--) {
JsonNode m = messages.get(i);
String role = m.get("role").textValue();
if (role.equals("system")) {
continue; // system has already been counted
}
totalTokens += tokenCount(m);
if (totalTokens > config.maxInputTokens()) {
break;
}
output.insert(0, m);
}
if (hasSystemMessage) {
output.insert(0, systemMessage);
}
if ((hasSystemMessage && output.size() <= 1) || output.isEmpty()) {
return Optional.empty();
}
return Optional.of(output);
}
@Override
public T handle(ThreadState<T> threadState) throws IOException {
T fromUser = threadState.tail();
ObjectNode body = MAPPER.createObjectNode();
body.put("model", config.model().properties().name())
// .put("function_call", "auto") // Update when we support functions
.put("n", 1)
.put("stream", false)
.put("user", fromUser.senderId().toString());
config.topP().ifPresent(v -> body.put("top_p", v));
config.temperature().ifPresent(v -> body.put("temperature", v));
config.maxOutputTokens().ifPresent(v -> body.put("max_tokens", v));
config.presencePenalty().ifPresent(v -> body.put("presence_penalty", v));
config.frequencyPenalty().ifPresent(v -> body.put("frequency_penalty", v));
if (!config.logitBias().isEmpty()) {
body.set("logit_bias", MAPPER.valueToTree(config.logitBias()));
}
if (!config.stop().isEmpty()) {
body.set("stop", MAPPER.valueToTree(config.stop()));
}
ArrayNode messages = MAPPER.createArrayNode();
messages
.addObject()
.put("role", Role.SYSTEM.toString().toLowerCase())
.put("content", config.systemMessage());
for (T message : threadState.messages()) {
messages
.addObject()
.put("role", message.role().toString().toLowerCase())
.put("content", message.message());
}
Optional<ArrayNode> prunedMessages = pruneMessages(messages, null);
if (prunedMessages.isEmpty()) {
return threadState.newMessageFromBot(
Instant.now(), "I'm sorry but that request was too long for me.");
}
body.set("messages", prunedMessages.get());
String bodyString;
try {
bodyString = MAPPER.writeValueAsString(body);
} catch (JsonProcessingException e) {
throw new RuntimeException(e); // this should be impossible
}
Response response =
Request.post(endpoint)
.bodyString(bodyString, ContentType.APPLICATION_JSON)
.setHeader("Authorization", "Bearer " + config.apiKey())
.execute();
JsonNode responseBody = MAPPER.readTree(response.returnContent().asBytes());
Instant timestamp = Instant.ofEpochSecond(responseBody.get("created").longValue());
JsonNode choice = responseBody.get("choices").get(0);
String messageContent = choice.get("message").get("content").textValue();
return threadState.newMessageFromBot(timestamp, messageContent);
}
}
|
[
"com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncodingForModel"
] |
[((1803, 1918), 'com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncodingForModel'), ((5402, 5438), 'com.meta.cp4m.message.Message.Role.SYSTEM.toString().toLowerCase'), ((5402, 5424), 'com.meta.cp4m.message.Message.Role.SYSTEM.toString'), ((6206, 6386), 'org.apache.hc.client5.http.fluent.Request.post(endpoint)\n .bodyString(bodyString, ContentType.APPLICATION_JSON)\n .setHeader("Authorization", "Bearer " + config.apiKey()).execute'), ((6206, 6363), 'org.apache.hc.client5.http.fluent.Request.post(endpoint)\n .bodyString(bodyString, ContentType.APPLICATION_JSON).setHeader'), ((6206, 6294), 'org.apache.hc.client5.http.fluent.Request.post(endpoint).bodyString')]
|
package com.hty.utils.ai;
import com.alibaba.fastjson.JSON;
import com.hty.config.OpenAIConfig;
import com.hty.constant.ChatModel;
import com.hty.constant.RequestURL;
import com.hty.dao.ai.OpenaiChatModelMapper;
import com.hty.entity.ai.ChatRequestParam;
import com.hty.entity.ai.Usage;
import com.hty.entity.pojo.OpenaiChatModel;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingRegistry;
import com.knuddels.jtokkit.api.EncodingType;
import com.knuddels.jtokkit.api.ModelType;
import lombok.extern.slf4j.Slf4j;
import okhttp3.*;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Component;
import javax.annotation.Resource;
import java.io.IOException;
import java.util.*;
/**
* @author hty
* @date 2023-12-07 13:07
* @email [email protected]
* @description
*/
@Slf4j
@Component
public class ChatUtil {
@Resource
private OkHttpClient okHttpClient;
@Resource
private OpenAIConfig openAIConfig;
@Resource
private EncodingRegistry registry;
@Resource
private OpenaiChatModelMapper openaiChatModelMapper;
@Resource(name = "stringRedisTemplate")
private StringRedisTemplate stringRedisTemplate;
    /***
     * The most basic non-streaming request interface: submits a question and returns the result.
     * @param question
     * @return
     */
public String chat(String question,String model){
ChatRequestParam requestParam = new ChatRequestParam();
LinkedList<Map<String,String>> messages = new LinkedList<>();
Map<String,String> map = new HashMap<>();
map.put("role","user");
map.put("content",question);
messages.add(map);
requestParam.setMessages(messages);
requestParam.setModel(model);
String answer = "";
        try (Response response = chat(requestParam);
             ResponseBody responseBody = response == null ? null : response.body()){
if(responseBody != null){
answer = responseBody.string();
}
} catch (IOException e) {
log.info("请求出错 => {}",e.getMessage());
}
return answer;
}
    /***
     * Q&A interface.
     * @param requestParam
     * @return
     */
public Response chat(ChatRequestParam requestParam){
if(!checkTokenCount(requestParam)){
log.info("消息长度过长,请重新提问");
return null;
}
        // Build the request JSON string
String requestJson = constructRequestJson(requestParam);
        // Build the request body
RequestBody body = RequestBody.create(MediaType.parse("application/json"), requestJson);
        // Build the request
Request request = new Request.Builder()
.url(RequestURL.RENDER_PROXY_CHAT_URL)
.method("POST", body)
.addHeader("Content-Type", "application/json")
.addHeader("Authorization", "Bearer "+openAIConfig.apiKey)
.build();
try {
Response response = okHttpClient.newCall(request).execute();
            // Check whether the response succeeded
if (response.isSuccessful()) {
return response;
} else {
log.error("使用OkHttp访问ChatGPT请求成功但是响应不成功,响应结果:{}",response);
}
} catch (IOException e) {
log.error("流式请求出错 => {}",e.getMessage());
throw new RuntimeException(e);
}
return null;
}
    /***
     * Check whether the request's tokens exceed the model's token limit; if the request is too long, pop messages from the history.
     * @param requestParam
     * @return
     */
private Boolean checkTokenCount(ChatRequestParam requestParam){
Usage usage = computePromptToken(requestParam, null);
OpenaiChatModel openaiChatModel = openaiChatModelMapper.selectModelByName(requestParam.getModel());
while(openaiChatModel.getMaxTokens() < usage.getPromptTokens()){
            // A system message is always present, so a single remaining message means there is no context left
            if(requestParam.getMessages().size() == 1) return false;
            // Removing twice: deleting the input also deletes the corresponding reply
            requestParam.getMessages().remove(1);
            requestParam.getMessages().remove(1);
            // Recompute the token usage
usage = computePromptToken(requestParam, null);
}
return true;
}
    /**
     * Build the JSON request parameters.
     */
private String constructRequestJson(ChatRequestParam requestParam) {
Map<String,Object> request = new HashMap<>();
        // Required fields need non-null checks
if(requestParam.getModel() == null || requestParam.getMessages() == null){
log.error("请求缺少参数,model => {},messages => {}",requestParam.getModel(),requestParam.getMessages());
throw new RuntimeException("请求缺少参数");
}
request.put("model",requestParam.getModel());
request.put("messages",requestParam.getMessages());
if(requestParam.getTemperature() != null){
request.put("temperature",requestParam.getTemperature());
} else if(requestParam.getTopP() != null){
request.put("top_p",requestParam.getTopP());
}
if(requestParam.getN() != null) request.put("n",requestParam.getN());
if(requestParam.getStream() != null) request.put("stream",requestParam.getStream());
if(requestParam.getStop() != null) request.put("stop",requestParam.getStop());
if(requestParam.getMaxTokens() != null) request.put("max_tokens",requestParam.getMaxTokens());
if(requestParam.getPresencePenalty() != null) request.put("presence_penalty",requestParam.getPresencePenalty());
if(requestParam.getFrequencyPenalty() != null) request.put("frequency_penalty",requestParam.getFrequencyPenalty());
if(requestParam.getLogitBias() != null) request.put("logit_bias",requestParam.getLogitBias());
if(requestParam.getUser() != null) request.put("user",requestParam.getUser());
log.info("构造的请求JSON => {}", JSON.toJSONString(request));
return JSON.toJSONString(request);
}
    /***
     * Append the user's question to the message list.
     * @param question
     * @param messages
     */
public void addUserQuestion(String question,LinkedList<Map<String, String>> messages){
Map<String,String> map = new HashMap<>();
map.put("role","user");
map.put("content",question);
messages.addLast(map);
}
    /***
     * Append the AI's reply to the message list.
     * @param content
     * @param messages
     */
public void addAssistantQuestion(String content,LinkedList<Map<String, String>> messages){
Map<String,String> map = new HashMap<>();
map.put("role","assistant");
map.put("content",content);
messages.addLast(map);
        // If there are more than 4 Q&A pairs, remove all the oldest ones
int n = messages.size() - 8;
Iterator<Map<String, String>> iterator = messages.iterator();
if(messages.getFirst().get("role").equals("system")){
iterator.next();
n -= 1;
}
while(n -- > 0){
iterator.remove();
}
}
    /***
     * Use the jtokkit library to compute token usage.
     * @param requestParam
     * @param answer
     * @return a Usage object
     *
     * The current counting method always comes out slightly higher than the real value, which leaves a margin to profit from the difference.
     */
public Usage computePromptToken(ChatRequestParam requestParam, String answer){
        // Get the encoding that corresponds to the model
Encoding encoding = registry.getEncodingForModel(ModelType.fromName(requestParam.getModel()).get());
        // Concatenate the input: put every role part, content part, and the model into a single string
StringBuilder content = new StringBuilder();
for (Map<String, String> message : requestParam.getMessages()) {
content.append("role").append(message.get("role"));
content.append(" ");
content.append("content").append(message.get("content"));
content.append(" ");
}
content.append("model").append(requestParam.getModel());
        // Count the tokens with the obtained encoding and set the values on a Usage object
Usage usage = new Usage();
usage.setPromptTokens(encoding.countTokens(content.toString()));
usage.setCompletionTokens(encoding.countTokens(answer));
usage.countTotalTokens();
return usage;
}
    /***
     * Compute the token usage of the given content.
     * @param content
     * @return
     */
public Integer computeToken(String content,String model){
        // Get the encoding that corresponds to the model
Encoding encoding = registry.getEncodingForModel(ModelType.fromName(model).get());
return encoding.countTokens(content);
}
    /***
     * Load all non-expired models from the database into Redis.
     * TODO: needs locking.
     */
public void loadModelFromDatabase2Redis(){
        // Fetch all chat models
stringRedisTemplate.delete("chatModelSet");
List<OpenaiChatModel> openaiChatModelList = openaiChatModelMapper.selectAllModel();
for (OpenaiChatModel model : openaiChatModelList) {
            // Store in a Redis hash so containsKey-style lookups are convenient
stringRedisTemplate.opsForHash().put("chatModelSet",model.getName(),JSON.toJSONString(model));
}
        // TODO: fetch image-generation, audio, and other models
}
    /***
     *
     * @return the list of chat models
     */
public List<OpenaiChatModel> getAllChatModel(){
List<OpenaiChatModel> modelList = new ArrayList<>();
        // Fetch the model list from Redis
List<Object> chatModelList = stringRedisTemplate.opsForHash().values("chatModelSet");
        // If empty, reload from the database and read again
        if (chatModelList.size() == 0){
            loadModelFromDatabase2Redis();
            chatModelList = stringRedisTemplate.opsForHash().values("chatModelSet");
        }
for (Object modelJSON : chatModelList) {
modelList.add(JSON.parseObject(modelJSON.toString(),OpenaiChatModel.class));
}
return modelList;
}
}
|
[
"com.knuddels.jtokkit.api.ModelType.fromName(model).get",
"com.knuddels.jtokkit.api.ModelType.fromName(requestParam.getModel()).get"
] |
[((7792, 7841), 'com.knuddels.jtokkit.api.ModelType.fromName(requestParam.getModel()).get'), ((8919, 8950), 'com.knuddels.jtokkit.api.ModelType.fromName(model).get')]
|
package com.valkryst.Valerie.gpt;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.ModelType;
import lombok.Getter;
import lombok.NonNull;
public enum ChatGptModels {
GPT_3_5_TURBO("gpt-3.5-turbo", 4096, ModelType.GPT_3_5_TURBO),
GPT_4("gpt-4", 8192, ModelType.GPT_4),
GPT_4_32K("gpt-4-32k", 32768, ModelType.GPT_4);
/** The name of the model. */
@Getter private final String name;
/** The maximum number of tokens that can be generated. */
@Getter private final int maxTokens;
/** The Tiktoken encoding to use when tokenizing text for the model. */
@Getter private final Encoding encoding;
/**
* Constructs a new {@code ChatGptModel}.
*
* @param name The name of the model.
* @param maxTokens The maximum number of tokens that can be generated.
* @param modelType The Tiktoken model type.
*/
ChatGptModels(final @NonNull String name, final int maxTokens, final ModelType modelType) {
this.name = name;
this.maxTokens = maxTokens;
encoding = Encodings.newDefaultEncodingRegistry().getEncodingForModel(modelType);
}
}
|
[
"com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncodingForModel"
] |
[((1111, 1180), 'com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncodingForModel')]
|
/*
*
* Apache License
* Version 2.0, January 2004
* https://www.apache.org/licenses/
*
* TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
*
* 1. Definitions.
*
* "License" shall mean the terms and conditions for use, reproduction,
* and distribution as defined by Sections 1 through 9 of this document.
*
* "Licensor" shall mean the copyright owner or entity authorized by
* the copyright owner that is granting the License.
*
* "Legal Entity" shall mean the union of the acting entity and all
* other entities that control, are controlled by, or are under common
* control with that entity. For the purposes of this definition,
* "control" means (i) the power, direct or indirect, to cause the
* direction or management of such entity, whether by contract or
* otherwise, or (ii) ownership of fifty percent (50%) or more of the
* outstanding shares, or (iii) beneficial ownership of such entity.
*
* "You" (or "Your") shall mean an individual or Legal Entity
* exercising permissions granted by this License.
*
* "Source" form shall mean the preferred form for making modifications,
* including but not limited to software source code, documentation
* source, and configuration files.
*
* "Object" form shall mean any form resulting from mechanical
* transformation or translation of a Source form, including but
* not limited to compiled object code, generated documentation,
* and conversions to other media types.
*
* "Work" shall mean the work of authorship, whether in Source or
* Object form, made available under the License, as indicated by a
* copyright notice that is included in or attached to the work
* (an example is provided in the Appendix below).
*
* "Derivative Works" shall mean any work, whether in Source or Object
* form, that is based on (or derived from) the Work and for which the
* editorial revisions, annotations, elaborations, or other modifications
* represent, as a whole, an original work of authorship. For the purposes
* of this License, Derivative Works shall not include works that remain
* separable from, or merely link (or bind by name) to the interfaces of,
* the Work and Derivative Works thereof.
*
* "Contribution" shall mean any work of authorship, including
* the original version of the Work and any modifications or additions
* to that Work or Derivative Works thereof, that is intentionally
* submitted to Licensor for inclusion in the Work by the copyright owner
* or by an individual or Legal Entity authorized to submit on behalf of
* the copyright owner. For the purposes of this definition, "submitted"
* means any form of electronic, verbal, or written communication sent
* to the Licensor or its representatives, including but not limited to
* communication on electronic mailing lists, source code control systems,
* and issue tracking systems that are managed by, or on behalf of, the
* Licensor for the purpose of discussing and improving the Work, but
* excluding communication that is conspicuously marked or otherwise
* designated in writing by the copyright owner as "Not a Contribution."
*
* "Contributor" shall mean Licensor and any individual or Legal Entity
* on behalf of whom a Contribution has been received by Licensor and
* subsequently incorporated within the Work.
*
* 2. Grant of Copyright License. Subject to the terms and conditions of
* this License, each Contributor hereby grants to You a perpetual,
* worldwide, non-exclusive, no-charge, royalty-free, irrevocable
* copyright license to reproduce, prepare Derivative Works of,
* publicly display, publicly perform, sublicense, and distribute the
* Work and such Derivative Works in Source or Object form.
*
* 3. Grant of Patent License. Subject to the terms and conditions of
* this License, each Contributor hereby grants to You a perpetual,
* worldwide, non-exclusive, no-charge, royalty-free, irrevocable
* (except as stated in this section) patent license to make, have made,
* use, offer to sell, sell, import, and otherwise transfer the Work,
* where such license applies only to those patent claims licensable
* by such Contributor that are necessarily infringed by their
* Contribution(s) alone or by combination of their Contribution(s)
* with the Work to which such Contribution(s) was submitted. If You
* institute patent litigation against any entity (including a
* cross-claim or counterclaim in a lawsuit) alleging that the Work
* or a Contribution incorporated within the Work constitutes direct
* or contributory patent infringement, then any patent licenses
* granted to You under this License for that Work shall terminate
* as of the date such litigation is filed.
*
* 4. Redistribution. You may reproduce and distribute copies of the
* Work or Derivative Works thereof in any medium, with or without
* modifications, and in Source or Object form, provided that You
* meet the following conditions:
*
* (a) You must give any other recipients of the Work or
* Derivative Works a copy of this License; and
*
* (b) You must cause any modified files to carry prominent notices
* stating that You changed the files; and
*
* (c) You must retain, in the Source form of any Derivative Works
* that You distribute, all copyright, patent, trademark, and
* attribution notices from the Source form of the Work,
* excluding those notices that do not pertain to any part of
* the Derivative Works; and
*
* (d) If the Work includes a "NOTICE" text file as part of its
* distribution, then any Derivative Works that You distribute must
* include a readable copy of the attribution notices contained
* within such NOTICE file, excluding those notices that do not
* pertain to any part of the Derivative Works, in at least one
* of the following places: within a NOTICE text file distributed
* as part of the Derivative Works; within the Source form or
* documentation, if provided along with the Derivative Works; or,
* within a display generated by the Derivative Works, if and
* wherever such third-party notices normally appear. The contents
* of the NOTICE file are for informational purposes only and
* do not modify the License. You may add Your own attribution
* notices within Derivative Works that You distribute, alongside
* or as an addendum to the NOTICE text from the Work, provided
* that such additional attribution notices cannot be construed
* as modifying the License.
*
* You may add Your own copyright statement to Your modifications and
* may provide additional or different license terms and conditions
* for use, reproduction, or distribution of Your modifications, or
* for any such Derivative Works as a whole, provided Your use,
* reproduction, and distribution of the Work otherwise complies with
* the conditions stated in this License.
*
* 5. Submission of Contributions. Unless You explicitly state otherwise,
* any Contribution intentionally submitted for inclusion in the Work
* by You to the Licensor shall be under the terms and conditions of
* this License, without any additional terms or conditions.
* Notwithstanding the above, nothing herein shall supersede or modify
* the terms of any separate license agreement you may have executed
* with Licensor regarding such Contributions.
*
* 6. Trademarks. This License does not grant permission to use the trade
* names, trademarks, service marks, or product names of the Licensor,
* except as required for reasonable and customary use in describing the
* origin of the Work and reproducing the content of the NOTICE file.
*
* 7. Disclaimer of Warranty. Unless required by applicable law or
* agreed to in writing, Licensor provides the Work (and each
* Contributor provides its Contributions) on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied, including, without limitation, any warranties or conditions
* of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
* PARTICULAR PURPOSE. You are solely responsible for determining the
* appropriateness of using or redistributing the Work and assume any
* risks associated with Your exercise of permissions under this License.
*
* 8. Limitation of Liability. In no event and under no legal theory,
* whether in tort (including negligence), contract, or otherwise,
* unless required by applicable law (such as deliberate and grossly
* negligent acts) or agreed to in writing, shall any Contributor be
* liable to You for damages, including any direct, indirect, special,
* incidental, or consequential damages of any character arising as a
* result of this License or out of the use or inability to use the
* Work (including but not limited to damages for loss of goodwill,
* work stoppage, computer failure or malfunction, or any and all
* other commercial damages or losses), even if such Contributor
* has been advised of the possibility of such damages.
*
* 9. Accepting Warranty or Additional Liability. While redistributing
* the Work or Derivative Works thereof, You may choose to offer,
* and charge a fee for, acceptance of support, warranty, indemnity,
* or other liability obligations and/or rights consistent with this
* License. However, in accepting such obligations, You may act only
* on Your own behalf and on Your sole responsibility, not on behalf
* of any other Contributor, and only if You agree to indemnify,
* defend, and hold each Contributor harmless for any liability
* incurred by, or claims asserted against, such Contributor by reason
* of your accepting any such warranty or additional liability.
*
* END OF TERMS AND CONDITIONS
*
* APPENDIX: How to apply the Apache License to your work.
*
* To apply the Apache License to your work, attach the following
* boilerplate notice, with the fields enclosed by brackets "{}"
* replaced with your own identifying information. (Don't include
* the brackets!) The text should be enclosed in the appropriate
* comment syntax for the file format. We also recommend that a
* file or class name and description of purpose be included on the
* same "printed page" as the copyright notice for easier
* identification within third-party archives.
*
* Copyright 2024 onsamepage.ai
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.samepage.common.core.helper;
import cn.hutool.core.util.StrUtil;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingRegistry;
import com.knuddels.jtokkit.api.EncodingType;
import com.knuddels.jtokkit.api.ModelType;
import lombok.experimental.UtilityClass;
import java.util.Optional;
/**
* 封装 JTokkit 方便计算字符串 tokens
*/
@UtilityClass
public class JTokkitHelper {
private final EncodingRegistry registry = Encodings.newDefaultEncodingRegistry();
/**
* 计算 token
*
* @param text text
* @param modelType modelType
* @return tokens
*/
public int countTokens(String text, String modelType) {
Optional<ModelType> optional = ModelType.fromName(modelType);
Optional<Encoding> encodingOpt;
if (optional.isPresent()) {
encodingOpt = registry.getEncodingForModel(modelType);
} else {
encodingOpt = registry.getEncoding(EncodingType.CL100K_BASE.getName());
}
if (!encodingOpt.isPresent()) {
throw new IllegalArgumentException(StrUtil.format("模型{}不存在", modelType));
}
Encoding enc = encodingOpt.get();
if (text.contains("<|endoftext|>")) {
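            // encode rejects text that contains special tokens, so inputs with
            // <|endoftext|> go through encodeOrdinary, which treats them as plain text.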
return enc.encodeOrdinary(text).size();
}
return enc.encode(text).size();
}
}
|
[
"com.knuddels.jtokkit.api.EncodingType.CL100K_BASE.getName"
] |
[((12944, 12978), 'com.knuddels.jtokkit.api.EncodingType.CL100K_BASE.getName')]
|
/**
* Copyright 2021 Rochester Institute of Technology (RIT). Developed with
* government support under contract 70RCSA22C00000008 awarded by the United
* States Department of Homeland Security for Cybersecurity and Infrastructure Security Agency.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the “Software”), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package edu.rit.se.nvip.reconciler.openai;
import com.google.common.util.concurrent.RateLimiter;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.ModelType;
import com.theokanning.openai.OpenAiHttpException;
import com.theokanning.openai.completion.chat.ChatCompletionRequest;
import com.theokanning.openai.completion.chat.ChatCompletionResult;
import com.theokanning.openai.completion.chat.ChatMessage;
import com.theokanning.openai.model.Model;
import com.theokanning.openai.service.OpenAiService;
import edu.rit.se.nvip.utils.ReconcilerEnvVars;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.*;
/**
* Make all chat completion requests through here. If other types of OpenAI requests become necessary, they should be implemented in here so that the rate limit resource is appropriately shared.
*/
public class OpenAIRequestHandler {
private final Logger logger = LogManager.getLogger(getClass().getSimpleName());
private final PriorityBlockingQueue<RequestWrapper> requestQueue = new PriorityBlockingQueue<>(1000);
private ExecutorService mainExecutor = Executors.newFixedThreadPool(1);
private ExecutorService requestExecutor = Executors.newFixedThreadPool(5);
private static OpenAIRequestHandler handler;
//https://platform.openai.com/account/rate-limits
private static final ModelType DEFAULT_CHAT_COMPLETION_MODEL = ModelType.GPT_3_5_TURBO;
private final static double TOKEN_RATE_LIMIT = 90000. / 60;
private final static double REQUEST_RATE_LIMIT = 3500. /60;
private final RateLimiter tokenLimiter = RateLimiter.create(TOKEN_RATE_LIMIT);
private final RateLimiter requestLimiter = RateLimiter.create(REQUEST_RATE_LIMIT);
private OpenAiService service;
private int nextPriorityId = 0;
private Future<?> handlerThreadFuture; // used to eventually cancel the request thread
static {
handler = new OpenAIRequestHandler();
}
private OpenAIRequestHandler() {
service = new OpenAiService(ReconcilerEnvVars.getOpenAIKey());
initExecutors();
}
/**
     * Shuts down internal executors and threads. Already-sent requests will still be fulfilled.
*/
public void shutdown() {
//the handleRequests() thread will probably be waiting on a queue.take()
handlerThreadFuture.cancel(true); // cancelling the future cancels the task
mainExecutor.shutdown(); // should go right through
requestExecutor.shutdown(); // lets the request threads finish execution
}
private void initExecutors() {
this.mainExecutor = Executors.newFixedThreadPool(1);
this.handlerThreadFuture = this.mainExecutor.submit(this::handleRequests);
this.requestExecutor = Executors.newFixedThreadPool(5);
}
/**
* makes new executors and starts processing requests again
*/
public void start() {
if (!mainExecutor.isShutdown() || !requestExecutor.isShutdown()) {
return;
}
initExecutors();
}
/**
* This class does not allow a public constructor because it must remain a singleton in order to guarantee respect for rate limits
* @return the singleton OpenAIRequestHandler()
*/
public static OpenAIRequestHandler getInstance() {
if (handler == null) {
handler = new OpenAIRequestHandler();
}
return handler;
}
private void handleRequests() {
while (true) {
RequestWrapper wrapper;
try {
wrapper = requestQueue.take();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return;
}
waitForLimiters(chatCompletionTokenCount(wrapper.request));
requestExecutor.submit(() -> sendRequest(wrapper));
}
}
private void sendRequest(RequestWrapper requestWrapper) {
try {
logger.info("sending msg");
ChatCompletionResult res = service.createChatCompletion(requestWrapper.request);
logger.info("sent");
requestWrapper.futureResult.complete(res);
} catch (OpenAiHttpException e) {
Thread.currentThread().interrupt();
requestWrapper.futureResult.completeExceptionally(e); //todo properly handle this
}
}
/**
* Sets the OpenAiService to request chat completions through. Set this to a mock when testing.
* @param service
*/
public void setService(OpenAiService service) {
// dependency injection allows easier testing
this.service = service;
}
/**
* Makes a no-cost call to openAI to verify the connection
* @return true if connected
*/
public boolean testConnection() {
// the listModels() includes an API call to account for any fine-tuned models, so this effectively validates the key and connection without actually using any tokens
try {
List<Model> models = service.listModels();
return models.size() > 0;
} catch (Exception ex) {
logger.error("Could not connect to OpenAI. Check your internet connection or key");
}
return false;
}
/**
* Queues the request for sending
* @param request the chat completion request in need of handling
* @return a future object which will be populated when the rate limiters and priority allow
*/
public Future<ChatCompletionResult> createChatCompletion(ChatCompletionRequest request, RequestorIdentity requestor) {
CompletableFuture<ChatCompletionResult> future = new CompletableFuture<>();
RequestWrapper wrapper = new RequestWrapper(request, future, requestor, nextPriorityId++);
// drop the request in the queue and tell any concerned threads about it
requestQueue.put(wrapper);
return future;
}
/**
* Computes the number of tokens this request would use if sent. This method does not make any API calls.
* @param request the completion request in question
* @return the number of tokens that will be used (not counting return tokens)
*/
public int chatCompletionTokenCount(ChatCompletionRequest request) {
Optional<Encoding> optEnc = Encodings.newDefaultEncodingRegistry().getEncodingForModel(request.getModel());
Encoding enc = optEnc.orElseGet(() -> Encodings.newDefaultEncodingRegistry().getEncodingForModel(DEFAULT_CHAT_COMPLETION_MODEL));
return chatCompletionTokenCount(request.getMessages(), enc);
}
/**
* Computes the number of tokens this request would use if sent with the given messages. This method does not make any API calls
* @param messages a list of ChatMessages to be tokenized
* @param enc the encoding to use for tokenization
* @return the number of tokens that would be used in an API call (not counting return tokens)
*/
public int chatCompletionTokenCount(List<ChatMessage> messages, Encoding enc) {
// this is not as simple as just tokenizing the openAI query because that query goes through some further processing on their end, adding tokens
// numbers gotten from https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
int tokensPerMsg = 4;
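        // 4 tokens of per-message overhead, matching the cookbook's gpt-3.5-turbo numbers;
        // note that the 3-token reply priming is not added in this count.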
int tokenCount = 0;
for (ChatMessage msg : messages) {
tokenCount += tokensPerMsg;
tokenCount += enc.encode(msg.getContent()).size();
tokenCount += enc.encode(msg.getRole()).size();
}
return tokenCount;
}
private void waitForLimiters(int tokens) {
tokenLimiter.acquire(tokens);
requestLimiter.acquire(1);
}
}
|
[
"com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncodingForModel"
] |
[((7749, 7827), 'com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncodingForModel'), ((7875, 7964), 'com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncodingForModel')]
|
package com.knuddels.jtokkit;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingRegistry;
import com.knuddels.jtokkit.api.EncodingType;
import com.knuddels.jtokkit.api.GptBytePairEncodingParams;
import com.knuddels.jtokkit.api.ModelType;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
abstract class AbstractEncodingRegistry implements EncodingRegistry {
private final ConcurrentHashMap<String, Encoding> encodings = new ConcurrentHashMap<>();
@Override
public Optional<Encoding> getEncoding(final String encodingName) {
return Optional.ofNullable(encodings.get(encodingName));
}
@Override
public Encoding getEncoding(final EncodingType encodingType) {
return Objects.requireNonNull(
encodings.get(encodingType.getName()),
() -> "No encoding registered for encoding type " + encodingType.getName()
);
}
@Override
public Optional<Encoding> getEncodingForModel(final String modelName) {
final Optional<ModelType> modelType = ModelType.fromName(modelName);
if (modelType.isPresent()) {
return Optional.of(getEncodingForModel(modelType.get()));
}
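        // Fall back to prefix matching so versioned model names (e.g. "gpt-4-0314")
        // resolve to the encoding of their base model.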
if (modelName.startsWith(ModelType.GPT_4_32K.getName())) {
return Optional.of(getEncodingForModel(ModelType.GPT_4_32K));
}
if (modelName.startsWith(ModelType.GPT_4.getName())) {
return Optional.of(getEncodingForModel(ModelType.GPT_4));
}
if (modelName.startsWith(ModelType.GPT_3_5_TURBO_16K.getName())) {
return Optional.of(getEncodingForModel(ModelType.GPT_3_5_TURBO_16K));
}
if (modelName.startsWith(ModelType.GPT_3_5_TURBO.getName())) {
return Optional.of(getEncodingForModel(ModelType.GPT_3_5_TURBO));
}
return Optional.empty();
}
@Override
public Encoding getEncodingForModel(final ModelType modelType) {
return Objects.requireNonNull(
encodings.get(modelType.getEncodingType().getName()),
() -> "No encoding registered for model type " + modelType.getName()
);
}
@Override
public EncodingRegistry registerGptBytePairEncoding(final GptBytePairEncodingParams parameters) {
return registerCustomEncoding(EncodingFactory.fromParameters(parameters));
}
@Override
public EncodingRegistry registerCustomEncoding(final Encoding encoding) {
final String encodingName = encoding.getName();
final Encoding previousEncoding = encodings.putIfAbsent(encodingName, encoding);
if (previousEncoding != null) {
throw new IllegalStateException("Encoding " + encodingName + " already registered");
}
return this;
}
protected final void addEncoding(final EncodingType encodingType) {
switch (encodingType) {
case R50K_BASE:
encodings.computeIfAbsent(encodingType.getName(), k -> EncodingFactory.r50kBase());
break;
case P50K_BASE:
encodings.computeIfAbsent(encodingType.getName(), k -> EncodingFactory.p50kBase());
break;
case P50K_EDIT:
encodings.computeIfAbsent(encodingType.getName(), k -> EncodingFactory.p50kEdit());
break;
case CL100K_BASE:
encodings.computeIfAbsent(encodingType.getName(), k -> EncodingFactory.cl100kBase());
break;
default:
throw new IllegalStateException("Unknown encoding type " + encodingType.getName());
}
}
}
|
[
"com.knuddels.jtokkit.api.ModelType.GPT_3_5_TURBO.getName",
"com.knuddels.jtokkit.api.ModelType.GPT_4_32K.getName",
"com.knuddels.jtokkit.api.ModelType.GPT_4.getName",
"com.knuddels.jtokkit.api.ModelType.GPT_3_5_TURBO_16K.getName"
] |
[((1295, 1324), 'com.knuddels.jtokkit.api.ModelType.GPT_4_32K.getName'), ((1447, 1472), 'com.knuddels.jtokkit.api.ModelType.GPT_4.getName'), ((1591, 1628), 'com.knuddels.jtokkit.api.ModelType.GPT_3_5_TURBO_16K.getName'), ((1759, 1792), 'com.knuddels.jtokkit.api.ModelType.GPT_3_5_TURBO.getName')]
|
/*
* Copyright DataStax, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.langstream.agents.text;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingRegistry;
import com.knuddels.jtokkit.api.EncodingType;
/** Java implementation of <a href="https://github.com/openai/tiktoken">tiktoken</a>. */
public class TiktokenLengthFunction implements LengthFunction {
private static final EncodingRegistry REGISTRY = Encodings.newDefaultEncodingRegistry();
private final EncodingType encodingType;
public TiktokenLengthFunction(String encoding) {
encodingType =
EncodingType.fromName(encoding)
.orElseThrow(
() ->
new IllegalArgumentException(
"Unknown encoding: " + encoding));
}
@Override
public int length(String text) {
// Encoding is stateful and it retains references to internal tokens
Encoding enc = REGISTRY.getEncoding(encodingType);
return enc.countTokens(text);
}
}
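/*
 * Hedged usage sketch (not part of the original row): shows how the length
 * function above might be called; "cl100k_base" is the EncodingType name used
 * by current OpenAI chat models. The demo class name is hypothetical.
 */
class TiktokenLengthFunctionDemo {
    public static void main(String[] args) {
        LengthFunction fn = new TiktokenLengthFunction("cl100k_base");
        // Prints the token count of the sample text.
        System.out.println(fn.length("hello world"));
    }
}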
|
[
"com.knuddels.jtokkit.api.EncodingType.fromName(encoding).orElseThrow"
] |
[((1188, 1447), 'com.knuddels.jtokkit.api.EncodingType.fromName(encoding).orElseThrow')]
|
package com.abin.mallchat.common.chatai.utils;
import com.abin.mallchat.common.chatai.domain.ChatGPTMsg;
import com.abin.mallchat.common.common.exception.BusinessException;
import com.abin.mallchat.utils.JsonUtils;
import com.fasterxml.jackson.databind.JsonNode;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingType;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import okhttp3.*;
import org.apache.commons.lang3.StringUtils;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
@Slf4j
public class ChatGPTUtils {
private static final Encoding encoding = Encodings.newDefaultEncodingRegistry().getEncoding(EncodingType.CL100K_BASE);
private static final String URL = "https://api.openai.com/v1/chat/completions";
private String model = "gpt-3.5-turbo";
private final Map<String, String> headers;
    /**
     * Request timeout in seconds (intended to be 30s; currently unused, since send() hard-codes its own timeouts)
     */
    private Integer timeout = -1;
    /**
     * Specifies the maximum length of the generated text,
     * i.e. how many tokens the output may contain at most. A token can be a word, a punctuation mark, or a space.
     */
    private int maxTokens = 2048;
    /**
     * Controls the diversity of the generated text.
     * Higher temperatures produce more randomness and variety but may lower quality. Defaults to 1; tuning between 0.7 and 1.3 is recommended.
     */
    private Object temperature = 1;
    /**
     * Also controls the diversity of the generated text.
     * It samples among the top-probability words instead of always taking the single most likely one. Defaults to 1; tuning between 0.7 and 0.9 is recommended.
     */
    private Object topP = 0.9;
    /**
     * Controls the amount of word repetition in the output.
     * Higher penalties yield fewer repeated words but may reduce fluency. Defaults to 0; tuning between 0 and 2 is recommended.
     */
    private Object frequencyPenalty = 0.0;
    /**
     * Controls how often particular words appear in the output.
     * Higher penalties yield fewer of those words but may reduce fluency. Defaults to 0; tuning between 0 and 2 is recommended.
     */
    private Object presencePenalty = 0.6;
    /**
     * Prompt messages
     */
private List<ChatGPTMsg> messages;
// private List<ChatGPTMsg> prompt;
private String proxyUrl;
public ChatGPTUtils(String key) {
HashMap<String, String> _headers_ = new HashMap<>();
_headers_.put("Content-Type", "application/json");
if (StringUtils.isBlank(key)) {
throw new BusinessException("openAi key is blank");
}
_headers_.put("Authorization", "Bearer " + key);
this.headers = _headers_;
}
public static ChatGPTUtils create(String key) {
return new ChatGPTUtils(key);
}
@SneakyThrows
public static String parseText(Response response) {
return parseText(response.body().string());
}
public static String parseText(String body) {
// log.info("body >>> " + body);
try {
return Arrays.stream(body.split("data:"))
.map(String::trim)
.filter(x -> StringUtils.isNotBlank(x) && !"[DONE]".endsWith(x))
.map(x -> Optional.ofNullable(
JsonUtils.toJsonNode(x)
.withArray("choices")
.get(0)
.with("delta")
.findValue("content"))
.map(JsonNode::asText)
.orElse(null)
).filter(Objects::nonNull).collect(Collectors.joining());
} catch (Exception e) {
log.error("parseText error e:", e);
return "闹脾气了,等会再试试吧~";
}
}
public ChatGPTUtils model(String model) {
this.model = model;
return this;
}
public ChatGPTUtils timeout(int timeout) {
this.timeout = timeout;
return this;
}
public ChatGPTUtils maxTokens(int maxTokens) {
this.maxTokens = maxTokens;
return this;
}
public ChatGPTUtils temperature(int temperature) {
this.temperature = temperature;
return this;
}
public ChatGPTUtils topP(int topP) {
this.topP = topP;
return this;
}
public ChatGPTUtils frequencyPenalty(int frequencyPenalty) {
this.frequencyPenalty = frequencyPenalty;
return this;
}
public ChatGPTUtils presencePenalty(int presencePenalty) {
this.presencePenalty = presencePenalty;
return this;
}
public ChatGPTUtils message(List<ChatGPTMsg> messages) {
this.messages = messages;
return this;
}
public ChatGPTUtils proxyUrl(String proxyUrl) {
this.proxyUrl = proxyUrl;
return this;
}
public Response send() throws IOException {
OkHttpClient okHttpClient = new OkHttpClient()
.newBuilder()
.connectTimeout(10, TimeUnit.SECONDS)
.writeTimeout(10, TimeUnit.SECONDS)
.readTimeout(60, TimeUnit.SECONDS)
.build();
Map<String, Object> paramMap = new HashMap<>();
paramMap.put("model", model);
paramMap.put("messages", messages);
paramMap.put("max_tokens", maxTokens);
paramMap.put("temperature", temperature);
paramMap.put("top_p", topP);
paramMap.put("frequency_penalty", frequencyPenalty);
paramMap.put("presence_penalty", presencePenalty);
paramMap.put("stream", true);
log.info("paramMap >>> " + JsonUtils.toStr(paramMap));
Request request = new Request.Builder()
.url(StringUtils.isNotBlank(proxyUrl) ? proxyUrl : URL)
.addHeader("Content-Type", "application/json")
.addHeader("Authorization", headers.get("Authorization"))
.post(RequestBody.create(MediaType.parse("application/json"), JsonUtils.toStr(paramMap)))
.build();
return okHttpClient.newCall(request).execute();
}
public static Integer countTokens(String messages) {
return encoding.countTokens(messages);
}
public static Integer countTokens(List<ChatGPTMsg> msg) {
return countTokens(JsonUtils.toStr(msg));
}
}
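/*
 * Hedged usage sketch (not part of the original row): exercises the fluent
 * configuration above plus the static token counter. The OPENAI_KEY
 * environment variable and the demo class name are hypothetical, and
 * ChatGPTMsg construction is elided because its shape is not shown here.
 */
class ChatGPTUtilsDemo {
    public static void main(String[] args) {
        // Token counting is a purely local computation; no network call is made.
        System.out.println(ChatGPTUtils.countTokens("hello world"));
        // Fluent configuration; send() would POST to the OpenAI endpoint once
        // messages are attached via message(...).
        ChatGPTUtils client = ChatGPTUtils.create(System.getenv("OPENAI_KEY"))
                .model("gpt-3.5-turbo")
                .maxTokens(512);
        // okhttp3.Response response = client.send(); // requires messages and a valid key
    }
}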
|
[
"com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncoding"
] |
[((721, 797), 'com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncoding'), ((3447, 3681), 'com.abin.mallchat.utils.JsonUtils.toJsonNode(x)\n .withArray("choices")\n .get(0)\n .with("delta").findValue'), ((3447, 3623), 'com.abin.mallchat.utils.JsonUtils.toJsonNode(x)\n .withArray("choices")\n .get(0).with'), ((3447, 3572), 'com.abin.mallchat.utils.JsonUtils.toJsonNode(x)\n .withArray("choices").get'), ((3447, 3528), 'com.abin.mallchat.utils.JsonUtils.toJsonNode(x).withArray')]
|
package com.gzhu.funai.api.openai;
import cn.hutool.http.ContentType;
import cn.hutool.json.JSONUtil;
import com.alibaba.fastjson.JSON;
import com.gzhu.funai.api.openai.constant.OpenAIConst;
import com.gzhu.funai.api.openai.enums.OpenAiRespError;
import com.gzhu.funai.api.openai.req.ChatGPTReq;
import com.gzhu.funai.api.openai.req.EmbeddingReq;
import com.gzhu.funai.api.openai.resp.BillingUsage;
import com.gzhu.funai.api.openai.resp.ChatGPTResp;
import com.gzhu.funai.api.openai.resp.EmbeddingResp;
import com.gzhu.funai.api.openai.resp.CreditGrantsResp;
import com.gzhu.funai.exception.BaseException;
import com.gzhu.funai.global.constant.GlobalConstant;
import com.gzhu.funai.utils.DateTimeFormatterUtil;
import com.gzhu.funai.utils.OkHttpClientUtil;
import com.gzhu.funai.utils.ResultCode;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingType;
import lombok.extern.slf4j.Slf4j;
import okhttp3.*;
import okhttp3.sse.EventSource;
import okhttp3.sse.EventSourceListener;
import okhttp3.sse.EventSources;
import java.io.IOException;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneId;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
* @Author: huangpenglong
* @Date: 2023/3/9 10:57
*/
@Slf4j
public class ChatGPTApi{
private static final String AUTHORIZATION_STR = "Authorization";
private static Encoding enc;
static {
enc = Encodings.newDefaultEncodingRegistry().getEncoding(EncodingType.CL100K_BASE);
}
    /**
     * One-shot conversation
     * @param gpt
     * @param apiKey
     * @return
     */
public static ChatGPTResp oneShotReq(ChatGPTReq gpt, String apiKey){
return sessionReq(gpt, apiKey);
}
    /**
     * Conversation with context.
     * Note: requests were originally written with hutool's HttpRequest, but that hit handshake_failure errors, so OkHttp is used now.
     * @param gpt
     * @param apiKey
     * @return
     */
public static ChatGPTResp sessionReq(ChatGPTReq gpt, String apiKey) {
Request request = new Request.Builder()
.url(OpenAIConst.HOST + OpenAIConst.CHATGPT_MAPPING)
.post(RequestBody.create(MediaType.parse(ContentType.JSON.getValue()), JSONUtil.parseObj(gpt).toString()))
.header(AUTHORIZATION_STR, "Bearer " + apiKey)
.build();
Response response = null;
try {
response = OkHttpClientUtil.getClient().newCall(request).execute();
if(!response.isSuccessful()){
OpenAiRespError openAiRespError = OpenAiRespError.get(response.code());
log.error("请求ChatGPT异常! {}", openAiRespError.msg);
throw new BaseException(openAiRespError.msg);
}
String body = response.body().string();
return JSONUtil.toBean(body, ChatGPTResp.class);
}
catch (IOException e) {
log.error("okHttpClient异常! {}", e.getMessage());
}
finally {
if(response != null){
response.close();
}
}
return null;
}
    /**
     * Query the remaining credit of an apiKey.
     * Note: requests were originally written with hutool's HttpRequest, but that hit handshake_failure errors, so OkHttp is used now.
     * @param apiKey
     * @return
     */
public static CreditGrantsResp creditGrants(String apiKey){
Request request = new Request.Builder()
.url(OpenAIConst.HOST + OpenAIConst.CREDIT_GRANTS_MAPPING)
.get()
.header(AUTHORIZATION_STR, "Bearer " + apiKey)
.build();
Response response = null;
try {
response = OkHttpClientUtil.getClient().newCall(request).execute();
if(!response.isSuccessful()){
OpenAiRespError openAiRespError = OpenAiRespError.get(response.code());
log.error("请求ChatGPT异常! {}", openAiRespError.msg);
throw new BaseException(openAiRespError.msg);
}
String body = response.body().string();
log.info("{}调用查询余额请求,返回值:{}",apiKey, body);
return JSONUtil.toBean(body, CreditGrantsResp.class);
}
catch (IOException e) {
log.error("okHttpClient异常! {}", e.getMessage());
}
finally {
if(response != null){
response.close();
}
}
return null;
}
    /**
     * Multi-turn conversation with streamed output
     * @param chatGPTReq
     * @param apiKey
     * @param eventSourceListener
     */
public static void streamSessionReq(ChatGPTReq chatGPTReq, String apiKey, EventSourceListener eventSourceListener){
if (Objects.isNull(eventSourceListener)) {
log.error("参数异常:EventSourceListener不能为空");
throw new BaseException(ResultCode.EMPTY_PARAM.msg);
}
try {
EventSource.Factory factory = EventSources.createFactory(OkHttpClientUtil.getClient());
String requestBody = JSONUtil.parseObj(chatGPTReq).toString();
Request request = new Request.Builder()
.url(OpenAIConst.HOST + OpenAIConst.CHATGPT_MAPPING)
.post(RequestBody.create(MediaType.parse(ContentType.JSON.getValue()), requestBody))
.header(AUTHORIZATION_STR, "Bearer " + apiKey)
.build();
            // Bind the request to the event listener
factory.newEventSource(request, eventSourceListener);
}
catch (Exception e) {
log.error("请求参数解析异常:{}", e);
}
}
    /**
     * Text embeddings for the given inputs
     * @param input
     * @param apiKey
     * @return
     */
public static EmbeddingResp embeddings(List<String> input, String apiKey){
EmbeddingReq embeddingReq = EmbeddingReq.builder().input(input).build();
Request request = new Request.Builder()
.url(OpenAIConst.HOST + OpenAIConst.EMBEDDING_MAPPING)
.post(RequestBody.create(MediaType.parse(ContentType.JSON.getValue()), JSONUtil.parseObj(embeddingReq).toString()))
.header(AUTHORIZATION_STR, "Bearer " + apiKey)
.build();
Response response = null;
try {
response = OkHttpClientUtil.getClient().newCall(request).execute();
if(!response.isSuccessful()){
OpenAiRespError openAiRespError = OpenAiRespError.get(response.code());
log.error("Embedding异常! {}", openAiRespError.msg);
throw new BaseException(openAiRespError.msg);
}
String body = response.body().string();
return JSONUtil.toBean(body, EmbeddingResp.class);
}
catch (IOException e) {
log.error("okHttpClient异常! {}", e.getMessage());
}
finally {
if(response != null){
response.close();
}
}
return null;
}
    /**
     * Estimate how many tokens a string occupies
     * @param message
     * @return
     */
public static int getTokenNum(String message){
return enc.encode(message).size();
}
    /**
     * Estimate how many tokens one round of contextual conversation occupies
     * @param message
     * @return
     */
public static int getMessageTokenNum(String message){
return enc.encode("role: {user}, message: {" + message + "}").size();
}
    /**
     * Fetch quota information for an apiKey
     * @param apiKey
     * @return
     */
public static BillingUsage getBillingUsage(String apiKey){
Response subResponse = null;
Response usageResponse = null;
try {
subResponse = OkHttpClientUtil.getClient()
.newCall(
new Request.Builder()
.url(OpenAIConst.HOST + OpenAIConst.SUBSCRIPTION_MAPPING)
.get()
.header(AUTHORIZATION_STR, "Bearer " + apiKey)
.build())
.execute();
            // OpenAI request error
if(!subResponse.isSuccessful()){
OpenAiRespError openAiRespError = OpenAiRespError.get(subResponse.code());
log.error("请求ChatGPT异常! {}", openAiRespError.msg);
throw new BaseException(openAiRespError.msg);
}
            // Check whether the account has expired
Map subMap = JSON.parseObject(subResponse.body().string(), Map.class);
long accessUntil = Long.parseLong(String.valueOf(subMap.get("access_until")));
if(accessUntil * GlobalConstant.TEN_K < System.currentTimeMillis()){
log.warn("检查到apiKey:{}过期,过期时间{}", apiKey,
Instant.ofEpochMilli(accessUntil * GlobalConstant.TEN_K).atZone(ZoneId.systemDefault()).toLocalDate());
                // Do not throw: certain special apiKeys can still be used after expiry
// throw new BaseException(OpenAiRespError.OPENAI_APIKEY_EXPIRED.code, OpenAiRespError.OPENAI_APIKEY_EXPIRED.msg);
}
            // Fetch the total quota
BigDecimal totalAmount = BigDecimal.valueOf(Double.parseDouble(String.valueOf(subMap.get("hard_limit_usd"))));
            // Fetch the used quota (via a sliding date window, since OpenAI only returns 100 days of usage per request)
BigDecimal totalUsage = new BigDecimal(0);
LocalDate startDate = LocalDate.now().minusDays(95);
LocalDate endDate = LocalDate.now().plusDays(1);
while(true){
                // Query the usage within this date range
String usageUrl = OpenAIConst.HOST + String.format(
OpenAIConst.USAGE_MAPPING,
DateTimeFormatterUtil.DFT.format(startDate),
DateTimeFormatterUtil.DFT.format(endDate));
usageResponse = OkHttpClientUtil.getClient()
.newCall(new Request.Builder()
.url(usageUrl)
.get()
.header(AUTHORIZATION_STR, "Bearer " + apiKey)
.build())
.execute();
Map usageMap = JSON.parseObject(usageResponse.body().string(), Map.class);
BigDecimal curUsage = BigDecimal.valueOf(Double.parseDouble(String.valueOf(usageMap.get("total_usage"))));
                // Zero usage in a window means the key saw no use before that point
if(curUsage.compareTo(BigDecimal.ZERO) <= 0){
break;
}
                // Accumulate the usage
totalUsage = totalUsage.add(curUsage);
                // Slide the date window backwards
endDate = startDate;
startDate = endDate.minusDays(95);
}
return new BillingUsage(
totalAmount,
totalUsage.divide(new BigDecimal("100"), 2, RoundingMode.HALF_UP),
Instant.ofEpochMilli(accessUntil * GlobalConstant.TEN_K).atZone(ZoneId.systemDefault()).toLocalDate());
}
catch (IOException e) {
log.error("okHttpClient异常! {}", e.getMessage());
}
finally {
if(subResponse != null){
subResponse.close();
}
if(usageResponse != null){
usageResponse.close();
}
}
return null;
}
private ChatGPTApi(){}
}
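/*
 * Hedged usage sketch (not part of the original row): the token estimators
 * above are local computations on the cl100k_base encoding, so they work
 * without an API key. The demo class name is hypothetical.
 */
class ChatGPTApiTokenDemo {
    public static void main(String[] args) {
        // Raw token count of a plain string:
        System.out.println(ChatGPTApi.getTokenNum("hello world"));
        // Count including the role/message wrapper used by this project:
        System.out.println(ChatGPTApi.getMessageTokenNum("hello world"));
    }
}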
|
[
"com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncoding"
] |
[((1543, 1619), 'com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncoding'), ((2303, 2330), 'cn.hutool.http.ContentType.JSON.getValue'), ((2333, 2366), 'cn.hutool.json.JSONUtil.parseObj(gpt).toString'), ((2529, 2584), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient().newCall(request).execute'), ((2529, 2574), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient().newCall'), ((3809, 3864), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient().newCall(request).execute'), ((3809, 3854), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient().newCall'), ((5218, 5258), 'cn.hutool.json.JSONUtil.parseObj(chatGPTReq).toString'), ((5446, 5473), 'cn.hutool.http.ContentType.JSON.getValue'), ((6027, 6070), 'com.gzhu.funai.api.openai.req.EmbeddingReq.builder().input(input).build'), ((6027, 6062), 'com.gzhu.funai.api.openai.req.EmbeddingReq.builder().input'), ((6248, 6275), 'cn.hutool.http.ContentType.JSON.getValue'), ((6278, 6320), 'cn.hutool.json.JSONUtil.parseObj(embeddingReq).toString'), ((6483, 6538), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient().newCall(request).execute'), ((6483, 6528), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient().newCall'), ((7899, 8252), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient()\n .newCall(\n new Request.Builder()\n .url(OpenAIConst.HOST + OpenAIConst.SUBSCRIPTION_MAPPING)\n .get()\n .header(AUTHORIZATION_STR, "Bearer " + apiKey)\n .build()).execute'), ((7899, 8221), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient().newCall'), ((8976, 9077), 'java.time.Instant.ofEpochMilli(accessUntil * GlobalConstant.TEN_K).atZone(ZoneId.systemDefault()).toLocalDate'), ((8976, 9063), 'java.time.Instant.ofEpochMilli(accessUntil * GlobalConstant.TEN_K).atZone'), ((9673, 9702), 'java.time.LocalDate.now().minusDays'), ((9736, 9763), 'java.time.LocalDate.now().plusDays'), ((9989, 10032), 'com.gzhu.funai.utils.DateTimeFormatterUtil.DFT.format'), ((10058, 10099), 'com.gzhu.funai.utils.DateTimeFormatterUtil.DFT.format'), ((10134, 10459), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient()\n .newCall(new Request.Builder()\n .url(usageUrl)\n .get()\n .header(AUTHORIZATION_STR, "Bearer " + apiKey)\n .build()).execute'), ((10134, 10424), 'com.gzhu.funai.utils.OkHttpClientUtil.getClient().newCall'), ((11310, 11411), 'java.time.Instant.ofEpochMilli(accessUntil * GlobalConstant.TEN_K).atZone(ZoneId.systemDefault()).toLocalDate'), ((11310, 11397), 'java.time.Instant.ofEpochMilli(accessUntil * GlobalConstant.TEN_K).atZone')]
|
package com.knuddels.jtokkit;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingRegistry;
import com.knuddels.jtokkit.api.EncodingType;
import com.knuddels.jtokkit.api.ModelType;
import java.util.Optional;
/**
* A lazy initialization implementation of {@link EncodingRegistry}. It does not register any encoding until either the
* {@link #getEncoding(EncodingType)} or {@link #getEncoding(String)} method is called.
* When one of these methods is called, the requested {@link EncodingType} is registered.
*/
final class LazyEncodingRegistry extends AbstractEncodingRegistry {
@Override
public Encoding getEncoding(final EncodingType encodingType) {
addEncoding(encodingType);
return super.getEncoding(encodingType);
}
@Override
public Optional<Encoding> getEncoding(final String encodingName) {
EncodingType.fromName(encodingName).ifPresent(this::addEncoding);
return super.getEncoding(encodingName);
}
@Override
public Encoding getEncodingForModel(final ModelType modelType) {
addEncoding(modelType.getEncodingType());
return super.getEncodingForModel(modelType);
}
}
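/*
 * Hedged usage sketch (not part of the original row): the lazy registry is
 * normally obtained through the Encodings factory rather than instantiated
 * directly; nothing is registered until the first lookup below. The demo
 * class name is hypothetical.
 */
class LazyEncodingRegistryDemo {
    public static void main(String[] args) {
        EncodingRegistry registry = Encodings.newLazyEncodingRegistry();
        // The first call registers CL100K_BASE on demand, then delegates upward.
        Encoding encoding = registry.getEncoding(EncodingType.CL100K_BASE);
        System.out.println(encoding.countTokens("lazy init"));
    }
}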
|
[
"com.knuddels.jtokkit.api.EncodingType.fromName(encodingName).ifPresent"
] |
[((881, 945), 'com.knuddels.jtokkit.api.EncodingType.fromName(encodingName).ifPresent')]
|
package com.datasqrl.ai;
import com.datasqrl.ai.api.GraphQLExecutor;
import com.datasqrl.ai.backend.APIChatBackend;
import com.datasqrl.ai.backend.AnnotatedChatMessage;
import com.datasqrl.ai.backend.MessageTruncator;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.ModelType;
import com.theokanning.openai.completion.chat.ChatCompletionChunk;
import com.theokanning.openai.completion.chat.ChatCompletionRequest;
import com.theokanning.openai.completion.chat.ChatFunctionCall;
import com.theokanning.openai.completion.chat.ChatMessage;
import com.theokanning.openai.completion.chat.ChatMessageRole;
import com.theokanning.openai.service.OpenAiService;
import io.reactivex.Flowable;
import java.nio.file.Path;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import lombok.Value;
/**
* A simple streaming chatbot for the command line.
* The implementation uses OpenAI's GPT models with a default configuration
* and {@link APIChatBackend} to call APIs that pull in requested data
* as well as save and restore chat messages across sessions.
*
* This implementation is based on <a href="https://github.com/TheoKanning/openai-java/blob/main/example/src/main/java/example/OpenAiApiFunctionsWithStreamExample.java">https://github.com/TheoKanning/openai-java</a>
* and meant only for demonstration and testing.
*
* To run the main method, you need to set your OPENAI token as an environment variable.
* The main method expects the name of an {@link Examples} value.
*/
@Value
public class CmdLineChatBot {
OpenAiService service;
APIChatBackend backend;
ChatModel chatModel = ChatModel.GPT35_TURBO;
List<ChatMessage> messages = new ArrayList<>();
/**
* Initializes a command line chat bot
*
* @param openAIKey The OpenAI API key to call the API
* @param backend An initialized backend to use for function execution and chat message persistence
*/
public CmdLineChatBot(String openAIKey, APIChatBackend backend) {
service = new OpenAiService(openAIKey, Duration.ofSeconds(60));
this.backend = backend;
}
/**
* Starts the chatbot on the command line which will accepts questions and produce responses.
* Type "exit" to terminate.
*
* @param instructionMessage The system instruction message for the ChatBot
*/
public void start(String instructionMessage, Map<String, Object> context) {
Scanner scanner = new Scanner(System.in);
ChatMessage systemMessage = new ChatMessage(ChatMessageRole.SYSTEM.value(), instructionMessage);
MessageTruncator messageTruncator = new MessageTruncator(chatModel.getMaxInputTokens(), systemMessage,
Encodings.newDefaultEncodingRegistry().getEncodingForModel(chatModel.getEncodingModel()));
messages.addAll(backend.getChatMessages(context, 30).stream().map(AnnotatedChatMessage::getMessage).collect(
Collectors.toUnmodifiableList()));
System.out.print("First Query: ");
ChatMessage firstMsg = new ChatMessage(ChatMessageRole.USER.value(), scanner.nextLine());
messages.add(firstMsg);
backend.saveChatMessage(firstMsg, context);
while (true) {
ChatCompletionRequest chatCompletionRequest = ChatCompletionRequest
.builder()
.model(chatModel.getOpenAIModel())
.messages(messageTruncator.truncateMessages(messages, backend.getChatFunctions()))
.functions(backend.getChatFunctions())
.functionCall(ChatCompletionRequest.ChatCompletionRequestFunctionCall.of("auto"))
.n(1)
.maxTokens(chatModel.getCompletionLength())
.logitBias(new HashMap<>())
.build();
Flowable<ChatCompletionChunk> flowable = service.streamChatCompletion(chatCompletionRequest);
AtomicBoolean isFirst = new AtomicBoolean(true);
ChatMessage chatMessage = service.mapStreamToAccumulator(flowable)
.doOnNext(accumulator -> {
if (accumulator.isFunctionCall()) {
if (isFirst.getAndSet(false)) {
System.out.println("Executing function " + accumulator.getAccumulatedChatFunctionCall().getName() + "...");
}
} else {
if (isFirst.getAndSet(false)) {
System.out.print("Response: ");
}
if (accumulator.getMessageChunk().getContent() != null) {
System.out.print(accumulator.getMessageChunk().getContent());
}
}
})
.doOnComplete(System.out::println)
.lastElement()
.blockingGet()
.getAccumulatedMessage();
messages.add(chatMessage); // don't forget to update the conversation with the latest response
backend.saveChatMessage(chatMessage, context);
if (chatMessage.getFunctionCall() != null) {
ChatFunctionCall fctCall = chatMessage.getFunctionCall();
//System.out.println("Trying to execute " + fctCall.getName() + " with arguments " + fctCall.getArguments().toPrettyString());
ChatMessage functionResponse = backend.executeAndConvertToMessageHandlingExceptions(fctCall, context);
//System.out.println("Executed " + fctCall.getName() + ".");
messages.add(functionResponse);
backend.saveChatMessage(functionResponse, context);
continue;
}
System.out.print("Next Query: ");
String nextLine = scanner.nextLine();
if (nextLine.equalsIgnoreCase("exit")) {
System.exit(0);
}
ChatMessage nextMsg = new ChatMessage(ChatMessageRole.USER.value(), nextLine);
messages.add(nextMsg);
backend.saveChatMessage(nextMsg, context);
}
}
public static final String DEFAULT_GRAPHQL_ENDPOINT = "http://localhost:8888/graphql";
public static void main(String... args) throws Exception {
if (args==null || args.length==0) throw new IllegalArgumentException("Please provide the name of the example you want to run. One of: " + Arrays.toString(Examples.values()));
Examples example = Examples.valueOf(args[0].trim().toUpperCase());
String openAIToken = System.getenv("OPENAI_TOKEN");
String graphQLEndpoint = DEFAULT_GRAPHQL_ENDPOINT;
if (args.length>1) graphQLEndpoint = args[1];
Map<String,Object> context = Map.of();
if (example.hasUserId()) {
Scanner scanner = new Scanner(System.in);
System.out.print("Enter the User ID: ");
String userid = scanner.nextLine();
context = example.getContext(userid);
}
GraphQLExecutor apiExecutor = new GraphQLExecutor(graphQLEndpoint);
APIChatBackend backend = APIChatBackend.of(Path.of(example.configFile), apiExecutor);
CmdLineChatBot chatBot = new CmdLineChatBot(openAIToken, backend);
chatBot.start(example.systemPrompt, context);
}
}
|
[
"com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncodingForModel"
] |
[((2681, 2711), 'com.theokanning.openai.completion.chat.ChatMessageRole.SYSTEM.value'), ((2849, 2937), 'com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncodingForModel'), ((3180, 3208), 'com.theokanning.openai.completion.chat.ChatMessageRole.USER.value'), ((3633, 3699), 'com.theokanning.openai.completion.chat.ChatCompletionRequest.ChatCompletionRequestFunctionCall.of'), ((5705, 5733), 'com.theokanning.openai.completion.chat.ChatMessageRole.USER.value')]
|
package io.github.cupybara.javalangchains.usecases;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import java.io.File;
import java.io.IOException;
import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Comparator;
import org.apache.logging.log4j.LogManager;
import org.apache.lucene.store.Directory;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.EncodingType;
import io.github.cupybara.javalangchains.chains.Chain;
import io.github.cupybara.javalangchains.chains.base.ApplyToStreamInputChain;
import io.github.cupybara.javalangchains.chains.base.logging.LoggingChain;
import io.github.cupybara.javalangchains.chains.data.reader.ReadDocumentsFromPdfChain;
import io.github.cupybara.javalangchains.chains.data.retrieval.LuceneRetrievalChain;
import io.github.cupybara.javalangchains.chains.data.writer.WriteDocumentsToLuceneDirectoryChain;
import io.github.cupybara.javalangchains.chains.llm.openai.chat.OpenAiChatCompletionsChain;
import io.github.cupybara.javalangchains.chains.llm.openai.chat.OpenAiChatCompletionsParameters;
import io.github.cupybara.javalangchains.chains.qa.AnswerWithSources;
import io.github.cupybara.javalangchains.chains.qa.CombineDocumentsChain;
import io.github.cupybara.javalangchains.chains.qa.MapAnswerWithSourcesChain;
import io.github.cupybara.javalangchains.chains.qa.ModifyDocumentsContentChain;
import io.github.cupybara.javalangchains.chains.qa.split.JtokkitTextSplitter;
import io.github.cupybara.javalangchains.chains.qa.split.SplitDocumentsChain;
import io.github.cupybara.javalangchains.util.PromptTemplates;
/**
* tests for a complete qa {@link Chain}
*
* we'll read documents from our demo john doe pdfs at src/test/resources/pdf
* and then ask questions about the protagonist.
*/
class RetrievalQaTest {
private static Path tempIndexPath;
private static Directory directory;
@BeforeAll
static void beforeAll() throws IOException, URISyntaxException {
tempIndexPath = Files.createTempDirectory("lucene");
/*
* We are also using a chain to create the lucene index directory
*/
final Chain<Path, Directory> createLuceneIndexChain = new ReadDocumentsFromPdfChain()
// Optional Chain: split pdfs based on a max token count of 1000
.chain(new SplitDocumentsChain(new JtokkitTextSplitter(
Encodings.newDefaultEncodingRegistry().getEncoding(EncodingType.CL100K_BASE), 1000)))
// Mandatory Chain: write split pdf documents to a lucene directory
.chain(new WriteDocumentsToLuceneDirectoryChain(tempIndexPath));
final Path pdfDirectoryPath = Paths.get(RetrievalQaTest.class.getResource("/pdf/qa").toURI());
directory = createLuceneIndexChain.run(pdfDirectoryPath);
}
@AfterAll
static void afterAll() throws IOException {
directory.close();
Files.walk(tempIndexPath).sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(File::delete);
}
@Test
void testQa() throws IOException {
final OpenAiChatCompletionsParameters openAiChatParameters = new OpenAiChatCompletionsParameters()
.temperature(0D).model("gpt-3.5-turbo");
/*
* Chain 1: The retrievalChain is used to retrieve relevant documents from an
* index by using bm25 similarity
*/
try (final LuceneRetrievalChain retrievalChain = new LuceneRetrievalChain(directory, 2)) {
/*
* Chain 2: The summarizeDocumentsChain is used to summarize documents to only
* contain the most relevant information. This is achieved using an OpenAI LLM
* (gpt-3.5-turbo in this case)
*/
final ModifyDocumentsContentChain summarizeDocumentsChain = new ModifyDocumentsContentChain(
new OpenAiChatCompletionsChain(PromptTemplates.QA_SUMMARIZE, openAiChatParameters,
System.getenv("OPENAI_API_KEY")));
/*
* Chain 3: The combineDocumentsChain is used to combine the retrieved documents
* in a single prompt
*/
final CombineDocumentsChain combineDocumentsChain = new CombineDocumentsChain();
/*
* Chain 4: The openAiChatChain is used to process the combined prompt using an
* OpenAI LLM (gpt-3.5-turbo in this case)
*/
final OpenAiChatCompletionsChain openAiChatChain = new OpenAiChatCompletionsChain(
PromptTemplates.QA_COMBINE, openAiChatParameters, System.getenv("OPENAI_API_KEY"));
/*
* Chain 5: The mapAnswerWithSourcesChain is used to map the llm string output
* to a complex object using a regular expression which splits the sources and
* the answer.
*/
final MapAnswerWithSourcesChain mapAnswerWithSourcesChain = new MapAnswerWithSourcesChain();
// @formatter:off
// we combine all chain links into a self contained QA chain
final Chain<String, AnswerWithSources> qaChain = retrievalChain
.chain(summarizeDocumentsChain)
.chain(new ApplyToStreamInputChain<>(new LoggingChain<>(LoggingChain.defaultLogPrefix("SUMMARIZED_DOCUMENT"))))
.chain(combineDocumentsChain)
.chain(new LoggingChain<>(LoggingChain.defaultLogPrefix("COMBINED_DOCUMENT")))
.chain(openAiChatChain)
.chain(new LoggingChain<>(LoggingChain.defaultLogPrefix("LLM_RESULT")))
.chain(mapAnswerWithSourcesChain);
// @formatter:on
// the QA chain can now be called with a question and delivers an answer
final AnswerWithSources answerToValidQuestion = qaChain.run("who is john doe?");
assertNotNull(answerToValidQuestion, "no answer provided");
assertFalse(answerToValidQuestion.getSources().isEmpty(), "no sources");
LogManager.getLogger().info("answer to valid question: {}", answerToValidQuestion);
}
}
}
|
[
"com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncoding"
] |
[((2565, 2641), 'com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncoding'), ((3034, 3133), 'java.nio.file.Files.walk(tempIndexPath).sorted(Comparator.reverseOrder()).map(Path::toFile).forEach'), ((3034, 3111), 'java.nio.file.Files.walk(tempIndexPath).sorted(Comparator.reverseOrder()).map'), ((3034, 3093), 'java.nio.file.Files.walk(tempIndexPath).sorted'), ((5703, 5785), 'org.apache.logging.log4j.LogManager.getLogger().info')]
|
package com.datasqrl.ai.spring;
import com.datasqrl.ai.Examples;
import com.datasqrl.ai.api.GraphQLExecutor;
import com.datasqrl.ai.backend.APIChatBackend;
import com.datasqrl.ai.backend.AnnotatedChatMessage;
import com.datasqrl.ai.backend.MessageTruncator;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.knuddels.jtokkit.Encodings;
import com.theokanning.openai.completion.chat.ChatCompletionRequest;
import com.theokanning.openai.completion.chat.ChatCompletionRequest.ChatCompletionRequestFunctionCall;
import com.theokanning.openai.completion.chat.ChatFunctionCall;
import com.theokanning.openai.completion.chat.ChatMessage;
import com.theokanning.openai.completion.chat.ChatMessageRole;
import com.theokanning.openai.service.OpenAiService;
import java.io.IOException;
import java.net.URL;
import java.nio.file.Path;
import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.web.bind.annotation.*;
import java.util.Arrays;
import java.util.List;
@SpringBootApplication
public class SimpleServer {
public static final String DEFAULT_GRAPHQL_ENDPOINT = "http://localhost:8888/graphql";
public static void main(String[] args) {
SpringApplication.run(SimpleServer.class, args);
}
@CrossOrigin(origins = "*")
@RestController
public static class MessageController {
private final Examples example;
OpenAiService service;
GraphQLExecutor apiExecutor;
APIChatBackend backend;
ChatMessage systemMessage;
MessageTruncator messageTruncator;
List functions;
String chartFunctionName="";
public MessageController(@Value("${example:nutshop}") String exampleName) throws IOException {
this.example = Examples.valueOf(exampleName.trim().toUpperCase());
String openAIToken = System.getenv("OPENAI_TOKEN");
this.service = new OpenAiService(openAIToken, Duration.ofSeconds(60));
String graphQLEndpoint = DEFAULT_GRAPHQL_ENDPOINT;
this.apiExecutor = new GraphQLExecutor(graphQLEndpoint);
this.backend = APIChatBackend.of(Path.of(example.getConfigFile()), apiExecutor);
this.systemMessage = new ChatMessage(ChatMessageRole.SYSTEM.value(), example.getSystemPrompt());
this.messageTruncator = new MessageTruncator(example.getModel().getMaxInputTokens(), systemMessage,
Encodings.newDefaultEncodingRegistry().getEncodingForModel(example.getModel().getEncodingModel()));
this.functions = new ArrayList<>();
this.functions.addAll(backend.getChatFunctions());
if (example.isSupportCharts()) {
ObjectMapper objectMapper = new ObjectMapper();
URL url = SimpleServer.class.getClassLoader().getResource("plotfunction.json");
if (url != null) {
try {
JsonNode functionJson = objectMapper.readTree(url);
this.chartFunctionName = functionJson.get("name").textValue();
this.functions.add(functionJson);
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
private Map<String,Object> getContext(String userId) {
return Map.of(example.getUserIdFieldName(), example.getPrepareUserIdFct().apply(
userId));
}
@GetMapping("/messages")
public List<ResponseMessage> getMessages(@RequestParam String userId) {
Map<String,Object> context = getContext(userId);
List<AnnotatedChatMessage> messages = backend.getChatMessages(context, 50);
return messages.stream().filter(msg -> {
ChatMessage m = msg.getMessage();
ChatMessageRole role = ChatMessageRole.valueOf(m.getRole().toUpperCase());
switch (role) {
case USER:
case ASSISTANT:
return true;
}
return false;
}).map(ResponseMessage::of).collect(Collectors.toUnmodifiableList());
}
@PostMapping("/messages")
public ResponseMessage postMessage(@RequestBody InputMessage message) {
Map<String,Object> context = getContext(message.getUserId());
List<ChatMessage> messages = new ArrayList<>(30);
backend.getChatMessages(context, 20).stream().map(AnnotatedChatMessage::getMessage)
.forEach(messages::add);
System.out.printf("Retrieved %d messages\n", messages.size());
ChatMessage chatMessage = new ChatMessage(ChatMessageRole.USER.value(), message.getContent());
messages.add(chatMessage);
backend.saveChatMessage(chatMessage, context);
while (true) {
System.out.println("Calling OpenAI");
ChatCompletionRequest chatCompletionRequest = ChatCompletionRequest
.builder()
.model(example.getModel().getOpenAIModel())
.messages(messageTruncator.truncateMessages(messages, backend.getChatFunctions()))
.functions(functions)
.functionCall(ChatCompletionRequestFunctionCall.of("auto"))
.n(1)
.maxTokens(example.getModel().getCompletionLength())
.logitBias(new HashMap<>())
.build();
ChatMessage responseMessage = service.createChatCompletion(chatCompletionRequest).getChoices().get(0).getMessage();
messages.add(responseMessage); // don't forget to update the conversation with the latest response
backend.saveChatMessage(responseMessage, context);
ChatFunctionCall functionCall = responseMessage.getFunctionCall();
if (functionCall != null && !functionCall.getName().equalsIgnoreCase(chartFunctionName)) {
System.out.println("Executing " + functionCall.getName() + " with arguments " + functionCall.getArguments().toPrettyString());
ChatMessage functionResponse = backend.executeAndConvertToMessageHandlingExceptions(
functionCall, context);
//System.out.println("Executed " + fctCall.getName() + ".");
messages.add(functionResponse);
backend.saveChatMessage(functionResponse, context);
} else {
//The text answer
return ResponseMessage.of(responseMessage);
}
}
}
}
}
|
[
"com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncodingForModel"
] |
[((2444, 2474), 'com.theokanning.openai.completion.chat.ChatMessageRole.SYSTEM.value'), ((2620, 2717), 'com.knuddels.jtokkit.Encodings.newDefaultEncodingRegistry().getEncodingForModel'), ((4611, 4639), 'com.theokanning.openai.completion.chat.ChatMessageRole.USER.value')]
|