Commit 19f705f0 authored by Mohamad Bashar Desoki's avatar Mohamad Bashar Desoki

Use Ollama4j Library

parent 9dbeb09e
......@@ -15,7 +15,8 @@ This project creates a Telegram bot that utilizes a large language model (LLM) t
1. Create a Telegram Bot
* Visit [From BotFather to 'Hello World'](https://core.telegram.org/bots/tutorial) on Telegram and create a new bot.
* You'll receive a bot token. Store this securely (e.g., environment variables)
2. [Llama3 70b free API](https://huggingface.co/spaces/FumesAI/llama-3-API)
2. [Deploy local llama3](https://ollama.com/download)
3. [Use Java library (wrapper/binding) for Ollama server.](https://github.com/amithkoujalgi/ollama4j/tree/main)
### Configuration
......
......@@ -33,6 +33,14 @@
<version>1.2.17</version>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>io.github.amithkoujalgi</groupId>
<artifactId>ollama4j</artifactId>
<version>1.0.64</version>
</dependency>
</dependencies>
</project>
\ No newline at end of file
package org.example;
import io.github.amithkoujalgi.ollama4j.core.exceptions.OllamaBaseException;
import org.telegram.telegrambots.bots.TelegramLongPollingBot;
import org.telegram.telegrambots.meta.api.methods.send.SendMessage;
import org.telegram.telegrambots.meta.api.objects.Update;
import org.telegram.telegrambots.meta.exceptions.TelegramApiException;
import java.io.IOException;
public class Bot extends TelegramLongPollingBot {
// https://core.telegram.org/bots/tutorial
// https://core.telegram.org/bots/tutorial
// https://amithkoujalgi.github.io/ollama4j/docs/intro
private String prompt; // last prompt text received from the user; logged and forwarded to Ollama in onUpdateReceived
@Override
public String getBotUsername() {
    // The README instructs storing credentials securely (environment variables),
    // yet this returned a hard-coded placeholder. Read from the environment and
    // fall back to the original placeholder so behavior is unchanged when unset.
    String username = System.getenv("TELEGRAM_BOT_USERNAME");
    return username != null ? username : "Put your Telegram Username here";
}
@Override
public String getBotToken() {
    // Never hard-code a bot token in source (it grants full control of the bot).
    // Read from the environment per the README; fall back to the original
    // placeholder so behavior is unchanged when the variable is unset.
    String token = System.getenv("TELEGRAM_BOT_TOKEN");
    return token != null ? token : "Put your Telegram Token here";
}
private String prompts; // NOTE(review): duplicate of the `prompt` field and no longer read — TODO remove
@Override
public void onUpdateReceived(Update update) {
    // Handle one incoming Telegram update: forward the user's text to the local
    // llama3 model via Ollama and send the answer back to the same chat.
    //
    // Fixes over the previous revision: the local variable `response` was
    // declared twice (compile error), a dead code path through the removed
    // `llama3` helper remained, the incoming prompt was logged twice, and
    // InterruptedException was rethrown without restoring the interrupt flag.
    var msg = update.getMessage();
    if (msg == null || msg.getText() == null) {
        // Updates without a text message (edits, joins, stickers) carry nothing to answer.
        return;
    }
    var user = msg.getFrom();
    var id = user.getId();
    prompt = msg.getText();
    System.out.println(user.getFirstName() + " wrote " + prompt);
    try {
        String response = Ollama.getResponse(prompt);
        sendText(id, response);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // preserve the interrupt status for callers
        sendText(id, " \n \uD83D\uDC50\uD83C\uDFFB please try again later! \uD83D\uDE0A");
        throw new RuntimeException(e);
    } catch (OllamaBaseException | IOException e) {
        // Model/server failure: apologize to the user, then surface the cause.
        sendText(id, " \n \uD83D\uDC50\uD83C\uDFFB please try again later! \uD83D\uDE0A");
        throw new RuntimeException(e);
    }
}
public void sendText(Long who, String what){
public void sendText(Long who, String what) {
SendMessage sm = SendMessage.builder()
.chatId(who.toString()) //Who are we sending a message to
.text(what).build(); //Message content
......
package org.example;
import io.github.amithkoujalgi.ollama4j.core.OllamaAPI;
import io.github.amithkoujalgi.ollama4j.core.OllamaStreamHandler;
import io.github.amithkoujalgi.ollama4j.core.exceptions.OllamaBaseException;
import io.github.amithkoujalgi.ollama4j.core.models.OllamaResult;
import io.github.amithkoujalgi.ollama4j.core.utils.OptionsBuilder;
import java.io.IOException;
/**
 * Thin wrapper around the ollama4j client for a locally running Ollama server.
 * Provides a blocking one-shot completion and a token-streaming variant.
 */
public class Ollama {
    /** Model name passed to the Ollama server. */
    static final String LLAMA3 = "llama3";
    /** Base URL of the local Ollama server (default install listens on 11434). */
    static final String host = "http://localhost:11434/";

    /**
     * Sends {@code prompt} to the llama3 model and returns the complete answer.
     *
     * @param prompt user text to complete
     * @return the model's answer suffixed with an emoji, or an apology string
     *         if no result was produced
     * @throws OllamaBaseException  on server-side errors
     * @throws IOException          on connection failures
     * @throws InterruptedException if the request is interrupted
     */
    public static String getResponse(String prompt) throws OllamaBaseException, IOException, InterruptedException {
        OllamaAPI ollamaAPI = new OllamaAPI(host);
        // Local generation can be slow; the library's default timeout is too short.
        ollamaAPI.setRequestTimeoutSeconds(120);
        OllamaResult result =
                ollamaAPI.generate(LLAMA3, prompt, new OptionsBuilder().build());
        if (result != null) {
            return result.getResponse() + "\n \uD83D\uDC50\uD83C\uDFFB";
        }
        return " \n \uD83D\uDC50\uD83C\uDFFB please try again later! \uD83D\uDE0A";
    }

    /**
     * Streams the model's answer for {@code prompt}, printing each partial
     * result to stdout as tokens arrive, then prints the full response.
     *
     * <p>Should be called from a separate thread to gain a non-blocking
     * streaming effect.
     *
     * @param prompt user text to complete
     * @throws OllamaBaseException  on server-side errors
     * @throws IOException          on connection failures
     * @throws InterruptedException if the request is interrupted
     */
    public void getStreamResponse(String prompt) throws OllamaBaseException, IOException, InterruptedException {
        OllamaAPI ollamaAPI = new OllamaAPI(host);
        // Match the timeout used by getResponse so long generations don't abort.
        ollamaAPI.setRequestTimeoutSeconds(120);
        // Stream handler (Consumer<String>) invoked with the partial response so far.
        OllamaStreamHandler streamHandler = (s) -> {
            System.out.println(s);
        };
        // BUG FIX: previously a hard-coded question ("What is the capital of
        // France? ...") was sent instead of the caller's prompt.
        OllamaResult result = ollamaAPI.generate(LLAMA3, prompt,
                new OptionsBuilder().build(), streamHandler);
        System.out.println("Full response: " + result.getResponse());
    }
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment