Skip to content

Commit

Permalink
remove curl dependency
Browse files Browse the repository at this point in the history
  • Loading branch information
JamesHWade committed Aug 31, 2024
1 parent 0a028fa commit 2ff5473
Show file tree
Hide file tree
Showing 10 changed files with 82 additions and 67 deletions.
1 change: 0 additions & 1 deletion DESCRIPTION
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@ Imports:
bslib (>= 0.8.0),
cli,
colorspace,
curl,
glue,
htmltools,
htmlwidgets,
Expand Down
1 change: 0 additions & 1 deletion NAMESPACE
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,6 @@ import(shiny)
importFrom(R6,R6Class)
importFrom(glue,glue)
importFrom(htmltools,div)
importFrom(htmltools,htmlDependency)
importFrom(htmltools,tag)
importFrom(htmltools,tagList)
importFrom(htmltools,tags)
Expand Down
1 change: 1 addition & 0 deletions NEWS.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
- API calls now run async with ExtendedTask. #224
- New styling of chat app. #224
- Add code syntax highlighting to chat app. #224
- Replace curl calls with httr2. #224

## gptstudio 0.4.0

Expand Down
11 changes: 7 additions & 4 deletions R/api-transcribe-audio.R
Original file line number Diff line number Diff line change
Expand Up @@ -54,9 +54,9 @@ transcribe_audio <- function(audio_input, api_key = Sys.getenv("OPENAI_API_KEY")
writeBin(parsed$data, temp_webm)
system_result <- # nolint
system2("ffmpeg",
args = c("-i", temp_webm, "-acodec", "pcm_s16le", "-ar", "44100", temp_wav), # nolint
stdout = TRUE,
stderr = TRUE
args = c("-i", temp_webm, "-acodec", "pcm_s16le", "-ar", "44100", temp_wav), # nolint
stdout = TRUE,
stderr = TRUE
)

if (!file.exists(temp_wav)) {
Expand All @@ -66,7 +66,10 @@ transcribe_audio <- function(audio_input, api_key = Sys.getenv("OPENAI_API_KEY")
req <- request("https://api.openai.com/v1/audio/transcriptions") %>%
req_auth_bearer_token(api_key) %>%
req_body_multipart(
file = curl::form_file(temp_wav),
file = structure(list(path = temp_wav,
type = NULL,
name = NULL),
class = "form_file"),
model = "whisper-1",
response_format = "text"
)
Expand Down
27 changes: 8 additions & 19 deletions R/service-ollama.R
Original file line number Diff line number Diff line change
Expand Up @@ -49,25 +49,14 @@ body_to_json_str <- function(x) {


ollama_perform_stream <- function(request, parser) {
  # Perform a streaming request against the Ollama API, feeding each raw
  # chunk into `parser` as it arrives.
  #
  # @param request An httr2 request object, already configured with the
  #   target URL and JSON body.
  # @param parser A stream-parser object exposing `parse_ndjson()`; it
  #   accumulates the newline-delimited JSON emitted by Ollama.
  # @return The same as `httr2::req_perform_stream()` (an httr2 response).
  req_perform_stream(
    request,
    callback = function(chunk) {
      # `chunk` is a raw vector; decode and hand it to the NDJSON parser.
      parser$parse_ndjson(rawToChar(chunk))
      # Returning TRUE tells httr2 to keep streaming.
      TRUE
    },
    # Small buffer + line rounding so the parser sees complete NDJSON lines
    # promptly instead of waiting for a large buffer to fill.
    buffer_kb = 0.01,
    round = "line"
  )
}

Expand Down
74 changes: 42 additions & 32 deletions R/service-openai_streaming.R
Original file line number Diff line number Diff line change
Expand Up @@ -13,59 +13,70 @@
#' By default, it is fetched from the "OPENAI_API_KEY" environment variable.
#' Please note that the OpenAI API key is sensitive information and should be
#' treated accordingly.
#' @return The same as `curl::curl_fetch_stream`
#' @return The same as `httr2::req_perform_stream`
stream_chat_completion <-
  function(messages = list(list(role = "user", content = "Hi there!")),
           element_callback = openai_handler,
           model = "gpt-4o-mini",
           openai_api_key = Sys.getenv("OPENAI_API_KEY")) {
    # Set the API endpoint URL
    url <- paste0(getOption("gptstudio.openai_url"), "/chat/completions")

    # Set the request body; `stream = TRUE` asks OpenAI for server-sent
    # events instead of a single JSON response.
    body <- list(
      "model" = model,
      "stream" = TRUE,
      "messages" = messages
    )

    # Build and perform the streaming request. The pipe result is returned
    # directly: assigning it to an unused local (as before) triggered the
    # object_usage_linter warning and served no purpose.
    request(url) %>%
      req_headers(
        "Content-Type" = "application/json",
        "Authorization" = paste0("Bearer ", openai_api_key)
      ) %>%
      req_body_json(body) %>%
      req_perform_stream(
        callback = function(x) {
          # Decode the raw chunk and delegate handling to the caller's
          # callback (defaults to `openai_handler`).
          element_callback(rawToChar(x))
          # TRUE keeps the stream open until the server closes it.
          TRUE
        },
        # Line rounding with a tiny buffer so callbacks fire per SSE line.
        round = "line",
        buffer_kb = 0.01
      )
  }


openai_handler <- function(x) {
  # Default stream callback: decode OpenAI server-sent-event text and print
  # each content delta to the console.
  #
  # @param x A character scalar holding one or more SSE lines
  #   (e.g. 'data: {...}\n').
  lines <- stringr::str_split(x, "\n")[[1]]
  lines <- lines[lines != ""]
  lines <- stringr::str_replace_all(lines, "^data: ", "")
  lines <- lines[lines != "[DONE]"]
  if (length(lines) == 0) {
    return(invisible(NULL))
  }
  # A buffered chunk can carry several data lines at once;
  # jsonlite::parse_json() only accepts length-1 input, so parse each line
  # individually (the previous single-call version errored on multi-line
  # chunks).
  for (line in lines) {
    json <- jsonlite::parse_json(line)
    # A non-NULL finish_reason marks the end of the completion; there is no
    # content delta to print for that event.
    if (is.null(json$choices[[1]]$finish_reason)) {
      cat(json$choices[[1]]$delta$content)
    }
  }
  invisible(NULL)
}

#' Stream handler for chat completions
#'
#' R6 class that allows to handle chat completions chunk by chunk.
#' It also adds methods to retrieve relevant data. This class DOES NOT make the request.
#' R6 class that allows handling chat completions chunk by chunk. It also
#' adds methods to retrieve relevant data. This class DOES NOT make the
#' request.
#'
#' Because `curl::curl_fetch_stream` blocks the R console until the stream finishes,
#' this class can take a shiny session object to handle communication with JS
#' without recurring to a `shiny::observe` inside a module server.
#' Because `httr2::req_perform_stream` blocks the R console until the stream
#' finishes, this class can take a shiny session object to handle communication
#' with JS without resorting to a `shiny::observe` inside a module server.
#'
#' @param session The shiny session it will send the message to (optional).
#' @param user_prompt The prompt for the chat completion.
#' Only to be displayed in an HTML tag containing the prompt. (Optional).
#' @param parsed_event An already parsed server-sent event to append to the events field.
#' @param user_prompt The prompt for the chat completion. Only to be displayed
#' in an HTML tag containing the prompt. (Optional).
#' @param parsed_event An already parsed server-sent event to append to the
#' events field.
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON
OpenaiStreamParser <- R6::R6Class( # nolint
Expand All @@ -92,7 +103,6 @@ OpenaiStreamParser <- R6::R6Class( # nolint
append_parsed_sse = function(parsed_event) {
# ----- here you can do whatever you want with the event data -----
if (is.null(parsed_event$data) || parsed_event$data == "[DONE]") {
cli::cli_alert_info("Skipping")
return()
}

Expand Down
1 change: 1 addition & 0 deletions inst/WORDLIST
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ grey
httr
huggingface
instruc
js
json
linux
magrittr
Expand Down
12 changes: 12 additions & 0 deletions inst/assets/css/audioRecorder.css
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
/* Styles for the <av-settings-menu> custom element used by the audio
   recorder UI. */
av-settings-menu {
  display: block;
  width: min-content;
}

/* The normal treatment of .dropdown-item.active is a little too much */
av-settings-menu .dropdown-item.active,
av-settings-menu .dropdown-menu > li > a.active {
  color: inherit;
  background-color: inherit;
  font-weight: bold;
}
17 changes: 9 additions & 8 deletions man/OpenaiStreamParser.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 2 additions & 2 deletions man/stream_chat_completion.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

0 comments on commit 2ff5473

Please sign in to comment.