diff --git a/DESCRIPTION b/DESCRIPTION index 6f65e260..93d41ff9 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -25,7 +25,6 @@ Imports: bslib (>= 0.8.0), cli, colorspace, - curl, glue, htmltools, htmlwidgets, diff --git a/NAMESPACE b/NAMESPACE index 8a0fc5c1..add55d17 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -64,7 +64,6 @@ import(shiny) importFrom(R6,R6Class) importFrom(glue,glue) importFrom(htmltools,div) -importFrom(htmltools,htmlDependency) importFrom(htmltools,tag) importFrom(htmltools,tagList) importFrom(htmltools,tags) diff --git a/NEWS.md b/NEWS.md index e28990c4..8b0aaf39 100644 --- a/NEWS.md +++ b/NEWS.md @@ -10,6 +10,7 @@ - API calls now run async with ExtendedTask. #224 - New styling of chat app. #224 - Add code syntax highlighting to chat app. #224 +- Replace curl calls with httr2. #224 ## gptstudio 0.4.0 diff --git a/R/api-transcribe-audio.R b/R/api-transcribe-audio.R index 68e85912..29af8ffe 100644 --- a/R/api-transcribe-audio.R +++ b/R/api-transcribe-audio.R @@ -54,9 +54,9 @@ transcribe_audio <- function(audio_input, api_key = Sys.getenv("OPENAI_API_KEY") writeBin(parsed$data, temp_webm) system_result <- # nolint system2("ffmpeg", - args = c("-i", temp_webm, "-acodec", "pcm_s16le", "-ar", "44100", temp_wav), # nolint - stdout = TRUE, - stderr = TRUE + args = c("-i", temp_webm, "-acodec", "pcm_s16le", "-ar", "44100", temp_wav), # nolint + stdout = TRUE, + stderr = TRUE ) if (!file.exists(temp_wav)) { @@ -66,7 +66,10 @@ transcribe_audio <- function(audio_input, api_key = Sys.getenv("OPENAI_API_KEY") req <- request("https://api.openai.com/v1/audio/transcriptions") %>% req_auth_bearer_token(api_key) %>% req_body_multipart( - file = curl::form_file(temp_wav), + file = structure(list(path = temp_wav, + type = NULL, + name = NULL), + class = "form_file"), model = "whisper-1", response_format = "text" ) diff --git a/R/service-ollama.R b/R/service-ollama.R index c4e3f885..d8acd532 100644 --- a/R/service-ollama.R +++ b/R/service-ollama.R @@ -49,25 +49,14 @@ body_to_json_str <- function(x) { ollama_perform_stream <- function(request, parser) { - request_body <- request %>% - purrr::pluck("body") - - request_url <- request %>% - purrr::pluck("url") - - request_handle <- curl::new_handle() %>% - curl::handle_setopt(postfields = body_to_json_str(request_body)) - - curl_response <- curl::curl_fetch_stream( - url = request_url, - handle = request_handle, - fun = function(x) parser$parse_ndjson(rawToChar(x)) - ) - - response_json( - url = curl_response$url, - method = "POST", - body = list(response = parser$lines) + req_perform_stream( + request, + callback = function(x) { + parser$parse_ndjson(rawToChar(x)) + TRUE + }, + buffer_kb = 0.01, + round = "line" ) } diff --git a/R/service-openai_streaming.R b/R/service-openai_streaming.R index 90664dd7..d45d19a7 100644 --- a/R/service-openai_streaming.R +++ b/R/service-openai_streaming.R @@ -13,59 +13,70 @@ #' By default, it is fetched from the "OPENAI_API_KEY" environment variable. #' Please note that the OpenAI API key is sensitive information and should be #' treated accordingly. 
-#' @return The same as `curl::curl_fetch_stream`
+#' @return The same as `httr2::req_perform_stream`
 stream_chat_completion <- function(messages = list(list(role = "user", content = "Hi there!")),
-                                   element_callback = cat,
+                                   element_callback = openai_handler,
                                    model = "gpt-4o-mini",
                                    openai_api_key = Sys.getenv("OPENAI_API_KEY")) {
   # Set the API endpoint URL
   url <- paste0(getOption("gptstudio.openai_url"), "/chat/completions")
 
-  # Set the request headers
-  headers <- list(
-    "Content-Type" = "application/json",
-    "Authorization" = paste0("Bearer ", openai_api_key)
-  )
-
-  # Set the request body
   body <- list(
     "model" = model,
     "stream" = TRUE,
     "messages" = messages
   )
 
-  # Create a new curl handle object
-  handle <- curl::new_handle() %>%
-    curl::handle_setheaders(.list = headers) %>%
-    curl::handle_setopt(postfields = jsonlite::toJSON(body, auto_unbox = TRUE)) # request body
 
-  # Make the streaming request using curl_fetch_stream()
-  curl::curl_fetch_stream(
-    url = url,
-    fun = function(x) {
-      element <- rawToChar(x)
-      element_callback(element) # Do whatever element_callback does
-    },
-    handle = handle
-  )
+  # Perform the request as a stream, passing each line-rounded chunk to
+  # `element_callback` as it arrives
+  request(url) %>%
+    req_headers(
+      "Content-Type" = "application/json",
+      "Authorization" = paste0("Bearer ", openai_api_key)
+    ) %>%
+    req_body_json(body) %>%
+    req_perform_stream(
+      callback = function(x) {
+        element_callback(rawToChar(x))
+        TRUE
+      },
+      round = "line",
+      buffer_kb = 0.01
+    )
 }
 
+# Default callback for `stream_chat_completion()`: strips the SSE framing from
+# each chunk and prints the streamed delta text to the console.
+openai_handler <- function(x) {
+  lines <- stringr::str_split(x, "\n")[[1]]
+  lines <- lines[lines != ""]
+  lines <- stringr::str_replace_all(lines, "^data: ", "")
+  lines <- lines[lines != "[DONE]"]
+  # A single chunk can carry several server-sent events, so parse each line
+  # on its own rather than handing the whole vector to the JSON parser
+  for (line in lines) {
+    json <- jsonlite::parse_json(line)
+    if (is.null(json$choices[[1]]$finish_reason)) {
+      cat(json$choices[[1]]$delta$content)
+    }
+  }
+  invisible()
+}
+
 #' Stream handler for chat completions
 #'
-#' R6 class that allows to handle chat completions chunk by chunk.
-#' It also adds methods to retrieve relevant data. This class DOES NOT make the request.
+#' R6 class that allows handling chat completions chunk by chunk. It also adds
+#' methods to retrieve relevant data. This class DOES NOT make the request.
 #'
-#' Because `curl::curl_fetch_stream` blocks the R console until the stream finishes,
-#' this class can take a shiny session object to handle communication with JS
-#' without recurring to a `shiny::observe` inside a module server.
+#' Because `httr2::req_perform_stream` blocks the R console until the stream
+#' finishes, this class can take a shiny session object to handle communication
+#' with JS without resorting to a `shiny::observe` inside a module server.
 #'
 #' @param session The shiny session it will send the message to (optional).
-#' @param user_prompt The prompt for the chat completion.
-#' Only to be displayed in an HTML tag containing the prompt. (Optional).
-#' @param parsed_event An already parsed server-sent event to append to the events field.
+#' @param user_prompt The prompt for the chat completion. Only to be displayed
+#' in an HTML tag containing the prompt. (Optional).
+#' @param parsed_event An already parsed server-sent event to append to the
+#' events field.
 #' @importFrom R6 R6Class
 #' @importFrom jsonlite fromJSON
 OpenaiStreamParser <- R6::R6Class( # nolint
@@ -92,7 +103,6 @@ OpenaiStreamParser <- R6::R6Class( # nolint
     append_parsed_sse = function(parsed_event) {
       # ----- here you can do whatever you want with the event data -----
       if (is.null(parsed_event$data) || parsed_event$data == "[DONE]") {
-        cli::cli_alert_info("Skipping")
         return()
       }
diff --git a/inst/WORDLIST b/inst/WORDLIST
index 011ebb2f..fd48d2ae 100644
--- a/inst/WORDLIST
+++ b/inst/WORDLIST
@@ -59,6 +59,7 @@ grey
 httr
 huggingface
 instruc
+js
 json
 linux
 magrittr
diff --git a/inst/assets/css/audioRecorder.css b/inst/assets/css/audioRecorder.css
new file mode 100644
index 00000000..a1381f05
--- /dev/null
+++ b/inst/assets/css/audioRecorder.css
@@ -0,0 +1,12 @@
+av-settings-menu {
+  display: block;
+  width: min-content;
+}
+
+/* The normal treatment of .dropdown-item.active is a little too much */
+av-settings-menu .dropdown-item.active,
+av-settings-menu .dropdown-menu > li > a.active {
+  color: inherit;
+  background-color: inherit;
+  font-weight: bold;
+}
diff --git a/man/OpenaiStreamParser.Rd b/man/OpenaiStreamParser.Rd
index c9b1a9ee..603de9bb 100644
--- a/man/OpenaiStreamParser.Rd
+++ b/man/OpenaiStreamParser.Rd
@@ -9,12 +9,12 @@ Stream handler for chat completions
 Stream handler for chat completions
 }
 \details{
-R6 class that allows to handle chat completions chunk by chunk.
-It also adds methods to retrieve relevant data. This class DOES NOT make the request.
+R6 class that allows handling chat completions chunk by chunk. It also adds
+methods to retrieve relevant data. This class DOES NOT make the request.
 
-Because \code{curl::curl_fetch_stream} blocks the R console until the stream finishes,
-this class can take a shiny session object to handle communication with JS
-without recurring to a \code{shiny::observe} inside a module server.
+Because \code{httr2::req_perform_stream} blocks the R console until the stream
+finishes, this class can take a shiny session object to handle communication
+with JS without resorting to a \code{shiny::observe} inside a module server.
 }
 \section{Super class}{
 \code{\link[SSEparser:SSEparser]{SSEparser::SSEparser}} -> \code{OpenaiStreamParser}
@@ -60,8 +60,8 @@ Start a StreamHandler. Recommended to be assigned to the \code{stream_handler} n
 \describe{
 \item{\code{session}}{The shiny session it will send the message to (optional).}
 
-\item{\code{user_prompt}}{The prompt for the chat completion.
-Only to be displayed in an HTML tag containing the prompt. (Optional).}
+\item{\code{user_prompt}}{The prompt for the chat completion. Only to be displayed
+in an HTML tag containing the prompt. (Optional).}
 }
 \if{html}{\out{</div>}}
 }
@@ -79,7 +79,8 @@ send a custom message to a shiny session, escaping shiny's reactivity.
 \subsection{Arguments}{
 \if{html}{\out{<div class="arguments">}}
 \describe{
-\item{\code{parsed_event}}{An already parsed server-sent event to append to the events field.}
+\item{\code{parsed_event}}{An already parsed server-sent event to append to the
+events field.}
 }
 \if{html}{\out{</div>}}
 }
 }
diff --git a/man/stream_chat_completion.Rd b/man/stream_chat_completion.Rd
index 2fea3b43..25ed5736 100644
--- a/man/stream_chat_completion.Rd
+++ b/man/stream_chat_completion.Rd
@@ -6,7 +6,7 @@
 \usage{
 stream_chat_completion(
   messages = list(list(role = "user", content = "Hi there!")),
-  element_callback = cat,
+  element_callback = openai_handler,
   model = "gpt-4o-mini",
   openai_api_key = Sys.getenv("OPENAI_API_KEY")
 )
@@ -27,7 +27,7 @@ Please note that the OpenAI API key is sensitive information and should be
 treated accordingly.}
 }
 \value{
-The same as \code{curl::curl_fetch_stream}
+The same as \code{httr2::req_perform_stream}
}
 \description{
 \code{stream_chat_completion} sends the prepared chat completion request to the
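
Note for reviewers: two parts of this diff lean on behavior that is easy to miss.

First, the hand-built `structure(list(path = temp_wav, type = NULL, name = NULL), class = "form_file")` in R/api-transcribe-audio.R mimics the object `curl::form_file(temp_wav)` returns (minus curl's path normalization), which is how the multipart body machinery recognizes a file upload. Building it inline is what lets curl drop out of Imports, although httr2 itself still depends on curl at runtime.

Second, `httr2::req_perform_stream()` blocks while it repeatedly hands the callback a raw vector; with `round = "line"` each chunk is cut on a newline boundary, and the callback must return `TRUE` to keep the stream alive (returning `FALSE` cancels it). Below is a minimal standalone sketch of that contract; the endpoint URL and model name are illustrative assumptions, not part of this change:

library(httr2)
library(magrittr)

# Build a request against a hypothetical NDJSON streaming endpoint
# (e.g. a local Ollama server at its default port)
req <- request("http://localhost:11434/api/generate") %>%
  req_body_json(list(model = "example-model", prompt = "Hi", stream = TRUE))

resp <- req_perform_stream(
  req,
  callback = function(x) {
    cat(rawToChar(x)) # `x` is raw; round = "line" cuts it on a newline
    TRUE              # keep streaming; FALSE would stop early
  },
  round = "line",
  buffer_kb = 0.01 # flush to the callback roughly every ten bytes
)

The tiny `buffer_kb` matches what the diff uses: it trades throughput for latency so each server-sent event reaches the callback (and the UI) as soon as it arrives.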