sendMessage method

Future<MessageModel> sendMessage(
  MessageModel request
)

Implementation

/// Sends [request] to the `chatbot/` endpoint and returns the resulting message.
///
/// On HTTP 200 the decoded [AiResponseModel] is attached to a fresh
/// [MessageModel], and [request.threadId] is updated from the server's
/// response (empty string when the server returns no thread id).
///
/// On any non-200 status, thrown [Exception], or [Error], the failure is
/// logged and the original request is echoed back unchanged (preserving any
/// response data it already carried) so the caller can still render it —
/// this method never throws.
Future<MessageModel> sendMessage(MessageModel request) async {
  try {
    http.Response response = (await client.post(
      Uri.parse("${serverUrl}chatbot/"),
      body: json.encode(request.toJson()),
      headers: <String, String>{
        'Content-Type': 'application/json; charset=UTF-8',
        // NOTE(review): if prefs.read returns null this map insertion throws
        // a TypeError, which is swallowed by the Error handler below —
        // confirm the assistant id is always set before this call.
        'assistant': prefs.read(GS_KEY_ASSISTANT_ID)
      },
    ))
        .withUTFEncoding();

    if (response.statusCode == 200) {
      final responseData =
          AiResponseModel.fromJson(json.decode(response.body));
      // Propagate the (possibly newly created) conversation thread id.
      request.threadId = responseData.threadId ?? "";
      return MessageModel(request.message, request.userId, request.threadId,
          timestampInMillis: request.timestampInMillis,
          response: responseData);
    }
    // Non-200: was previously swallowed silently — log before falling back.
    Fimber.e("sendMessage failed with status ${response.statusCode}");
    return _echoRequest(request);
  } on Exception catch (e, stackTrace) {
    // Log message AND stack trace (original dropped the stack trace here).
    Fimber.e(e.toString(), ex: e, stacktrace: stackTrace);
    return _echoRequest(request);
  } on Error catch (e, stackTrace) {
    // Log message AND stack trace (original dropped the message here).
    Fimber.e(e.toString(), ex: e, stacktrace: stackTrace);
    return _echoRequest(request);
  }
}

/// Builds the fallback [MessageModel] used when the server call fails:
/// mirrors [request] and preserves whatever response data it already had.
MessageModel _echoRequest(MessageModel request) => MessageModel(
      request.message,
      request.userId,
      request.threadId,
      timestampInMillis: request.timestampInMillis,
      response: AiResponseModel(
          id: request.response?.id,
          userId: request.response?.userId,
          threadId: request.response?.threadId,
          gptResponse: request.response?.gptResponse),
    );