
ai.conf

The following are the spec and example files for ai.conf.

# This file contains settings and values that you can use to configure
# the Lynx AI Agent for Splunk.

[install]

is_configured = <boolean>
* Specifies whether the app is already configured on this search head.
* When set to false, opening the app will redirect the user to the configuration page.
* Default: false

[backend]

server = <string>
* The server hosting the backend API for the agent.
* Can include a port number if needed.
* Default: api.lynxai.dev

schema = <http|https>
* Schema to use for the backend API.
* HTTP is unencrypted and not recommended.
* Default: https

onprem_mode = <boolean>
* Set this to true if the specified backend server is hosted on-premises.
* If set to true, you must also define at least one [aiModel::<unique_name>] stanza (see below).
* Default: false
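* For illustration, a hypothetical on-premises [backend] configuration (the hostname and port are placeholders) might look like:
*     server = ai-backend.internal.example.com:8443
*     schema = https
*     onprem_mode = true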

[systemRules]

RULE-<name> = <string>
* A rule that is applied to all user conversations.
* From the point of view of the agent, the rule appears as the first message in the conversation.
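* For illustration, a hypothetical rule might look like:
*     RULE-citations = Always include the SPL query used to produce any statistic you cite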

[riskyCommands]

allow_list = <comma-separated list>
* Comma-separated list of risky commands that the agent is allowed to execute.
* Supported allow-list entries:
* map,collect,mcollect,meventcollect,outputcsv,outputlookup
* Explicitly disallowed:
* delete,dump,run,runshellscript,script,sendalert,sendemail,tscollect
* Commands in the supported list are blocked unless added here; explicitly disallowed commands are always blocked.
* Default: None
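* For illustration, a configuration that permits the agent to write to summary indexes and lookups (a deliberate policy choice, not a recommendation) might look like:
*     allow_list = collect,mcollect,outputlookup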

[riskyCommand::<command>]
* Use this stanza style to mark additional Splunk commands as risky.
* You will have to specify a category and a description for the UI to display.
* You can have multiple instances of this stanza.

category = <string>
* Category for the custom risky command.
* This will be used in the title of the risky command notification.

description = <string>
* Description for the custom risky command.
* Explain why this command should not be auto-executed by the agent.
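* For illustration, a hypothetical stanza for a custom command named export_to_s3 (a placeholder name) might look like:
*     [riskyCommand::export_to_s3]
*     category = External Data Transfer
*     description = Copies search results to external storage and should be reviewed before running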

[aiModel::<unique_name>]
* Only required if onprem_mode is set to true.
* Replace <unique_name> with a unique internal identifier for the model.

name = <string>
* This is the model name the backend will use to request data from the inference engine.
* Set this to the model name as it is served by vLLM or LiteLLM.

display_name = <string>
* Model name to display in the UI.
* This is the name that will be shown to the user.

reasoning = <boolean>
* Whether the model is expected to output reasoning tokens in the response.
* Note: The actual reasoning support is determined by the inference engine.

context_window = <integer>
* This value is used to show the current context fullness in the chat conversation.
* Not actually used by the backend, but displayed in the UI.
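* For illustration, a hypothetical stanza for a model served locally through vLLM (the model name and context size are placeholders) might look like:
*     [aiModel::llama-3.1-8b]
*     name = meta-llama/Llama-3.1-8B-Instruct
*     display_name = Llama 3.1 8B
*     reasoning = false
*     context_window = 32768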

[format]

auto_format_spl_query = <boolean>
* Toggles automatic formatting of SPL queries in chat responses.
* When enabled, the agent:
* 1. Inserts newlines after each pipe.
* 2. Normalizes spacing around pipes.
* 3. Removes trailing pipes.
* Default: true
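* For illustration, with this setting enabled, a response containing
*     index=web status=500 | stats count by host | sort -count |
* might be rendered along these lines (the exact layout depends on the agent):
*     index=web status=500 |
*     stats count by host |
*     sort -count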

[search]

earliest_time = <string>
* The default earliest time for search execution.
* Supported format: [+|-]<time_integer><time_unit>@<time_unit>
* Default: -15m@m

latest_time = <string>
* The default latest time for search execution.
* Supported format: [+|-]<time_integer><time_unit>@<time_unit>
* Default: now
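* For example, setting earliest_time = -7d@d and latest_time = @d would make new searches default to the last 7 complete days, snapped to midnight.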

# The following is an example ai.conf:

[systemRules]
RULE-emdash = Avoid using em dashes (—) in responses
RULE-timepicker = Dashboards must always include a time picker

[riskyCommands]
allow_list = collect,outputlookup

[riskyCommand::foreach]
category = Performance Impact
description = Can cause significant performance degradation

[aiModel::glm-4.7]
name = z-ai/glm-4.7
display_name = GLM 4.7
reasoning = false
context_window = 128000

[aiModel::glm-4.7-thinking]
name = z-ai/glm-4.7
display_name = GLM 4.7 Thinking
reasoning = true
context_window = 200000

[search]
earliest_time = -24h@h
latest_time = now