Skip to content

< Back


DefaultSamplingPipeline

Namespace: LLama.Sampling

An implementation of ISamplingPipeline which mimics the default llama.cpp sampling pipeline

1
public class DefaultSamplingPipeline : BaseSamplingPipeline, ISamplingPipeline, System.IDisposable

Inheritance Object → BaseSamplingPipeline → DefaultSamplingPipeline
Implements ISamplingPipeline, IDisposable
Attributes NullableContextAttribute, NullableAttribute

Properties

LogitBias

Bias values to add to certain logits

1
public IReadOnlyDictionary<LLamaToken, float> LogitBias { get; set; }

Property Value

IReadOnlyDictionary<LLamaToken, Single>

RepeatPenalty

Repetition penalty, as described in https://arxiv.org/abs/1909.05858

1
public float RepeatPenalty { get; set; }

Property Value

Single

FrequencyPenalty

Frequency penalty as described by OpenAI: https://platform.openai.com/docs/api-reference/chat/create
Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.

1
public float FrequencyPenalty { get; set; }

Property Value

Single

PresencePenalty

Presence penalty as described by OpenAI: https://platform.openai.com/docs/api-reference/chat/create
Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.

1
public float PresencePenalty { get; set; }

Property Value

Single

PenaltyCount

How many tokens should be considered for penalties

1
public int PenaltyCount { get; set; }

Property Value

Int32

PenalizeNewline

Whether the newline token should be protected from being modified by penalty

1
public bool PenalizeNewline { get; set; }

Property Value

Boolean

PreventEOS

Whether the EOS token should be suppressed. Setting this to 'true' prevents EOS from being sampled

1
public bool PreventEOS { get; set; }

Property Value

Boolean

Temperature

Temperature to apply (higher temperature is more "creative")

1
public float Temperature { get; set; }

Property Value

Single

TopK

Number of tokens to keep in TopK sampling

1
public int TopK { get; set; }

Property Value

Int32

TypicalP

P value for locally typical sampling

1
public float TypicalP { get; set; }

Property Value

Single

TopP

P value for TopP sampling

1
public float TopP { get; set; }

Property Value

Single

MinP

P value for MinP sampling

1
public float MinP { get; set; }

Property Value

Single

Grammar

Grammar to apply to constrain possible tokens

1
public Grammar Grammar { get; set; }

Property Value

Grammar

MinKeep

The minimum number of tokens to keep for samplers which remove tokens

1
public int MinKeep { get; set; }

Property Value

Int32

Seed

Seed to use for random sampling

1
public uint Seed { get; set; }

Property Value

UInt32

GrammarOptimization

Selected grammar optimization mode

1
public GrammarOptimizationMode GrammarOptimization { get; set; }

Property Value

GrammarOptimizationMode

Constructors

DefaultSamplingPipeline()

1
public DefaultSamplingPipeline()

Methods

Dispose()

1
public void Dispose()

Reset()

1
public void Reset()

Accept(LLamaToken)

1
public void Accept(LLamaToken token)

Parameters

token LLamaToken

CreateChain(SafeLLamaContextHandle)

1
protected SafeLLamaSamplerChainHandle CreateChain(SafeLLamaContextHandle context)

Parameters

context SafeLLamaContextHandle

Returns

SafeLLamaSamplerChainHandle

Sample(SafeLLamaContextHandle, Int32)

1
public LLamaToken Sample(SafeLLamaContextHandle ctx, int index)

Parameters

ctx SafeLLamaContextHandle

index Int32

Returns

LLamaToken


< Back