Batch Operations
Handle high-volume messaging with batch operations for sending messages, importing contacts, and managing templates.
Overview
When dealing with large volumes:
- Batch message sending - Send to multiple recipients in one request
- Bulk contact import - Import thousands of contacts efficiently
- Rate limit management - Stay within API limits
- Queue architecture - Process large jobs asynchronously
Large-Scale Sending Flow
Batch Message Sending
Multiple Recipients
Send the same message to up to 1000 recipients in one request:
curl -X POST "https://api.sent.dm/v3/messages" \
-H "x-api-key: $SENT_API_KEY" \
-H "Content-Type: application/json" \
-d '{
"to": [
"+1234567890",
"+1987654321",
"+1555555555"
],
"template": {
"id": "tmpl_announcement",
"parameters": {
"announcement": "New feature launched!"
}
},
"channel": ["sms", "whatsapp"]
}'
const response = await client.messages.send({
to: [
'+1234567890',
'+1987654321',
'+1555555555'
// Up to 1000 recipients
],
template: {
id: 'tmpl_announcement',
parameters: {
announcement: 'New feature launched!'
}
}
});
// Track individual message IDs
const messageIds = response.data.recipients.map(r => r.message_id);
console.log(`Sent ${messageIds.length} messages`);
response = client.messages.send(
to=[
"+1234567890",
"+1987654321",
"+1555555555"
# Up to 1000 recipients
],
template={
"id": "tmpl_announcement",
"parameters": {
"announcement": "New feature launched!"
}
}
)
message_ids = [r.message_id for r in response.data.recipients]
print(f"Sent {len(message_ids)} messages")
response, err := client.Messages.Send(context.Background(), sentdm.MessageSendParams{
To: []string{
"+1234567890",
"+1987654321",
"+1555555555",
// Up to 1000 recipients
},
Channel: []string{"sms", "whatsapp"},
Template: sentdm.MessageSendParamsTemplate{
ID: sentdm.String("tmpl_announcement"),
Parameters: map[string]interface{}{
"announcement": "New feature launched!",
},
},
})
// Track individual message IDs
messageIDs := make([]string, len(response.Data.Recipients))
for i, r := range response.Data.Recipients {
messageIDs[i] = r.MessageID
}
fmt.Printf("Sent %d messages\n", len(messageIDs))
MessageSendParams params = MessageSendParams.builder()
.addTo("+1234567890")
.addTo("+1987654321")
.addTo("+1555555555")
.addChannel("sms")
.addChannel("whatsapp")
.template(MessageSendParams.Template.builder()
.id("tmpl_announcement")
.parameters(MessageSendParams.Template.Parameters.builder()
.putAdditionalProperty("announcement", JsonValue.from("New feature launched!"))
.build())
.build())
.build();
var response = client.messages().send(params);
// Track individual message IDs
List<String> messageIds = response.data().recipients().stream()
.map(r -> r.messageId())
.toList();
System.out.println("Sent " + messageIds.size() + " messages");
MessageSendParams parameters = new()
{
To = new List<string> {
"+1234567890",
"+1987654321",
"+1555555555"
// Up to 1000 recipients
},
Channels = new List<string> { "sms", "whatsapp" },
Template = new MessageSendParamsTemplate
{
Id = "tmpl_announcement",
Parameters = new Dictionary<string, string>
{
{ "announcement", "New feature launched!" }
}
}
};
var response = await client.Messages.SendAsync(parameters);
// Track individual message IDs
var messageIds = response.Data.Recipients.Select(r => r.MessageId).ToList();
Console.WriteLine($"Sent {messageIds.Count} messages");
$result = $client->messages->send(
to: [
'+1234567890',
'+1987654321',
'+1555555555'
// Up to 1000 recipients
],
template: [
'id' => 'tmpl_announcement',
'parameters' => [
'announcement' => 'New feature launched!'
]
],
channels: ['sms', 'whatsapp']
);
// Track individual message IDs
$message_ids = array_map(fn($r) => $r->message_id, $result->data->recipients);
echo "Sent " . count($message_ids) . " messages\n";
result = sent_dm.messages.send(
to: [
"+1234567890",
"+1987654321",
"+1555555555"
# Up to 1000 recipients
],
template: {
id: "tmpl_announcement",
parameters: {
announcement: "New feature launched!"
}
},
channels: ["sms", "whatsapp"]
)
# Track individual message IDs
message_ids = result.data.recipients.map(&:message_id)
puts "Sent #{message_ids.length} messages"
Batch Response
{
"success": true,
"data": {
"status": "QUEUED",
"template_id": "7ba7b820-9dad-11d1-80b4-00c04fd430c8",
"template_name": "product_announcement",
"recipients": [
{ "message_id": "msg_001", "to": "+1234567890", "channel": "sms" },
{ "message_id": "msg_002", "to": "+1987654321", "channel": "whatsapp" },
{ "message_id": "msg_003", "to": "+1555555555", "channel": "sms" }
]
},
"error": null,
"meta": {
"request_id": "req_batch_001",
"timestamp": "2026-03-04T11:28:25.2096416+00:00",
"version": "v3"
}
}
Batch requests count against rate limits per request (not per recipient). Each POST /v3/messages call counts as one request toward the 200 req/min limit, regardless of how many recipients are included.
Large-Scale Sending (10,000+ Recipients)
For campaigns with tens of thousands of recipients, implement a queue-based approach:
// batchProcessor.ts
/**
 * Send a templated campaign to an arbitrarily large recipient list by
 * splitting it into API-sized batches.
 *
 * @param recipients - E.164 phone numbers; any length (0 recipients is a no-op).
 * @param templateId - Template to render for every recipient.
 * @returns Counts of accepted/failed recipients plus the accepted message IDs.
 *          "sent" means accepted by the API — subscribe to webhooks for
 *          actual delivery status.
 */
async function sendCampaign(recipients: string[], templateId: string) {
  const BATCH_SIZE = 1000; // API maximum recipients per POST /v3/messages
  const DELAY_BETWEEN_BATCHES = 1000; // 1 second between requests for rate-limit headroom

  // Explicitly typed accumulator: a bare `messageIds: []` literal infers
  // never[] under strict mode, which rejects the push of string IDs below.
  const results: { sent: number; failed: number; messageIds: string[] } = {
    sent: 0,
    failed: 0,
    messageIds: []
  };

  // Process in batches of at most BATCH_SIZE recipients.
  for (let i = 0; i < recipients.length; i += BATCH_SIZE) {
    const batch = recipients.slice(i, i + BATCH_SIZE);
    const batchNumber = i / BATCH_SIZE + 1;
    try {
      const response = await client.messages.send({
        to: batch,
        template: { id: templateId }
      });
      // In sandbox mode, all recipients are accepted. Use webhooks for actual delivery status
      results.sent += response.data.recipients.length;
      results.messageIds.push(...response.data.recipients.map(r => r.message_id));
      console.log(`Batch ${batchNumber} complete: ${batch.length} messages`);
      // Rate limiting delay (skipped after the final batch)
      if (i + BATCH_SIZE < recipients.length) {
        await sleep(DELAY_BETWEEN_BATCHES);
      }
    } catch (error) {
      // A failed request fails the whole batch; count it and keep going so
      // one bad batch does not abort the rest of the campaign.
      console.error(`Batch ${batchNumber} failed:`, error);
      results.failed += batch.length;
    }
  }
  return results;
}
// Usage
const recipients = await getAllCustomerPhoneNumbers(); // 5000 numbers
const results = await sendCampaign(recipients, 'tmpl_monthly_newsletter');
console.log(`Campaign complete: ${results.sent} sent, ${results.failed} failed`);
Bulk Contact Import
CSV Import
Format your CSV file:
phone_number
+1234567890
+1987654321
+1555555555
Import script:
import { parse } from 'csv-parse';
import fs from 'fs';
/**
 * Import contacts from a CSV file (one `phone_number` column) in batches.
 *
 * Streams the file so arbitrarily large CSVs never load fully into memory.
 *
 * @param csvPath - Path to a CSV with a `phone_number` header row.
 * @returns Totals of created/failed contacts and the per-contact errors.
 */
async function importContacts(csvPath: string) {
  const parser = fs.createReadStream(csvPath).pipe(parse({
    columns: true,
    skip_empty_lines: true
  }));
  // Typed accumulator so `errors` is not inferred as never[] under strict mode.
  const results: { created: number; failed: number; errors: unknown[] } = { created: 0, failed: 0, errors: [] };
  const BATCH_SIZE = 50;
  let batch: { phoneNumber: string }[] = [];

  // Flush the current batch into the running totals.
  const drain = async () => {
    const result = await processBatch(batch);
    results.created += result.created;
    results.failed += result.failed;
    results.errors.push(...result.errors);
    batch = [];
  };

  for await (const record of parser) {
    batch.push({
      phoneNumber: record.phone_number
    });
    if (batch.length >= BATCH_SIZE) {
      await drain();
      // Rate limit protection between batches
      await sleep(100);
    }
  }

  // Process the remaining partial batch. Bug fix: the original dropped
  // `result.errors` here, so failures in the final batch went unreported.
  if (batch.length > 0) {
    await drain();
  }
  return results;
}
// Create every contact in `batch` concurrently and tally outcomes.
// Promise.all cannot reject here: each mapped callback catches its own error.
async function processBatch(batch: any[]) {
const results = { created: 0, failed: 0, errors: [] };
await Promise.all(batch.map(async (contact) => {
try {
await client.contacts.create(contact);
results.created++;
} catch (error) {
results.failed++;
// NOTE(review): assumes `error` is Error-like with a `.message` — under
// strict useUnknownInCatchVariables this needs an `instanceof Error` narrow.
results.errors.push({ contact, error: error.message });
}
}));
return results;
}Rate Limit Management
Understanding Limits
| Endpoint | Limit | Window |
|---|---|---|
POST /v3/messages | 200 requests (10 for sensitive endpoints) | 1 minute |
POST /v3/contacts | 200 requests | 1 minute |
GET /v3/* | 200 requests | 1 minute |
Implementing Rate Limiting
/**
 * Token-bucket client-side rate limiter: a bucket of `maxTokens` burst
 * capacity, refilled continuously at `refillRate` tokens per minute.
 * `acquire()` resolves once a token is available, sleeping when the
 * bucket is empty.
 */
class RateLimiter {
  private tokens: number;
  private lastRefill: number;
  private readonly maxTokens: number;
  private readonly refillRate: number; // tokens added per minute

  constructor(maxTokens: number, refillRate: number) {
    this.maxTokens = maxTokens;
    this.tokens = maxTokens;
    this.refillRate = refillRate;
    this.lastRefill = Date.now();
  }

  /** Consume one token, waiting for a refill when none are available. */
  async acquire(): Promise<void> {
    // Loop instead of recursing (as the original did) so long contention
    // cannot grow the call stack / promise chain without bound.
    for (;;) {
      this.refill();
      if (this.tokens >= 1) {
        this.tokens--;
        return;
      }
      // Milliseconds until the deficit (1 - tokens) is refilled.
      const waitTime = (1 - this.tokens) * (60000 / this.refillRate);
      await RateLimiter.delay(waitTime);
    }
  }

  /** Credit tokens accrued since the last refill, capped at bucket size. */
  private refill(): void {
    const now = Date.now();
    const elapsed = now - this.lastRefill;
    const tokensToAdd = (elapsed / 60000) * this.refillRate;
    this.tokens = Math.min(this.maxTokens, this.tokens + tokensToAdd);
    this.lastRefill = now;
  }

  // Self-contained sleep so the class no longer depends on an external,
  // undefined `sleep` helper.
  private static delay(ms: number): Promise<void> {
    return new Promise(resolve => setTimeout(resolve, ms));
  }
}
// Usage
const limiter = new RateLimiter(200, 200); // 200 requests per minute (standard)
// Gate every send on the shared limiter so bursts stay inside the
// 200 req/min window instead of triggering 429s server-side.
async function sendWithRateLimit(phoneNumber: string, templateId: string) {
await limiter.acquire();
return client.messages.send({ to: [phoneNumber], template: { id: templateId } });
}Handling Rate Limit Errors
// Retry a single send with exponential backoff when the API returns HTTP 429.
// Non-429 errors are rethrown immediately; after maxRetries 429s it gives up.
async function sendWithBackoff(phoneNumber: string, templateId: string, maxRetries = 3) {
for (let attempt = 0; attempt <= maxRetries; attempt++) {
try {
return await client.messages.send({ to: [phoneNumber], template: { id: templateId } });
} catch (error) {
if (error.status === 429) {
// Get retry-after header or use exponential backoff
// NOTE(review): header values are strings — presumably numeric seconds here;
// confirm the SDK parses them, otherwise wrap in Number() before multiplying.
const retryAfter = error.headers['retry-after'] || Math.pow(2, attempt);
console.log(`Rate limited. Waiting ${retryAfter} seconds...`);
await sleep(retryAfter * 1000);
continue;
}
// Anything other than a rate limit is not retryable here.
throw error;
}
}
throw new Error('Max retries exceeded');
}Queue-Based Architecture
For production systems handling high volume:
// messageQueue.ts
import { Queue } from 'bullmq';
// Redis-backed BullMQ queue; producer and worker both point at the same Redis.
const messageQueue = new Queue('messages', {
connection: { host: 'localhost', port: 6379 }
});
// Add message to queue
// Enqueue a single send; BullMQ retries each job up to 3 times with
// exponential backoff (5s base) and keeps only the last 100 completed jobs.
export async function queueMessage(phoneNumber: string, templateId: string) {
await messageQueue.add('send-message', {
phoneNumber,
templateId
}, {
attempts: 3,
backoff: { type: 'exponential', delay: 5000 },
removeOnComplete: 100
});
}
// Worker process (separate service)
import { Worker } from 'bullmq';
const worker = new Worker('messages', async (job) => {
const { phoneNumber, templateId } = job.data;
try {
const result = await client.messages.send({
to: [phoneNumber],
template: { id: templateId }
});
// Store message ID for tracking
// NOTE(review): `db` is presumably an ORM/repository handle defined elsewhere — verify.
await db.messages.create({
phoneNumber,
templateId,
sentMessageId: result.data.recipients[0].message_id,
status: 'pending'
});
return result;
} catch (error) {
// Log for investigation
await db.failedMessages.create({
phoneNumber,
templateId,
error: error.message,
timestamp: new Date()
});
// Rethrow so BullMQ marks the job failed and applies the retry policy above.
throw error;
}
}, {
connection: { host: 'localhost', port: 6379 },
concurrency: 10 // Process 10 messages concurrently
});Performance Optimization
Connection Pooling
import { Agent } from 'https';
// Keep TLS connections alive and reuse them across requests instead of
// paying a handshake per API call.
const httpsAgent = new Agent({
keepAlive: true,
maxSockets: 50,
maxFreeSockets: 10,
timeout: 60000,
freeSocketTimeout: 30000
});
// Hand the pooled agent to the SDK client.
const client = new SentDm({
httpAgent: httpsAgent
});Batching with Async Generators
/**
 * Yield `items` in consecutive slices of at most `batchSize` elements.
 * The final batch may be shorter when the length is not an exact multiple.
 */
async function* batchGenerator<T>(items: T[], batchSize: number) {
  let start = 0;
  while (start < items.length) {
    const end = start + batchSize;
    yield items.slice(start, end);
    start = end;
  }
}
// Usage
const recipients = await getAllRecipients();
// Stream 1000-recipient slices through the generator; each iteration is
// exactly one API request.
for await (const batch of batchGenerator(recipients, 1000)) {
await client.messages.send({
to: batch.map(r => r.phoneNumber),
template: { id: 'tmpl_announcement' }
});
}Monitoring Bulk Operations
Progress Tracking
// Send in fixed batches of 1000 and log percentage progress after each
// request. Unlike sendCampaign above, a failed batch aborts the run
// (no try/catch), so use this for supervised/interactive sends.
async function sendWithProgress(recipients: string[], templateId: string) {
const total = recipients.length;
let processed = 0;
for (let i = 0; i < recipients.length; i += 1000) {
const batch = recipients.slice(i, i + 1000);
await client.messages.send({
to: batch,
template: { id: templateId }
});
processed += batch.length;
const percent = ((processed / total) * 100).toFixed(1);
console.log(`Progress: ${processed}/${total} (${percent}%)`);
}
}Metrics
// Suggested instrumentation for batch pipelines (adapt to your metrics client):
// a success/failure counter plus batch-size and duration distributions.
metrics.counter('batch_send_total', { status: 'success' });
metrics.counter('batch_send_total', { status: 'failed' });
metrics.histogram('batch_size');
metrics.histogram('batch_duration_seconds');Best Practices
1. Always Handle Partial Failures
// Compare the accepted-recipient count against the request to detect rejects.
const response = await client.messages.send({ to: recipients, template: { id } });
// Note: Response only confirms acceptance. Subscribe to webhooks for delivery status
// For sandbox mode testing, you may want to validate numbers before sending
if (sandbox && response.data.recipients.length < recipients.length) {
console.warn('Some recipients were rejected');
}2. Implement Idempotency
// A deterministic key per campaign+batch lets safe retries deduplicate server-side.
const idempotencyKey = `campaign_${campaignId}_${batchNumber}`;
// Send via Idempotency-Key header, not request body
await client.messages.send({...}, { headers: { 'Idempotency-Key': idempotencyKey } });3. Monitor Your Queue Depth
// Alert when the backlog grows faster than workers can drain it.
const queueStats = await messageQueue.getJobCounts();
if (queueStats.waiting > 10000) {
await alertTeam('High queue depth - consider scaling workers');
}4. Test with Small Batches First
// Test with 10 recipients
const testResults = await sendCampaign(recipients.slice(0, 10), templateId);
// Only launch the full campaign when the pilot batch had zero failures.
if (testResults.failed === 0) {
// Proceed with full campaign
await sendCampaign(recipients, templateId);
}