Overview
The GetBill API implements rate limiting to ensure fair usage and maintain service quality for all users. Rate limits are applied per OAuth client and are based on a sliding window algorithm.
Current Rate Limits
Standard Endpoints 1,000 requests per hour Applies to most read operations like listing debts, getting followups, etc.
Write Operations 500 requests per hour Applies to create, update, and delete operations.
Rate limits may be adjusted based on your subscription plan and usage patterns. Contact support for higher limits if needed.
Every API response includes rate limit information in the headers:
X-RateLimit-Limit: 1000
X-RateLimit-Remaining: 847
X-RateLimit-Reset: 1641993600
X-RateLimit-Window: 3600
`X-RateLimit-Limit` — the total number of requests allowed in the current window.
`X-RateLimit-Remaining` — the number of requests remaining in the current window.
`X-RateLimit-Reset` — Unix timestamp when the rate limit window resets.
`X-RateLimit-Window` — the length of the rate limit window in seconds.
Rate Limit Exceeded Response
When you exceed the rate limit, you’ll receive a 429 Too Many Requests response:
{
  "error": true,
  "message": "Rate limit exceeded. Please try again later.",
  "code": 429,
  "details": {
    "retry_after": 1800,
    "limit": 1000,
    "reset_time": "2024-01-15T10:30:00Z"
  }
}
`retry_after` — number of seconds to wait before making another request.
`limit` — the rate limit that was exceeded.
`reset_time` — ISO 8601 timestamp when the rate limit resets.
Handling Rate Limits
1. Track Rate Limit Headers and Throttle Proactively
class RateLimitAwareClient {
  /**
   * API client that tracks the X-RateLimit-* response headers and throttles
   * itself before the server has to reject a request.
   * @param {string} accessToken - OAuth bearer token sent with every request.
   */
  constructor(accessToken) {
    this.accessToken = accessToken;
    this.rateLimitRemaining = null; // null = unknown (no response seen yet)
    this.rateLimitReset = null;     // Unix seconds; null = unknown
  }

  /**
   * Perform a fetch, waiting for the window to reset when the quota is nearly
   * exhausted, and retrying (a bounded number of times) on 429 responses.
   * @param {string} url - Request URL.
   * @param {object} [options] - fetch options; headers are merged with auth.
   * @param {number} [retries=3] - Remaining 429 retries before giving up and
   *   returning the 429 response as-is (new, backward-compatible parameter;
   *   the previous behavior retried forever).
   * @returns {Promise<Response>}
   */
  async makeRequest(url, options = {}, retries = 3) {
    // Throttle proactively when fewer than 10 requests remain in the window.
    if (this.rateLimitRemaining !== null && this.rateLimitRemaining < 10) {
      const waitTime = this.getWaitTime();
      if (waitTime > 0) {
        console.log(`Rate limit low, waiting ${waitTime}ms`);
        await this.sleep(waitTime);
      }
    }

    const response = await fetch(url, {
      ...options,
      headers: {
        'Authorization': `Bearer ${this.accessToken}`,
        'Content-Type': 'application/json',
        ...options.headers
      }
    });

    // Update rate limit info from headers before deciding whether to retry.
    this.updateRateLimitInfo(response.headers);

    if (response.status === 429 && retries > 0) {
      const error = await response.json();
      // ?? (not ||) so an explicit retry_after of 0 is honored.
      const retryAfter = error.details?.retry_after ?? 60;
      console.log(`Rate limited, waiting ${retryAfter} seconds`);
      await this.sleep(retryAfter * 1000);
      return this.makeRequest(url, options, retries - 1); // Bounded retry
    }

    return response;
  }

  /**
   * Record the latest rate-limit headers.
   * Note: 0 is a valid "remaining" value and must not collapse to null
   * (the old `parseInt(...) || null` did exactly that, disabling the
   * proactive throttle at the worst possible moment), so NaN is tested
   * explicitly instead.
   * @param {Headers} headers - Response headers (anything with a .get()).
   */
  updateRateLimitInfo(headers) {
    const remaining = Number.parseInt(headers.get('X-RateLimit-Remaining'), 10);
    const reset = Number.parseInt(headers.get('X-RateLimit-Reset'), 10);
    this.rateLimitRemaining = Number.isNaN(remaining) ? null : remaining;
    this.rateLimitReset = Number.isNaN(reset) ? null : reset;
  }

  /**
   * Milliseconds until the current window resets; 0 when unknown or already
   * in the past.
   * @returns {number}
   */
  getWaitTime() {
    if (this.rateLimitReset === null) return 0;
    const now = Math.floor(Date.now() / 1000);
    return Math.max(0, (this.rateLimitReset - now) * 1000);
  }

  /**
   * Promise-based delay.
   * @param {number} ms - Milliseconds to wait.
   * @returns {Promise<void>}
   */
  sleep(ms) {
    return new Promise(resolve => setTimeout(resolve, ms));
  }
}
2. Implement Exponential Backoff
/**
 * Fetch with exponential backoff: 429 responses are retried with a capped
 * 1s/2s/4s/... delay; network errors are retried with uncapped exponential
 * delay and rethrown once attempts are exhausted.
 *
 * Fix over the previous version: when every attempt was rate limited the
 * function fell off the end of the loop and resolved to `undefined`; it now
 * returns the final 429 response so callers can inspect it. It also no
 * longer sleeps after the last attempt.
 *
 * @param {string} url - Request URL.
 * @param {object} options - fetch options, passed through unchanged.
 * @param {number} [maxRetries=3] - Total number of attempts.
 * @returns {Promise<Response>} The first non-429 response, or the last 429.
 * @throws The last network error when all attempts fail.
 */
async function makeRequestWithBackoff(url, options, maxRetries = 3) {
  let lastResponse;
  for (let i = 0; i < maxRetries; i++) {
    try {
      const response = await fetch(url, options);
      if (response.status === 429) {
        lastResponse = response;
        if (i === maxRetries - 1) break; // out of attempts; surface the 429
        const backoffTime = Math.min(1000 * Math.pow(2, i), 60000); // Max 60 seconds
        console.log(`Rate limited, backing off for ${backoffTime}ms`);
        await new Promise(resolve => setTimeout(resolve, backoffTime));
        continue;
      }
      return response;
    } catch (error) {
      if (i === maxRetries - 1) throw error;
      const backoffTime = 1000 * Math.pow(2, i);
      await new Promise(resolve => setTimeout(resolve, backoffTime));
    }
  }
  // Every attempt was rate limited: return the final 429 instead of undefined.
  return lastResponse;
}
3. Queue Requests Client-Side
For high-volume applications, implement a request queue:
class RequestQueue {
  /**
   * Client-side sliding-window throttle. Queued request functions are
   * executed one at a time, never exceeding `rateLimit` calls within any
   * `windowMs`-millisecond window.
   * @param {number} [rateLimit=1000] - Max requests per window.
   * @param {number} [windowMs=3600000] - Window length in ms (1 hour).
   */
  constructor(rateLimit = 1000, windowMs = 3600000) {
    this.queue = [];
    this.processing = false;
    this.rateLimit = rateLimit;
    this.windowMs = windowMs;
    this.requestTimes = []; // Date.now() of each request still inside the window
  }

  /**
   * Schedule a request function; the returned promise settles with that
   * function's eventual result or error.
   * @param {() => Promise<any>} requestFn
   * @returns {Promise<any>}
   */
  async enqueue(requestFn) {
    return new Promise((resolve, reject) => {
      this.queue.push({ requestFn, resolve, reject });
      this.processQueue();
    });
  }

  /**
   * Drain the queue sequentially, pausing whenever the window is full.
   * Re-entrant calls are no-ops while a drain is already running.
   */
  async processQueue() {
    if (this.processing) return;
    if (this.queue.length === 0) return;
    this.processing = true;
    try {
      while (this.queue.length > 0) {
        if (!this.canMakeRequest()) {
          const waitTime = this.getWaitTime();
          console.log(`Rate limit reached, waiting ${waitTime}ms`);
          await this.sleep(waitTime);
          continue;
        }
        const job = this.queue.shift();
        try {
          this.recordRequest();
          job.resolve(await job.requestFn());
        } catch (error) {
          job.reject(error);
        }
      }
    } finally {
      this.processing = false;
    }
  }

  /** True when another request fits inside the current window. */
  canMakeRequest() {
    this.cleanupOldRequests();
    return this.requestTimes.length < this.rateLimit;
  }

  /** Stamp the current moment as a consumed slot in the window. */
  recordRequest() {
    this.requestTimes.push(Date.now());
  }

  /** Forget timestamps that have slid out of the window. */
  cleanupOldRequests() {
    const cutoff = Date.now() - this.windowMs;
    this.requestTimes = this.requestTimes.filter((stamp) => stamp > cutoff);
  }

  /** Milliseconds until the oldest recorded request leaves the window. */
  getWaitTime() {
    if (this.requestTimes.length === 0) return 0;
    const windowEnd = Math.min(...this.requestTimes) + this.windowMs;
    return Math.max(0, windowEnd - Date.now());
  }

  /** Promise-based delay. */
  sleep(ms) {
    return new Promise((resolve) => setTimeout(resolve, ms));
  }
}
// Usage: funnel several API calls through one shared queue.
const queue = new RequestQueue();

// Each enqueue() returns a promise for that individual request's result.
const promises = ['id1', 'id2', 'id3'].map((debtId) =>
  queue.enqueue(() => api.getDebt(debtId))
);

const results = await Promise.all(promises);
Best Practices
Batch Operations When possible, batch multiple operations into single requests to reduce API calls.
Cache Responses Cache API responses when appropriate to reduce redundant requests.
Use Webhooks Use webhooks for real-time updates instead of frequent polling.
Optimize Pagination Use larger page sizes (up to 100) to reduce the number of requests needed.
Efficient Data Fetching
// ✅ Good: Batch requests and use appropriate page sizes
async function getRecentDebts() {
  // One request with the maximum page size instead of many small pages.
  const { data } = await api.getDebts({
    page: 1,
    limit: 100,
    created_after: '2024-01-01'
  });
  return data;
}
// ❌ Bad: Multiple small requests
async function getRecentDebts() {
  // Ten sequential round-trips for data one request could return.
  let results = [];
  for (let page = 1; page <= 10; page++) {
    const response = await api.getDebts({ page, limit: 10 });
    results = results.concat(response.data);
  }
  return results;
}
Caching Strategy
class CachedAPIClient {
  /**
   * Read-through cache wrapper around GetBillAPI: responses are kept in
   * memory and reused until `cacheTimeout` milliseconds have elapsed.
   * @param {string} accessToken - Token forwarded to the underlying client.
   * @param {number} [cacheTimeout=300000] - Entry TTL in ms (5 minutes).
   */
  constructor(accessToken, cacheTimeout = 300000) {
    this.client = new GetBillAPI(accessToken);
    this.cache = new Map();
    this.cacheTimeout = cacheTimeout;
  }

  /**
   * Fetch a debt, serving the cached copy while it is still fresh.
   * @param {string} id - Debt identifier.
   * @returns {Promise<object>}
   */
  async getDebt(id) {
    const cacheKey = `debt:${id}`;
    const entry = this.cache.get(cacheKey);
    const isFresh = entry && Date.now() - entry.timestamp < this.cacheTimeout;
    if (isFresh) {
      return entry.data;
    }
    const data = await this.client.getDebt(id);
    this.cache.set(cacheKey, { data, timestamp: Date.now() });
    return data;
  }

  /**
   * Evict every cache entry whose key contains `pattern`
   * (e.g. after a write that makes cached reads stale).
   * @param {string} pattern - Substring to match against cache keys.
   */
  invalidateCache(pattern) {
    const staleKeys = [...this.cache.keys()].filter((key) => key.includes(pattern));
    for (const key of staleKeys) {
      this.cache.delete(key);
    }
  }
}
Rate Limit Tiers
Different subscription plans may have different rate limits:
| Plan | Read Operations | Write Operations | Burst Limit |
| --- | --- | --- | --- |
| Starter | 500/hour | 100/hour | 10/minute |
| Professional | 1,000/hour | 500/hour | 50/minute |
| Enterprise | 5,000/hour | 2,000/hour | 200/minute |
Burst limits allow short periods of higher activity but are enforced over a 1-minute window.
Monitoring Usage
Track your API usage to optimize requests:
class UsageTracker {
  /** Counts outgoing API requests and periodically logs the hourly rate. */
  constructor() {
    this.requestCount = 0;
    this.startTime = Date.now();
  }

  /**
   * Record one request and log it; every 100th request also reports the
   * average request rate.
   * @param {string} endpoint - Path that was called.
   * @param {string} method - HTTP method used.
   */
  trackRequest(endpoint, method) {
    this.requestCount++;
    console.log(`Request #${this.requestCount}: ${method} ${endpoint}`);
    if (this.requestCount % 100 === 0) {
      this.reportUsage();
    }
  }

  /** Log the average requests/hour observed since construction. */
  reportUsage() {
    // Clamp elapsed time to >= 1ms: a burst of requests in the same
    // millisecond as construction previously divided by zero and logged
    // "Infinity requests/hour".
    const elapsed = Math.max(1, Date.now() - this.startTime);
    const rate = (this.requestCount / elapsed) * 3600000; // Requests per hour
    console.log(`Current rate: ${rate.toFixed(2)} requests/hour`);
  }
}
Getting Higher Limits
If you need higher rate limits:
Optimize your current usage - Implement caching, batching, and efficient pagination
Contact support - Reach out to contact@getbill.io with:
Your current usage patterns
Business justification for higher limits
Details about your integration
Consider webhooks - Use webhooks for real-time updates instead of polling
Upgrade your plan - Higher-tier plans include increased rate limits
Status Monitoring
Monitor rate limit status programmatically:
class RateLimitMonitor {
  /**
   * Wraps an API client to collect rate-limit health metrics: total
   * requests, 429 count, and an exponential moving average of the
   * remaining quota.
   * @param {{ makeRequest: (url: string, options: object) => Promise<Response> }} apiClient
   */
  constructor(apiClient) {
    this.apiClient = apiClient;
    this.metrics = {
      requests: 0,
      rateLimited: 0,
      averageRemaining: 0
    };
  }

  /**
   * Issue a request through the wrapped client, updating metrics on both
   * success and failure. Errors are recorded (when status === 429) and
   * rethrown unchanged.
   * @param {string} url
   * @param {object} options
   * @returns {Promise<Response>}
   */
  async monitoredRequest(url, options) {
    this.metrics.requests++;
    try {
      const response = await this.apiClient.makeRequest(url, options);
      // Radix-10 parse + Number.isNaN instead of the bare parseInt/isNaN
      // globals (avoids accidental hex parsing and value coercion).
      const remaining = Number.parseInt(response.headers.get('X-RateLimit-Remaining'), 10);
      if (!Number.isNaN(remaining)) {
        this.updateAverageRemaining(remaining);
      }
      return response;
    } catch (error) {
      if (error.status === 429) {
        this.metrics.rateLimited++;
      }
      throw error;
    }
  }

  /**
   * Fold a new "remaining" sample into the exponential moving average.
   * @param {number} remaining - Latest X-RateLimit-Remaining value.
   */
  updateAverageRemaining(remaining) {
    const alpha = 0.1; // Smoothing factor: 10% new sample, 90% history
    this.metrics.averageRemaining =
      alpha * remaining + (1 - alpha) * this.metrics.averageRemaining;
  }

  /**
   * Health score in [0, 1]: 1 = never rate limited with a full quota,
   * 0 = always rate limited or no quota left.
   * @returns {number}
   */
  getHealthScore() {
    if (this.metrics.requests === 0) return 1;
    const rateLimitedRatio = this.metrics.rateLimited / this.metrics.requests;
    // Clamp to [0, 1] so plans with limits above 1000/hour cannot push the
    // score past 1 (previously unbounded above).
    const remainingRatio = Math.min(1, Math.max(0, this.metrics.averageRemaining / 1000));
    return Math.max(0, 1 - rateLimitedRatio) * remainingRatio;
  }
}