Parallel Test - How to implement different Thresholds for different scenarios

I want to execute multiple k6 test scripts in parallel. They live in separate files, each with its own thresholds. I am defining custom thresholds for each API to monitor its duration, waiting, and blocked time. Some of the APIs are common across all the test script files.
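
For context, measureMetric.js (not included here) wraps one custom Trend metric per timing and names them API_Duration, API_Blocked, and API_Waiting; a simplified sketch of it:

 // measureMetric.js (simplified sketch, not the full file)
 import { Trend } from 'k6/metrics';

 export class measureMetric {
   constructor(name) {
     // one Trend per timing, e.g. API1_Duration, API1_Blocked, API1_Waiting
     this.duration = new Trend(name + '_Duration');
     this.blocked = new Trend(name + '_Blocked');
     this.waiting = new Trend(name + '_Waiting');
   }
   trackDuration(res) { this.duration.add(res.timings.duration); }
   trackBlocked(res) { this.blocked.add(res.timings.blocked); }
   trackWait(res) { this.waiting.add(res.timings.waiting); }
 }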

// Test file 1

import { check, group } from 'k6';
import "./libs/shim/core.js";
import "./libs/shim/urijs.js";
import http from "k6/http";
 import { Rate, Trend, Counter } from 'k6/metrics';
import { sleep } from 'k6';
import {measureMetric} from "./measureMetric.js";
 
 
 //const iWaitTime = 3000;
 let API1 = new measureMetric("API1");
 let API2 = new measureMetric("API2");
 let API3 = new measureMetric("API3");
 
 
 export let options = {
     thresholds: {    
     
     http_req_failed: ['rate<0.01'],   // 99% availability 
     http_req_waiting: ['p(95)<3000'], // 95th percentile latency under 3s
 
     API1_Duration: ['p(95)<3000'],
     API1_Blocked: ['p(95)<3000'],
     API1_Waiting: ['p(95)<3000'],
     'checks{scenario1:scene1}': ['rate>0.9'],  // 90% checks success
         
     
     API2_Duration: ['p(95)<3000'],
     API2_Blocked: ['p(95)<3000'],
     API2_Waiting: ['p(95)<3000'],
     'checks{scenario1:scene2}': ['rate>0.9'],  // rate of successful checks should be higher than 90%
 
     'group_duration{group:::grpAPI}': ['avg<5000'],
     'checks{scenario1:files}': ['rate>0.9'],
     checks: ['rate>0.9'], // rate of successful checks should be higher than 90%
     'checks{scenario1:scene3}': ['rate>0.9'],
 
 
     API3_Duration: ['p(95)<3000'],
     API3_Blocked: ['p(95)<3000'],
     API3_Waiting: ['p(95)<3000'],
  },
   
 };
 
 const url = "https://abcd.com";
 const params = {
   headers: {
     'Authorization':'abcs',    
     'Content-Type': 'application/json'
   }};
 
 
 export default function() {
 
   let accountNo;
   //1.api 1 
   let name = { name: 'Act1' }; // placeholder payload
   let endPoint = "/abcd/";  
   let res = http.post(url+endPoint, JSON.stringify(name), params);
   check(res, {
     'is status 201': (r) => r.status === 201,
   },
   {scenario1 : 'scene1'},
   );
   
   API1.trackDuration(res);
   API1.trackBlocked(res);
   API1.trackWait(res);  
   accountNo = res.json().accountNo;  
   console.log(res.body);
   
 
   //2.API 2
 
   let data = { value: 'abcd' }; // placeholder payload
 
   endPoint = "abcder";
   res = http.put(url+endPoint, JSON.stringify(data), params);
   console.log(res.body);
   check(res, {
     'is status 201': (r) => r.status === 201,
   },
   {scenario1 : 'scene2'},
   );
   API2.trackDuration(res);
   API2.trackBlocked(res);
   API2.trackWait(res);
 
   sleep(5);
   
   //3. API 3
   data = { value: 'abcd' };
 
   endPoint = "ghggh";   
   res = http.put(url+endPoint,JSON.stringify(data), params);  
   console.log("edit account"); 
   check(res, {
     'is status 201': (r) => r.status === 201,
   },
   {scenario1 : 'scene3'},
   );
   API3.trackDuration(res);
   API3.trackBlocked(res);
   API3.trackWait(res);   
 
 }

// Test file 2

 import { check, group } from 'k6';
 import "./libs/shim/core.js";
 import "./libs/shim/urijs.js";
 import http from "k6/http";
 import { Rate, Trend, Counter } from 'k6/metrics';
 import { sleep } from 'k6';
 import {measureMetric} from "./measureMetric.js";
 
 
 //const iWaitTime = 3000;
 let API1 = new measureMetric("API1");
 let API2 = new measureMetric("API2");
 let API4 = new measureMetric("API4");
 
 
 export let options = {
     thresholds: {    
     
     http_req_failed: ['rate<0.01'],   // 99% availability 
     http_req_waiting: ['p(95)<3000'], // 95th percentile latency under 3s
 
     API1_Duration: ['p(95)<5000'],
     API1_Blocked: ['p(95)<5000'],
     API1_Waiting: ['p(95)<5000'],
     'checks{scenario1:scene1}': ['rate>0.9'],  // 90% checks success
         
     
     API2_Duration: ['p(95)<1000'],
     API2_Blocked: ['p(95)<1000'],
     API2_Waiting: ['p(95)<1000'],
     'checks{scenario1:scene2}': ['rate>0.9'],  // rate of successful checks should be higher than 90%
 
     'group_duration{group:::grpAPI}': ['avg<5000'],
     'checks{scenario1:files}': ['rate>0.9'],
     checks: ['rate>0.9'], // rate of successful checks should be higher than 90%
     'checks{scenario1:scene4}': ['rate>0.9'],
 
 
     API4_Duration: ['p(95)<3000'],
     API4_Blocked: ['p(95)<3000'],
     API4_Waiting: ['p(95)<3000'],
 
   },
   
 };
 
 const url = "https://abcd.com";
 const params = {
   headers: {
     'Authorization':'abcs',    
     'Content-Type': 'application/json'
   }};
 
 
 export default function() {
 
   let accountNo;
   //1.api 1 
   let name = { name: 'Act1' }; // placeholder payload
   let endPoint = "/abcd/";  
   let res = http.post(url+endPoint, JSON.stringify(name), params);
   check(res, {
     'is status 201': (r) => r.status === 201,
   },
   {scenario1 : 'scene1'},
   );
   
   API1.trackDuration(res);
   API1.trackBlocked(res);
   API1.trackWait(res);  
   accountNo = res.json().accountNo;  
   console.log(res.body);
   
 
   //2.API 2
 
   let data = { value: 'abcd' }; // placeholder payload
 
   endPoint = "abcder";
   res = http.put(url+endPoint, JSON.stringify(data), params);
   console.log(res.body);
   check(res, {
     'is status 201': (r) => r.status === 201,
   },
   {scenario1 : 'scene2'},
   );
   API2.trackDuration(res);
   API2.trackBlocked(res);
   API2.trackWait(res);
 
   sleep(5);
   
   //3. API 4
   data = { value: 'abcd' };
 
   endPoint = "ghggh";   
   res = http.put(url+endPoint,JSON.stringify(data), params);  
   console.log("edit account"); 
   check(res, {
     'is status 201': (r) => r.status === 201,
   },
   {scenario1 : 'scene4'},
   );
   API4.trackDuration(res);
   API4.trackBlocked(res);
   API4.trackWait(res);   
 
 }

How do I run them in parallel? I also want to track each common API (e.g. which test/scenario ran it; that will help me build dashboards). How do I do that? I wrote the following combined test, but it is not sufficient: when I run it, I get the breakdown for API 1 but not for the others, so I don’t know what is happening.

Can anyone help?

 import "./libs/shim/core.js";
 import "./libs/shim/urijs.js";
 import "./Test1.js";
 import "./Test2.js";
 export {default as script1} from './Test1.js';
 export {default as script2} from './Test2.js';
 export {options as options1} from './Test1.js';
 export {options as options2} from './Test2.js';
 
 export let options = {
     scenarios: {
         'scenario1': {
             executor: 'constant-vus',
             vus: 10,            
             duration: '1m',
             exec: 'script1',
         },
         'scenario2': {
             executor: 'per-vu-iterations',
             vus: 10,
             iterations: 20,
             maxDuration: '10m',
             exec: 'script2',
         },
     }
 }

Can anyone please help?

Hi @Jay

You are currently running 2 scenarios that start at the same time, which I think can be considered “parallel”. Note, however, that all metrics are tagged with the scenario name by default, so you can use that tag in your thresholds. E.g.:

// instead of
// API1_Duration: ['p(95)<3000'],
// use:
'API1_Duration{scenario:scenario1}': ['p(95)<3000'],

This should make the thresholds specific to each scenario instead of global to the whole test run.
Relevant docs are here.
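
For example, in your combined script the thresholds for the shared API could look like this (a sketch; the limits below just mirror the values from your two files):

 export let options = {
     scenarios: {
         // scenario1 and scenario2 exactly as you defined them above
     },
     thresholds: {
         // API1 is hit by both scripts; tagging by scenario makes k6
         // evaluate each threshold against its own sub-metric:
         'API1_Duration{scenario:scenario1}': ['p(95)<3000'],
         'API1_Duration{scenario:scenario2}': ['p(95)<5000'],
         'API1_Waiting{scenario:scenario1}': ['p(95)<3000'],
         'API1_Waiting{scenario:scenario2}': ['p(95)<5000'],
     },
 };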
Would that help?

Otherwise, it’s not fully clear what exactly is “not sufficient” from your perspective: could you please add the output you get and what you expect to see instead? Also, in your example it’s not clear how exactly you’re setting thresholds: you export two sets of them (options1 and options2), but neither is applied in the combined test’s options.
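
For instance, if you want to reuse the thresholds from the two files, one option (a sketch; it assumes Test1.js and Test2.js keep exporting their own options) is to merge them in the combined script:

 import { options as options1 } from './Test1.js';
 import { options as options2 } from './Test2.js';

 export let options = {
     scenarios: { /* scenario1 and scenario2 as above */ },
     // Merge both threshold maps. Duplicate keys such as API1_Duration
     // collide (the Test2.js value wins here), which is another reason
     // to prefer the scenario-tagged names shown above.
     thresholds: Object.assign({}, options1.thresholds, options2.thresholds),
 };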


Thanks so much. That is helpful.