chore: optimised updateDependencyGraph code (#41117)

## Description
Added code optimisations around updateDependencyGraph by caching AST
parsing and made lower-level code optimisations by using sets. Observed
a 40% reduction in updateDependencyGraph execution time in a customer
app. In addition, made optimisations to linkAffectedChildNodesToParent
so that we aren't recomputing the result for the same node.
_or_  
Fixes `Issue URL`
> [!WARNING]  
> _If no issue exists, please create an issue first, and check with the
maintainers if the issue is valid._

## Automation

/ok-to-test tags="@tag.All"

### 🔍 Cypress test results
<!-- This is an auto-generated comment: Cypress test results  -->
> [!TIP]
> 🟢 🟢 🟢 All cypress tests have passed! 🎉 🎉 🎉
> Workflow run:
<https://github.com/appsmithorg/appsmith/actions/runs/16398467407>
> Commit: e5d8a165ac49fb205f5bb344979d09d1ebc2a225
> <a
href="https://internal.appsmith.com/app/cypress-dashboard/rundetails-65890b3c81d7400d08fa9ee5?branch=master&workflowId=16398467407&attempt=2"
target="_blank">Cypress dashboard</a>.
> Tags: `@tag.All`
> Spec:
> <hr>Sun, 20 Jul 2025 12:41:19 UTC
<!-- end of auto-generated comment: Cypress test results  -->


## Communication
Should the DevRel and Marketing teams inform users about this change?
- [ ] Yes
- [ ] No


<!-- This is an auto-generated comment: release notes by coderabbit.ai
-->
## Summary by CodeRabbit

* **New Features**
* Improved dependency management with a new utility for comparing sets,
enhancing accuracy in tracking changes.
* **Chores**
* Optimized internal logic for handling dependencies to improve
performance and maintainability.
* Enhanced code parsing efficiency with caching to speed up repeated
analyses.
* Refined sorting logic to better handle duplicates and improve
processing speed.
<!-- end of auto-generated comment: release notes by coderabbit.ai -->
This commit is contained in:
Vemparala Surya Vamsi 2025-07-21 12:54:23 +05:30 committed by GitHub
parent 16ea831fac
commit 95c70aabb5
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 71 additions and 40 deletions

View File

@ -403,29 +403,45 @@ export interface IdentifierInfo {
variables: string[];
isError: boolean;
}
// Extracted function to sanitize, wrap, parse code, and call ancestorWalk, now with caching.
// The cache is bounded to avoid unbounded memory growth in long-lived sessions:
// once full, the oldest entry is evicted (Map preserves insertion order, giving FIFO).
const sanitizedWrappedAncestorWalkCache = new Map<string, NodeList>();
const SANITIZED_WRAPPED_ANCESTOR_WALK_CACHE_MAX_SIZE = 1000;

/**
 * Sanitizes, wraps, and parses `code`, then runs `ancestorWalk` over the AST.
 * Results are memoized per (evaluationVersion, code) pair, since the same
 * snippets are re-analyzed repeatedly during dependency-graph updates.
 *
 * @param code - The raw user code/script to analyze.
 * @param evaluationVersion - Evaluation version; it affects sanitization, so
 *   it is part of the cache key.
 * @returns The NodeList produced by `ancestorWalk` for the parsed code.
 */
function getSanitizedWrappedAncestorWalk(
  code: string,
  evaluationVersion: number,
): NodeList {
  const cacheKey = `${evaluationVersion}::${code}`;
  // Single lookup instead of has() + get()! (avoids a non-null assertion).
  const cached = sanitizedWrappedAncestorWalkCache.get(cacheKey);

  if (cached !== undefined) {
    return cached;
  }

  const sanitizedScript = sanitizeScript(code, evaluationVersion);
  // We sanitize and wrap the code because all code/script gets wrapped with a function during evaluation.
  // Some syntax won't be valid unless they're at the RHS of a statement.
  // Since we're assigning all code/script to RHS during evaluation, we do the same here.
  // So that during ast parse, those errors are neglected.
  // e.g. IIFE without braces:
  // function() { return 123; }() -> is invalid
  // let result = function() { return 123; }() -> is valid
  const wrappedCode = wrapCode(sanitizedScript);
  const ast = getAST(wrappedCode);
  const result = ancestorWalk(ast);

  // Evict the oldest entry when the cache is full (simple FIFO bound) so the
  // cache cannot grow without limit as users edit code.
  if (
    sanitizedWrappedAncestorWalkCache.size >=
    SANITIZED_WRAPPED_ANCESTOR_WALK_CACHE_MAX_SIZE
  ) {
    const oldestKey = sanitizedWrappedAncestorWalkCache.keys().next().value;

    if (oldestKey !== undefined) {
      sanitizedWrappedAncestorWalkCache.delete(oldestKey);
    }
  }

  sanitizedWrappedAncestorWalkCache.set(cacheKey, result);

  return result;
}
export const extractIdentifierInfoFromCode = (
code: string,
evaluationVersion: number,
invalidIdentifiers?: Record<string, unknown>,
): IdentifierInfo => {
let ast: Node = { end: 0, start: 0, type: "" };
try {
const sanitizedScript = sanitizeScript(code, evaluationVersion);
/* wrapCode - Wrapping code in a function, since all code/script get wrapped with a function during evaluation.
Some syntax won't be valid unless they're at the RHS of a statement.
Since we're assigning all code/script to RHS during evaluation, we do the same here.
So that during ast parse, those errors are neglected.
*/
/* e.g. IIFE without braces
function() { return 123; }() -> is invalid
let result = function() { return 123; }() -> is valid
*/
const wrappedCode = wrapCode(sanitizedScript);
ast = getAST(wrappedCode);
const { functionalParams, references, variableDeclarations }: NodeList =
ancestorWalk(ast);
getSanitizedWrappedAncestorWalk(code, evaluationVersion);
const referencesArr = Array.from(references).filter((reference) => {
// To remove references derived from declared variables and function params,
// We extract the topLevelIdentifier Eg. Api1.name => Api1

View File

@ -96,19 +96,26 @@ export class DependencyMapUtils {
) {
const dependencies = dependencyMap.rawDependencies;
// We don't want to process the same node multiple times
// STEP 1: Collect all unique nodes that need processing
const nodesToProcess = new Set<string>();
for (const [node, deps] of dependencies.entries()) {
if (affectedSet.has(node)) {
DependencyMapUtils.makeParentsDependOnChild(dependencyMap, node);
nodesToProcess.add(node); // Just add to set, don't call function yet
}
deps.forEach((dep) => {
for (const dep of deps) {
if (affectedSet.has(dep)) {
DependencyMapUtils.makeParentsDependOnChild(dependencyMap, dep);
nodesToProcess.add(dep); // Just add to set, don't call function yet
}
}
});
}
return dependencyMap;
// STEP 2: Process each unique node exactly once
for (const nodeToProcess of nodesToProcess) {
DependencyMapUtils.makeParentsDependOnChild(dependencyMap, nodeToProcess);
}
}
static makeParentsDependOnChild = (

View File

@ -1,5 +1,4 @@
import { difference } from "lodash";
import { isChildPropertyPath } from "utils/DynamicBindingUtils";
import { isChildPropertyPath, getDifferences } from "utils/DynamicBindingUtils";
export type TDependencies = Map<string, Set<string>>;
export default class DependencyMap {
@ -107,9 +106,9 @@ export default class DependencyMap {
const newNodeDependencies = validDependencies;
// dependencies removed from path
const removedNodeDependencies = difference(
Array.from(previousNodeDependencies),
Array.from(newNodeDependencies),
const removedNodeDependencies = getDifferences(
previousNodeDependencies,
newNodeDependencies,
);
// Remove node from the inverseDependencies of removed deps
@ -122,9 +121,9 @@ export default class DependencyMap {
const newNodeInvalidDependencies = invalidDependencies;
// invalid dependencies removed from path
const removedNodeInvalidDependencies = difference(
Array.from(previousNodeInvalidDependencies),
Array.from(newNodeInvalidDependencies),
const removedNodeInvalidDependencies = getDifferences(
previousNodeInvalidDependencies,
newNodeInvalidDependencies,
);
// Remove node from the inverseDependencies of removed invalidDeps

View File

@ -640,3 +640,13 @@ export function getEntityName(
if (isJSAction(entity)) return entityConfig.name;
}
/**
 * Returns the elements present in set `a` but absent from set `b`
 * (i.e. the set difference a \ b), as an array in `a`'s iteration order.
 *
 * @param a - The set whose exclusive members are collected.
 * @param b - The set whose members are excluded from the result.
 * @returns An array of values found only in `a`.
 */
export function getDifferences<T>(a: Set<T>, b: Set<T>): T[] {
  return Array.from(a).filter((value) => !b.has(value));
}

View File

@ -77,7 +77,6 @@ import {
isObject,
isUndefined,
set,
union,
unset,
} from "lodash";
@ -1008,30 +1007,30 @@ export default class DataTreeEvaluator {
changes: Array<string>,
inverseMap: Record<string, string[]>,
): Array<string> {
let finalSortOrder: Array<string> = [];
let computeSortOrder = true;
// Initialize parents with the current sent of property paths that need to be evaluated
let parents = changes;
let subSortOrderArray: Array<string>;
let visitedNodes: string[] = [];
const visitedNodesSet = new Set<string>();
// Remove duplicates from this list. Since we explicitly walk down the tree and implicitly (by fetching parents) walk
// up the tree, there are bound to be many duplicates.
const uniqueKeysInSortOrder = new Set<string>();
while (computeSortOrder) {
// Get all the nodes that would be impacted by the evaluation of the nodes in parents array in sorted order
subSortOrderArray = this.getEvaluationSortOrder(parents, inverseMap);
visitedNodes = union(visitedNodes, parents);
// Add all the sorted nodes in the final list
finalSortOrder = union(finalSortOrder, subSortOrderArray);
// Add all parents and subSortOrderArray nodes to their respective sets
for (const node of parents) visitedNodesSet.add(node);
for (const node of subSortOrderArray) uniqueKeysInSortOrder.add(node);
parents = getImmediateParentsOfPropertyPaths(subSortOrderArray);
// If we find parents of the property paths in the sorted array, we should continue finding all the nodes dependent
// on the parents
computeSortOrder = difference(parents, visitedNodes).length > 0;
computeSortOrder = parents.some((parent) => !visitedNodesSet.has(parent));
}
// Remove duplicates from this list. Since we explicitly walk down the tree and implicitly (by fetching parents) walk
// up the tree, there are bound to be many duplicates.
const uniqueKeysInSortOrder = new Set(finalSortOrder);
// if a property path evaluation gets triggered by diff top order changes
// this could lead to incorrect sort order in spite of the bfs traversal
const sortOrderPropertyPaths: string[] = [];