chore: Datasource configuration migration to storage scaffold (#23666)

This PR is a skeleton for the datasource (DS) configuration migration to the new storage collection
This commit is contained in:
Nidhi 2023-05-24 18:47:24 +05:30 committed by GitHub
parent 1dc13270c6
commit 6bbe054e9d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 184 additions and 2 deletions

View File

@ -0,0 +1,153 @@
package com.appsmith.server.migrations.db.ce;
import com.appsmith.external.models.Datasource;
import com.appsmith.external.models.DatasourceStorage;
import com.appsmith.external.models.QDatasource;
import com.appsmith.server.constants.FieldName;
import com.appsmith.server.domains.Workspace;
import com.appsmith.server.migrations.solutions.DatasourceStorageMigrationSolution;
import com.appsmith.server.migrations.utils.CompatibilityUtils;
import lombok.extern.slf4j.Slf4j;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.data.mongodb.core.MongoTemplate;
import org.springframework.data.mongodb.core.query.Criteria;
import org.springframework.data.mongodb.core.query.Query;
import org.springframework.data.mongodb.core.query.Update;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static com.appsmith.server.repositories.ce.BaseAppsmithRepositoryCEImpl.fieldName;
import static org.springframework.data.mongodb.core.query.Criteria.where;
import static org.springframework.data.mongodb.core.query.Query.query;
/**
 * In this migration, we move the configuration out of each valid datasource
 * into a new collection of {@link DatasourceStorage} documents, one per
 * (datasource, environment) pair. After this point the datasource itself no
 * longer carries a configuration; the marker field
 * {@code hasDatasourceStorage} records that the move has happened, which also
 * makes the migration safely re-runnable (and is the rollback mechanism).
 */
@Slf4j
//@ChangeUnit(order = "012", id = "migrate-configurations-to-data-storage", author = " ")
public class Migration012TransferToDatasourceStorage {

    /**
     * Marker field on {@code Datasource}: set to {@code true} once the
     * datasource's configuration has been copied into a DatasourceStorage.
     */
    private static final String MIGRATION_FLAG = "hasDatasourceStorage";

    private final MongoTemplate mongoTemplate;
    private final DatasourceStorageMigrationSolution solution = new DatasourceStorageMigrationSolution();

    public Migration012TransferToDatasourceStorage(MongoTemplate mongoTemplate) {
        this.mongoTemplate = mongoTemplate;
    }

    // @RollbackExecution
    public void rollbackExecution() {
        // We're handling rollbacks using marker fields, so we don't need to implement this
    }

    /**
     * Copies each un-migrated datasource's configuration into a new
     * {@link DatasourceStorage} document, then clears the configuration on the
     * datasource and sets the {@code hasDatasourceStorage} marker.
     */
    // @Execution
    public void executeMigration() {
        // First fetch all datasource ids and workspace ids for datasources that
        // do not have `hasDatasourceStorage` value set as true.
        final Query datasourceQuery = query(findDatasourceIdsToUpdate()).cursorBatchSize(1024);
        datasourceQuery.fields().include(
                fieldName(QDatasource.datasource.id),
                fieldName(QDatasource.datasource.workspaceId));
        final Query performanceOptimizedQuery = CompatibilityUtils
                .optimizeQueryForNoCursorTimeout(mongoTemplate, datasourceQuery, Datasource.class);
        final List<Datasource> datasourcesWithIds =
                mongoTemplate.find(performanceOptimizedQuery, Datasource.class);

        // Prepare a map of datasourceId to workspaceId.
        // NOTE(review): this map (and the fetch above) is never read in this
        // method — presumably scaffolding for a follow-up or an EE override of
        // the solution class; confirm before removing. Datasource ids are
        // unique, so toMap cannot hit a duplicate-key collision here.
        final Map<String, String> datasourceIdMap = datasourcesWithIds.stream()
                .collect(Collectors.toMap(Datasource::getId, Datasource::getWorkspaceId));

        // Fetch the environment id that is the default in each workspace,
        // keyed by workspaceId.
        Map<String, String> environmentsMap = solution.getDefaultEnvironmentsMap(mongoTemplate);

        // Now stream through each datasource that does NOT yet have the
        // `hasDatasourceStorage` marker set to true (same criteria as above,
        // but projecting the fields needed to build the storage document).
        Query datasourcesToUpdateQuery = query(findDatasourceIdsToUpdate()).cursorBatchSize(1024);
        datasourcesToUpdateQuery.fields().include(
                fieldName(QDatasource.datasource.id),
                fieldName(QDatasource.datasource.workspaceId),
                fieldName(QDatasource.datasource.isConfigured),
                fieldName(QDatasource.datasource.gitSyncId),
                fieldName(QDatasource.datasource.invalids),
                fieldName(QDatasource.datasource.hasDatasourceStorage),
                fieldName(QDatasource.datasource.datasourceConfiguration)
        );
        final Query performanceOptimizedUpdateQuery = CompatibilityUtils
                .optimizeQueryForNoCursorTimeout(mongoTemplate, datasourcesToUpdateQuery, Datasource.class);
        mongoTemplate
                .stream(performanceOptimizedUpdateQuery, Datasource.class)
                .forEach(datasource -> {
                    // For each of these datasources, we want to build a new datasource storage.
                    // Fetch the correct environment id to use with this datasource storage.
                    String environmentId = solution
                            .getEnvironmentIdForDatasource(environmentsMap, datasource.getWorkspaceId());
                    // If none exists, this is an error scenario: log it and skip the datasource.
                    if (environmentId == null) {
                        log.error("Could not find default environment id for workspace id: {}, skipping datasource id: {}",
                                datasource.getWorkspaceId(), datasource.getId());
                        return;
                    }
                    DatasourceStorage datasourceStorage = new DatasourceStorage(datasource, environmentId);
                    log.debug("Creating datasource storage for datasource id: {} with environment id: {}",
                            datasource.getId(), environmentId);
                    // Insert the populated datasource storage into the database.
                    // A duplicate key means a previous (interrupted) run already
                    // created the storage; fall through and reset the datasource.
                    try {
                        mongoTemplate.insert(datasourceStorage);
                    } catch (DuplicateKeyException e) {
                        log.warn("Looks like the datasource storage already exists for datasource id: {}",
                                datasource.getId());
                        log.warn("We will attempt to reset the datasource again.");
                    }
                    // Once the datasource storage exists, delete the older config inside
                    // the datasource and set `hasDatasourceStorage` to true.
                    mongoTemplate.updateFirst(
                            new Query()
                                    .addCriteria(where(fieldName(QDatasource.datasource.id)).is(datasource.getId())),
                            new Update()
                                    .set(MIGRATION_FLAG, true)
                                    .unset(fieldName(QDatasource.datasource.datasourceConfiguration))
                                    .unset(fieldName(QDatasource.datasource.invalids))
                                    .unset(fieldName(QDatasource.datasource.isConfigured)),
                            Datasource.class);
                });
    }

    /**
     * Criteria matching datasources that still need migrating: the marker flag
     * is absent or false, and the document is not deleted (checked against both
     * the legacy {@code deleted} boolean and the newer {@code deletedAt} field).
     */
    private Criteria findDatasourceIdsToUpdate() {
        return new Criteria().andOperator(
                // Check for migration flag
                new Criteria().orOperator(
                        where(MIGRATION_FLAG).exists(false),
                        where(MIGRATION_FLAG).is(false)
                ),
                // Older check for deleted
                new Criteria().orOperator(
                        where(FieldName.DELETED).exists(false),
                        where(FieldName.DELETED).is(false)
                ),
                // New check for deleted
                new Criteria().orOperator(
                        where(FieldName.DELETED_AT).exists(false),
                        where(FieldName.DELETED_AT).is(null)
                )
        );
    }
}

View File

@ -0,0 +1,9 @@
package com.appsmith.server.migrations.solutions;

import com.appsmith.server.migrations.solutions.ce.DatasourceStorageMigrationSolutionCE;

/**
 * CE/EE extension point for the datasource-storage migration helpers.
 * The Community Edition behavior lives in
 * {@link DatasourceStorageMigrationSolutionCE}; this subclass exists so the
 * Enterprise Edition can override it without touching the migration itself.
 */
public class DatasourceStorageMigrationSolution extends DatasourceStorageMigrationSolutionCE {
    // No @NoArgsConstructor needed: with no other constructors declared, the
    // compiler already generates the public no-args constructor.
}

View File

@ -0,0 +1,18 @@
package com.appsmith.server.migrations.solutions.ce;
import com.appsmith.server.constants.FieldName;
import org.springframework.data.mongodb.core.MongoTemplate;
import java.util.Map;
/**
 * Community-Edition stubs for resolving datasource environments during the
 * storage migration. These return empty/sentinel values in CE — presumably the
 * EE build overrides them with real environment lookups (TODO confirm).
 */
public class DatasourceStorageMigrationSolutionCE {

    /**
     * Builds the workspaceId -&gt; default-environmentId map used by the
     * migration. Always empty in the CE implementation.
     */
    public Map<String, String> getDefaultEnvironmentsMap(MongoTemplate mongoTemplate) {
        return Map.of();
    }

    /**
     * Resolves the environment id to attach to a datasource's storage for the
     * given workspace. CE always answers with the unused-environment sentinel,
     * ignoring both arguments.
     */
    public String getEnvironmentIdForDatasource(Map<String, String> wsIdToEnvIdMap, String workspaceId) {
        return FieldName.UNUSED_ENVIRONMENT_ID;
    }
}

View File

@ -173,7 +173,8 @@ class ActionExecutionSolutionCEImplTest {
@Test
public void testExecuteAction_withoutExecuteActionDTOPart_failsValidation() {
final Mono<ActionExecutionResult> actionExecutionResultMono = actionExecutionSolution.executeAction(Flux.empty(), null, null);
final Mono<ActionExecutionResult> actionExecutionResultMono = actionExecutionSolution
.executeAction(Flux.empty(), null, FieldName.UNUSED_ENVIRONMENT_ID);
StepVerifier
.create(actionExecutionResultMono)
@ -198,7 +199,8 @@ class ActionExecutionSolutionCEImplTest {
final Flux<Part> partsFlux = BodyExtractors.toParts()
.extract(mock, this.context);
final Mono<ActionExecutionResult> actionExecutionResultMono = actionExecutionSolution.executeAction(partsFlux, null, null);
final Mono<ActionExecutionResult> actionExecutionResultMono = actionExecutionSolution
.executeAction(partsFlux, null, FieldName.UNUSED_ENVIRONMENT_ID);
StepVerifier
.create(actionExecutionResultMono)