@@ -25,9 +25,8 @@ import org.apache.hudi.common.config.{HoodieMetadataConfig, RecordMergeMode, TypedProperties}
 import org.apache.hudi.common.model.{DefaultHoodieRecordPayload, HoodieRecordMerger, HoodieRecordPayload, HoodieTableType, OverwriteWithLatestAvroPayload, TableServiceType}
 import org.apache.hudi.common.table.{HoodieTableConfig, HoodieTableMetaClient, HoodieTableVersion}
 import org.apache.hudi.common.table.timeline.InstantComparison.{compareTimestamps, GREATER_THAN_OR_EQUALS}
-import org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion
 import org.apache.hudi.common.util.{Option, StringUtils}
-import org.apache.hudi.config.{HoodieArchivalConfig, HoodieCleanConfig, HoodieCompactionConfig, HoodieLockConfig, HoodieWriteConfig}
+import org.apache.hudi.config.{HoodieCleanConfig, HoodieCompactionConfig, HoodieLockConfig, HoodieWriteConfig}
 import org.apache.hudi.keygen.NonpartitionedKeyGenerator
 import org.apache.hudi.keygen.constant.KeyGeneratorType
 import org.apache.hudi.table.upgrade.{SparkUpgradeDowngradeHelper, UpgradeDowngrade}
@@ -286,94 +285,6 @@ class TestSevenToEightUpgrade extends RecordLevelIndexTestBase {
     assertEquals(1, df2.filter("price = 35").count())
   }
 
-  @Test
-  def testV6TableUpgradeToV9ToV6(): Unit = {
-    val partitionFields = "partition:simple"
-
-    val hudiOptsV6 = commonOpts ++ Map(
-      TABLE_TYPE.key -> HoodieTableType.COPY_ON_WRITE.name(),
-      KEYGENERATOR_CLASS_NAME.key -> KeyGeneratorType.CUSTOM.getClassName,
-      PARTITIONPATH_FIELD.key -> partitionFields,
-      "hoodie.metadata.enable" -> "true",
-      PAYLOAD_CLASS_NAME.key -> classOf[OverwriteWithLatestAvroPayload].getName,
-      RECORD_MERGE_MODE.key -> RecordMergeMode.COMMIT_TIME_ORDERING.name,
-      HoodieWriteConfig.WRITE_TABLE_VERSION.key -> "6",
-      HoodieWriteConfig.TIMELINE_LAYOUT_VERSION_NUM.key() -> Integer.toString(TimelineLayoutVersion.VERSION_1),
-      HoodieWriteConfig.AUTO_UPGRADE_VERSION.key -> "false",
-      HoodieMetadataConfig.COMPACT_NUM_DELTA_COMMITS.key -> "15",
-      HoodieCleanConfig.CLEANER_COMMITS_RETAINED.key -> "3",
-      HoodieArchivalConfig.MIN_COMMITS_TO_KEEP.key -> "4",
-      HoodieArchivalConfig.MAX_COMMITS_TO_KEEP.key -> "5"
-    )
-
-    doWriteAndValidateDataAndRecordIndex(hudiOptsV6,
-      operation = INSERT_OPERATION_OPT_VAL,
-      saveMode = SaveMode.Overwrite,
-      validate = false)
-
-    for (i <- 1 to 10) {
-      doWriteAndValidateDataAndRecordIndex(hudiOptsV6,
-        operation = INSERT_OPERATION_OPT_VAL,
-        saveMode = SaveMode.Append,
-        validate = false)
-    }
-    metaClient = getLatestMetaClient(true)
-
-    assertEquals(HoodieTableVersion.SIX, metaClient.getTableConfig.getTableVersion)
-    assertEquals("partition", HoodieTableConfig.getPartitionFieldPropForKeyGenerator(metaClient.getTableConfig).get())
-
-    val archivePath = new org.apache.hudi.storage.StoragePath(metaClient.getArchivePath, ".commits_.archive*")
-    val archivedFiles = metaClient.getStorage.globEntries(archivePath)
-    println(s"Archived files found ${archivedFiles.size()}")
-
-    metaClient = HoodieTableMetaClient.builder()
-      .setBasePath(basePath)
-      .setConf(storage.getConf())
-      .build()
-
-    val hudiOptsUpgrade = hudiOptsV6 ++ Map(
-      HoodieWriteConfig.WRITE_TABLE_VERSION.key -> HoodieTableVersion.current().versionCode().toString
-    ) - HoodieWriteConfig.AUTO_UPGRADE_VERSION.key
-
-    doWriteAndValidateDataAndRecordIndex(hudiOptsUpgrade,
-      operation = UPSERT_OPERATION_OPT_VAL,
-      saveMode = SaveMode.Append,
-      validate = false)
-
-    metaClient = HoodieTableMetaClient.builder()
-      .setBasePath(basePath)
-      .setConf(storage.getConf())
-      .build()
-
-    assertEquals(HoodieTableVersion.current(), metaClient.getTableConfig.getTableVersion)
-    assertEquals(partitionFields, HoodieTableConfig.getPartitionFieldPropForKeyGenerator(metaClient.getTableConfig).get())
-
-    val archivedFilesAfterUpgrade = metaClient.getStorage.globEntries(archivePath)
-
-    assertTrue(archivedFilesAfterUpgrade.size() > 0,
-      "Even after upgrade, fresh table with ~12 commits should have archived files")
-
-    val hudiOptsDowngrade = hudiOptsV6 ++ Map(
-      HoodieWriteConfig.WRITE_TABLE_VERSION.key -> HoodieTableVersion.SIX.versionCode().toString
-    )
-
-    new UpgradeDowngrade(metaClient, getWriteConfig(hudiOptsDowngrade, basePath), context, SparkUpgradeDowngradeHelper.getInstance)
-      .run(HoodieTableVersion.SIX, null)
-
-    metaClient = HoodieTableMetaClient.builder()
-      .setBasePath(basePath)
-      .setConf(storage.getConf())
-      .build()
-
-    assertEquals(HoodieTableVersion.SIX, metaClient.getTableConfig.getTableVersion)
-    assertEquals("partition", HoodieTableConfig.getPartitionFieldPropForKeyGenerator(metaClient.getTableConfig).get())
-
-    val v6ArchivePath = new org.apache.hudi.storage.StoragePath(metaClient.getArchivePath, ".commits_.archive*")
-    val v6ArchivedFiles = metaClient.getStorage.globEntries(v6ArchivePath)
-
-    assertTrue(v6ArchivedFiles.size() > 0, "Downgrade should have archived files in V6 format")
-  }
-
   private def getWriteConfig(hudiOpts: Map[String, String], basePath: String): HoodieWriteConfig = {
     val props = TypedProperties.fromMap(hudiOpts.asJava)
     HoodieWriteConfig.newBuilder()
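For context, the `UpgradeDowngrade` call in the removed test is Hudi's entry point for explicit table-version migration. A minimal sketch of that pattern, assuming an initialized `metaClient`, `writeConfig`, and engine `context` are already in scope (the variable names here are illustrative, not part of this diff):

```scala
import org.apache.hudi.common.table.HoodieTableVersion
import org.apache.hudi.table.upgrade.{SparkUpgradeDowngradeHelper, UpgradeDowngrade}

// Migrate the table to version SIX. The second argument to run() is an
// instant time; the removed test passed null, as sketched here.
new UpgradeDowngrade(metaClient, writeConfig, context, SparkUpgradeDowngradeHelper.getInstance)
  .run(HoodieTableVersion.SIX, null)
```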