This repository was archived by the owner on Feb 27, 2025. It is now read-only.

Commit 6d9e49f

minor fix

1 parent: ef94937

File tree

1 file changed: +6 -4 lines changed


src/main/scala/com/microsoft/sqlserver/jdbc/spark/utils/BulkCopyUtils.scala

Lines changed: 6 additions & 4 deletions
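
In brief: the commit logs the parsed columnsToWrite set for debugging, reworks how the ColumnMetadata array is sized so the branch depends on whether the caller supplied columnsToWrite, and drops the parentheses from the remaining Set.isEmpty call.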
@@ -293,25 +293,27 @@ object BulkCopyUtils extends Logging {
         val autoCols = getAutoCols(conn, dbtable)
 
         val columnsToWriteSet = columnsToWrite.split(",").toSet
+        logDebug(s"columnsToWrite: $columnsToWriteSet")
 
         val prefix = "Spark Dataframe and SQL Server table have differing"
 
         // auto columns should not exist in df
         assertIfCheckEnabled(dfCols.length + autoCols.length == tableCols.length, strictSchemaCheck,
             s"${prefix} numbers of columns")
 
-        if (columnsToWriteSet.isEmpty()) {
-            val result = new Array[ColumnMetadata](tableCols.length - autoCols.length)
-        } else {
+        // if columnsToWrite provided by user, use it for metadata mapping. If not, use sql table.
+        if (columnsToWrite == "") {
             val result = new Array[ColumnMetadata](columnsToWriteSet.size)
+        } else {
+            val result = new Array[ColumnMetadata](tableCols.length - autoCols.length)
         }
 
         var nonAutoColIndex = 0
 
         for (i <- 0 to tableCols.length-1) {
             val tableColName = tableCols(i).name
             var dfFieldIndex = -1
-            if (!columnsToWriteSet.isEmpty() && !columnsToWriteSet.contains(tableColName)) {
+            if (!columnsToWriteSet.isEmpty && !columnsToWriteSet.contains(tableColName)) {
                 // if columnsToWrite provided, and column name not in it, skip column mapping and ColumnMetadata
                 logDebug(s"skipping col index $i col name $tableColName, user not provided in columnsToWrite list")
             } else if (autoCols.contains(tableColName)) {
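
As an aside, both guard changes in this hunk hinge on standard-library behavior rather than connector internals. The standalone Scala sketch below (hypothetical object name, no connector dependencies; an illustration, not the connector's code) demonstrates the two details the diff relies on: String.split on an empty string yields a one-element array, so the derived set is never empty and emptiness has to be tested on the raw columnsToWrite string; and Scala collections declare isEmpty without a parameter list, which is presumably why the old isEmpty() call form was dropped.

// ColumnsToWriteSketch.scala -- a standalone illustration, not connector code.
// Names here are hypothetical; only the Scala standard library is assumed.
object ColumnsToWriteSketch {
  def main(args: Array[String]): Unit = {
    // 1) Splitting an empty string yields Array(""), so the set built from an
    //    empty columnsToWrite is never empty. This is presumably why the new
    //    branch tests the raw string (columnsToWrite == "") instead of the set.
    val noColumns = ""
    val columnsToWriteSet = noColumns.split(",").toSet
    println(columnsToWriteSet.size)     // 1 -- a single empty-string element
    println(columnsToWriteSet.isEmpty)  // false, despite the empty input
    println(noColumns == "")            // true -- the check the commit uses

    // A genuine user-supplied list parses as expected.
    println("col1,col2,col3".split(",").toSet)  // Set(col1, col2, col3)

    // 2) isEmpty on Scala collections is declared without a parameter list,
    //    so columnsToWriteSet.isEmpty compiles while columnsToWriteSet.isEmpty()
    //    is rejected by the compiler -- matching the membership-check fix.
  }
}

Pasting the body of main into a Scala REPL reproduces the printed values.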

0 commit comments
