I have also tried a very simple workbench, CSV to CSV, like the one below, and I get this error:
CSV reader: Failed to open file 'C:\Users\g-lami\Documents\Workbench\CSV.csv' for reading. Please ensure that the file exists and you have sufficient privileges to read it
CSV reader: Failed to open file 'C:\Users\g-lami\Documents\Workbench\CSV.csv' for reading. Please ensure that the file exists and you have sufficient privileges to read it
A fatal error has occurred. Check the logfile above for details
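For reference, a minimal Tcl sketch (not part of the workspace or of the generated script; it simply reuses the same source path that the .tcl file below references) to confirm that the file exists and is readable from Tcl:

# Minimal sketch: verify the source CSV exists and is readable.
# The path is the same one used in "lappend sourceDatasets" further down.
set csvPath {C:/Users/g-lami/Documents/Workbench/CSV.csv}
if { ![file exists $csvPath] } {
    puts "Missing: [file nativename $csvPath]"
} elseif { ![file readable $csvPath] } {
    puts "No read permission: [file nativename $csvPath]"
} else {
    puts "Readable: [file nativename $csvPath]"
}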
The .tcl file is this one:
#!/usr/bin/env fme

set workspacename {C:/Users/g-lami/Documents/Workbench/batch_test.fmw}

lappend destMacroList {DestDataset_CSV2_2}
lappend suffixList {}
lappend destDirList {C:/Users/g-lami/Documents/Workbench/}
lappend destDSetTypeList {2}

set recreateSourceTree "no"

set superBatchFileName [FME_TempFilename]

set superBatchFile [open $superBatchFileName "w"]

lappend sourceDatasets {C:/Users/g-lami/Documents/Workbench/CSV.csv}

set logStandardOut {}
set logTimings {}

set sourceDatasets [lsort [eval FME_RecursiveGlob $sourceDatasets]]
# When the "Recreate source directory tree" option has been selected,
# find the deepest directory that all of the source datasets have in common.
# This will be removed from each to form the destination dataset name.
set commonSource {}
if { [string first {yes} $recreateSourceTree] != -1 } {

  # And now the interesting part. We start out assuming that everything up
  # to the last "/" in the first dataset is the common part, and then
  # start shortening it until we've looked at all datasets.

  foreach dataset $sourceDatasets {
   regsub {/[^/]*/*$} $dataset / datasetDir

   if { $commonSource == {} } {
     # The first time through, we will take the whole dataset directory
     # to seed our notion of what's in common

     set commonSource "${datasetDir}"
   } else {
     # Compare this dataset's directory with our current notion of
     # the commonPart. We will iteratively remove path portions from
     # the end of one or the other (or both) until they match.

     while { $datasetDir != $commonSource } {
      if { [string length $datasetDir] >= [string length $commonSource] } {
        regsub {[^/]*/*$} $datasetDir {} datasetDir
      } else {
        if { [string length $commonSource] >= [string length $datasetDir] } {
         regsub {[^/]*/*$} $commonSource {} commonSource
        }
      }
     }
   }
  }
}
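# Note: with recreateSourceTree set to "no" above, this whole block is skipped and
# commonSource stays empty. Purely for illustration (hypothetical paths, not part of
# this run): with sources C:/data/a/one.csv and C:/data/b/two.csv and the option set
# to "yes", the loop would trim the two directories until commonSource is "C:/data/",
# and that common prefix would later be stripped to rebuild the source tree under
# each destination directory.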
set spot 0
set numDatasets [llength $sourceDatasets]
set extraDatasets {}
set showProgress 0
while {$spot < $numDatasets} {
  set nextSpot $spot
  incr nextSpot;
  set curDataset [lindex $sourceDatasets $spot]
  set curSourceDirectory [file dirname [file rootname $curDataset] ]

  # If we are replicating the directory structure, remove the common
  # portion of the source dataset, and use it in the formation of the
  # destination dataset.
  if { ($commonSource != {}) &&
     ([string first $commonSource $curSourceDirectory] == 0) } {
    set baseName [string range $curSourceDirectory [string length $commonSource] end]
    set destIndex 0
    set numDest [llength $destDirList]
    while {$destIndex < $numDest} {
     set destDir [lindex $destDirList $destIndex]
     set destDSetType [lindex $destDSetTypeList $destIndex]
     incr destIndex
     if { $destDSetType == 1} {
       catch { file mkdir [file dirname $destDir] }
     } else {
       catch { file mkdir [file dirname $destDir$baseName] }
     }
    }
  } else {
    set baseName [file tail [file rootname $curSourceDirectory]]
  }
  if { ($commonSource != {}) &&
     ([string first $baseName $commonSource] != -1) } {
     set baseName {}
  }
  set break 0
  if { ($nextSpot < $numDatasets) } {
    set nextDataset [lindex $sourceDatasets $nextSpot]
    set nextSourceDirectory [file dirname [file rootname $nextDataset] ]
    if { ($nextSourceDirectory != $curSourceDirectory) ||
       ([lsearch $destDSetTypeList 0] == -1) } {
      set break 1
      set showProgress 1
    } else {
      # Add to the list of like datasets
      set extraDatasets "$extraDatasets +CSV2_1_DATASET \"$curDataset\""
    }
  } else {
    set break 1
  }
  if { $break == 1 } {
    set destDatasetLine {}
    set destIndex 0
    set numDest [llength $destDirList]
    while {$destIndex < $numDest} {
      set destDir [lindex $destDirList $destIndex]
      set suffix [lindex $suffixList $destIndex]
      set destDSetType [lindex $destDSetTypeList $destIndex]
      if { $destDSetType == 1} {
        set destDataset "$destDir$suffix"
      } else {
        set destDataset "$destDir$baseName$suffix"
      }
      set destDatasetLine "$destDatasetLine --[lindex $destMacroList $destIndex] \"$destDataset\""
      incr destIndex
    }
    if { $showProgress == 1 } { puts $superBatchFile "INCLUDE \[puts_real \"\\nProcessing $curDataset ([expr $spot+1] of $numDatasets)\\n\" \] " }
    puts $superBatchFile "\"$workspacename\" --SourceDataset_CSV2_1 \"$curDataset\" $destDatasetLine $extraDatasets $logStandardOut $logTimings"
    set extraDatasets {}
  }
  incr spot
}
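# With the single source dataset in this run, the loop above should write one
# command line of roughly this form (the destination name is derived from the
# source directory because DestDataset_CSV2_2 is not of type 1):
#   "C:/Users/g-lami/Documents/Workbench/batch_test.fmw"
#     --SourceDataset_CSV2_1 "C:/Users/g-lami/Documents/Workbench/CSV.csv"
#     --DestDataset_CSV2_2 "C:/Users/g-lami/Documents/Workbench/Workbench"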
close $superBatchFile
set fmeHome {}
catch { set fmeHome $::env(FME_HOME)/ }
if [ catch { ${fmeHome}fme COMMAND_FILE $superBatchFileName } err ] {
 puts $err
 puts "\nFME encountered an error. Please contact http://www.safe.com/support"
} else {
 puts "\\nTranslation SUCCESSFUL"
}
if [ catch { file delete $superBatchFileName } ] {
 puts "Warning: unable to delete $superBatchFileName"
}
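If it helps to see exactly what FME is being asked to run, here is a minimal debugging sketch (my own addition, not something the generated file contains) that could be dropped in right after the "close $superBatchFile" line to print the temporary command file before it is executed and deleted:

# Debugging sketch: dump the generated command file so the source and
# destination paths FME receives can be inspected in the console/log.
set fh [open $superBatchFileName r]
puts [read $fh]
close $fh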