<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>idea Re: Parquet Format for ADLS gen2 Endpoint in Suggest an Idea</title>
    <link>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1923355#M9316</link>
    <description>&lt;P&gt;No ETA yet, we are wrapping up the Tech Preview of Parquet on S3, and any feedback from that will be completed and then work on certifying ADLS will happen. This is a high priority item for us as I mentioned and we will hopefully have something to show soon after the May release.&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;Bob V&lt;/P&gt;</description>
    <pubDate>Wed, 27 Apr 2022 12:33:17 GMT</pubDate>
    <dc:creator>bobvecchione</dc:creator>
    <dc:date>2022-04-27T12:33:17Z</dc:date>
    <item>
      <title>Parquet Format for ADLS gen2 Endpoint</title>
      <link>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idi-p/1901679</link>
      <description>&lt;P&gt;Hello Team,&lt;/P&gt;
&lt;P&gt;We request to add a new option to the file format under the file attributes in ADLS Gen2 Endpoint as the current two options (CSV / JSON) are not sufficient for our organization.&lt;/P&gt;
&lt;P&gt;We are facing multiple issues with tables that have special characters and using CSV format as delimiter are not segregating the data properly.&amp;nbsp;&lt;/P&gt;
&lt;P&gt;And for both options, we see that the file size grows tremendously.&lt;/P&gt;
&lt;P&gt;Parquet format supporting would bring tremendous value to our DI process.&lt;/P&gt;
&lt;P&gt;Thank you!&lt;/P&gt;</description>
      <pubDate>Mon, 07 Mar 2022 08:54:10 GMT</pubDate>
      <guid>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idi-p/1901679</guid>
      <dc:creator>zhangsq24</dc:creator>
      <dc:date>2022-03-07T08:54:10Z</dc:date>
    </item>
    <item>
      <title>Re: Parquet Format for ADLS gen2 Endpoint</title>
      <link>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1901923#M8882</link>
      <description>&lt;P&gt;Hi - We currently have plans to support this in the near future (I will update once I have a better idea of timing). We are soon (in a couple of weeks) releasing Parquet file support for S3 and will follow that up with ADLS.&lt;/P&gt;&lt;P&gt;Bob V&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Mon, 07 Mar 2022 15:57:58 GMT</pubDate>
      <guid>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1901923#M8882</guid>
      <dc:creator>bobvecchione</dc:creator>
      <dc:date>2022-03-07T15:57:58Z</dc:date>
    </item>
    <item>
      <title>Re: Parquet Format for ADLS gen2 Endpoint</title>
      <link>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1905040#M8957</link>
      <description>&lt;P&gt;Thanks, if there is a pre-testing, please let us join beta test.&lt;/P&gt;
&lt;P&gt;&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Tue, 15 Mar 2022 01:42:01 GMT</pubDate>
      <guid>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1905040#M8957</guid>
      <dc:creator>zhangsq24</dc:creator>
      <dc:date>2022-03-15T01:42:01Z</dc:date>
    </item>
    <item>
      <title>Re: Parquet Format for ADLS gen2 Endpoint</title>
      <link>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1905766#M8981</link>
      <description>&lt;P&gt;Hi guys,&lt;/P&gt;
&lt;P&gt;We are also looking forward for support of&amp;nbsp;&lt;SPAN&gt;Parquet Format for ADLS gen2 Endpoint. Can you please give us a date from when it will be available? And does it require Replicate application version upgrade?&amp;nbsp;&lt;/SPAN&gt;&lt;/P&gt;
&lt;P&gt;Cheers,&lt;/P&gt;
&lt;P&gt;Bhargav.&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Wed, 16 Mar 2022 06:55:53 GMT</pubDate>
      <guid>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1905766#M8981</guid>
      <dc:creator>Bhargav_Ranjit</dc:creator>
      <dc:date>2022-03-16T06:55:53Z</dc:date>
    </item>
    <item>
      <title>Re: Parquet Format for ADLS gen2 Endpoint</title>
      <link>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1923137#M9309</link>
      <description>&lt;P&gt;Any updates on when we can expect parquet support with ADLS?&amp;nbsp;&lt;/P&gt;
&lt;P&gt;It's more than a month since my last follow up &lt;span class="lia-unicode-emoji" title=":slightly_smiling_face:"&gt;🙂&lt;/span&gt;&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Wed, 27 Apr 2022 06:39:05 GMT</pubDate>
      <guid>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1923137#M9309</guid>
      <dc:creator>Bhargav_Ranjit</dc:creator>
      <dc:date>2022-04-27T06:39:05Z</dc:date>
    </item>
    <item>
      <title>Re: Parquet Format for ADLS gen2 Endpoint</title>
      <link>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1923355#M9316</link>
      <description>&lt;P&gt;No ETA yet, we are wrapping up the Tech Preview of Parquet on S3, and any feedback from that will be completed and then work on certifying ADLS will happen. This is a high priority item for us as I mentioned and we will hopefully have something to show soon after the May release.&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;Bob V&lt;/P&gt;</description>
      <pubDate>Wed, 27 Apr 2022 12:33:17 GMT</pubDate>
      <guid>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1923355#M9316</guid>
      <dc:creator>bobvecchione</dc:creator>
      <dc:date>2022-04-27T12:33:17Z</dc:date>
    </item>
    <item>
      <title>Re: Parquet Format for ADLS gen2 Endpoint</title>
      <link>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1966185#M10147</link>
      <description>&lt;P&gt;Is Parquet on ADLS likely to make it into the Nov 2022 Release?&lt;/P&gt;</description>
      <pubDate>Mon, 08 Aug 2022 21:26:54 GMT</pubDate>
      <guid>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1966185#M10147</guid>
      <dc:creator>Evan_Hodge_IBT</dc:creator>
      <dc:date>2022-08-08T21:26:54Z</dc:date>
    </item>
    <item>
      <title>Re: Parquet Format for ADLS gen2 Endpoint</title>
      <link>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1966206#M10149</link>
      <description>&lt;P&gt;&lt;a href="https://community.qlik.com/t5/user/viewprofilepage/user-id/122543"&gt;@Evan_Hodge_IBT&lt;/a&gt;&amp;nbsp;,&lt;/P&gt;
&lt;P&gt;Yes, it's added in SP02 already:&lt;/P&gt;
&lt;P&gt;&amp;nbsp;&lt;/P&gt;
&lt;P&gt;Qlik Replicate May 2022 – Patch Release Notes (PR02 - build 2022.5.499)&lt;/P&gt;
&lt;P&gt;Jira issue : RECOB-5686&lt;BR /&gt;Salesforce case : N/A&lt;BR /&gt;Type : Enhancement&lt;BR /&gt;Component/Process: Microsoft Azure ADLS and Google Cloud Storage Targets&lt;BR /&gt;Description : Added Parquet file format support.&lt;/P&gt;
&lt;P&gt;&amp;nbsp;&lt;/P&gt;
&lt;P&gt;Hope this helps.&lt;/P&gt;
&lt;P&gt;John.&lt;/P&gt;</description>
      <pubDate>Tue, 09 Aug 2022 00:43:40 GMT</pubDate>
      <guid>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1966206#M10149</guid>
      <dc:creator>john_wang</dc:creator>
      <dc:date>2022-08-09T00:43:40Z</dc:date>
    </item>
    <item>
      <title>Re: Parquet Format for ADLS gen2 Endpoint</title>
      <link>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1966509#M10161</link>
      <description>&lt;P&gt;Thanks John...To be clear this was added as a (Patch Release) along with a few other new capabilities 2 weeks ago.&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;I hope this helps!&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;--bobv--&lt;/P&gt;</description>
      <pubDate>Tue, 09 Aug 2022 12:03:45 GMT</pubDate>
      <guid>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1966509#M10161</guid>
      <dc:creator>bobvecchione</dc:creator>
      <dc:date>2022-08-09T12:03:45Z</dc:date>
    </item>
    <item>
      <title>Re: Parquet Format for ADLS gen2 Endpoint</title>
      <link>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1969221#M10187</link>
      <description>&lt;P&gt;Hi Team,&lt;/P&gt;
&lt;P&gt;Can you please share the installable download link?&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Tue, 16 Aug 2022 14:11:38 GMT</pubDate>
      <guid>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1969221#M10187</guid>
      <dc:creator>Bhargav_Ranjit</dc:creator>
      <dc:date>2022-08-16T14:11:38Z</dc:date>
    </item>
    <item>
      <title>Re: Parquet Format for ADLS gen2 Endpoint</title>
      <link>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1969265#M10188</link>
      <description>&lt;P&gt;Since this is considered early access, you can contact your Qlik Account team...or wait a couple of weeks when this is a GA Service release and it will be available via normal channels.&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;Thanks&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;--bobv--&lt;/P&gt;</description>
      <pubDate>Tue, 16 Aug 2022 15:15:16 GMT</pubDate>
      <guid>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1969265#M10188</guid>
      <dc:creator>bobvecchione</dc:creator>
      <dc:date>2022-08-16T15:15:16Z</dc:date>
    </item>
    <item>
      <title>Re: Parquet Format for ADLS gen2 Endpoint</title>
      <link>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1977043#M10287</link>
      <description>&lt;P&gt;We got the parquet patch from Qlik support last week , thanks for that..&lt;/P&gt;
&lt;P&gt;We are actively testing the feature and notice few issues with the parquet that's generated. I’ve updated the support case and ideation portal as well with the below information, appreciate if you can also help to speed up with the fix.&lt;/P&gt;
&lt;OL&gt;
&lt;LI&gt;Simba Spark ODBC Databricks driver cannot read the Parquet files generated by Qlik Replicate.&lt;span class="lia-inline-image-display-wrapper lia-image-align-inline" image-alt="Bhargav_Ranjit_1-1662389921413.png" style="width: 400px;"&gt;&lt;img src="https://community.qlik.com/t5/image/serverpage/image-id/88315i97FE9F70225E80FC/image-size/medium?v=v2&amp;amp;px=400" role="button" title="Bhargav_Ranjit_1-1662389921413.png" alt="Bhargav_Ranjit_1-1662389921413.png" /&gt;&lt;/span&gt;&lt;span class="lia-inline-image-display-wrapper lia-image-align-inline" image-alt="Bhargav_Ranjit_2-1662389925033.png" style="width: 400px;"&gt;&lt;img src="https://community.qlik.com/t5/image/serverpage/image-id/88316i04DB7900E1C55804/image-size/medium?v=v2&amp;amp;px=400" role="button" title="Bhargav_Ranjit_2-1662389925033.png" alt="Bhargav_Ranjit_2-1662389925033.png" /&gt;&lt;/span&gt;
&lt;P&gt;&amp;nbsp;&lt;/P&gt;
&lt;/LI&gt;
&lt;LI&gt;Qlik Replicate should support configuring different target data types. for e.g., we see issue with TIMETYPE columns&lt;/LI&gt;
&lt;/OL&gt;
&lt;P&gt;&lt;span class="lia-inline-image-display-wrapper lia-image-align-inline" image-alt="Bhargav_Ranjit_3-1662389954589.png" style="width: 400px;"&gt;&lt;img src="https://community.qlik.com/t5/image/serverpage/image-id/88317i5BF7F9483585E99E/image-size/medium?v=v2&amp;amp;px=400" role="button" title="Bhargav_Ranjit_3-1662389954589.png" alt="Bhargav_Ranjit_3-1662389954589.png" /&gt;&lt;/span&gt;&lt;/P&gt;
&lt;P&gt;3. In Azure adls directory, table names contain "file." as prefix in the folder names. How to transform the file names as per our needs.&lt;/P&gt;
&lt;P&gt;&lt;span class="lia-inline-image-display-wrapper lia-image-align-inline" image-alt="Bhargav_Ranjit_4-1662390079958.png" style="width: 400px;"&gt;&lt;img src="https://community.qlik.com/t5/image/serverpage/image-id/88318i229E2ACCEA7D04B3/image-size/medium?v=v2&amp;amp;px=400" role="button" title="Bhargav_Ranjit_4-1662390079958.png" alt="Bhargav_Ranjit_4-1662390079958.png" /&gt;&lt;/span&gt;&lt;/P&gt;
&lt;P&gt;&lt;BR /&gt;I've attached screenshots of the issues mentioned above, along with Databricks logs (sent to support team).&lt;BR /&gt;I think its better if we can have a call with the team to understand the issue better.&lt;/P&gt;
&lt;P&gt;---------------------------------------------------------------------------------------------------------------&lt;/P&gt;
&lt;P&gt;Databricks Spark ERROR logs Text:&lt;BR /&gt;---------------------------------------------------------------------------&lt;BR /&gt;Py4JJavaError Traceback (most recent call last)&lt;BR /&gt;&amp;lt;command-2045747191298696&amp;gt; in &amp;lt;cell line: 2&amp;gt;()&lt;BR /&gt;1 df = (&lt;BR /&gt;----&amp;gt; 2 spark.read&lt;BR /&gt;3 .format("parquet")&lt;BR /&gt;4 .load("abfss://raw@brewdatpltfrmrawbrzd.dfs.core.windows.net/data/ghq/tech/attunity_test/file.kna1/LOAD00000001.snappy.parquet")&lt;BR /&gt;5 )&lt;BR /&gt;/databricks/spark/python/pyspark/sql/readwriter.py in load(self, path, format, schema, **options)&lt;BR /&gt;175 self.options(**options)&lt;BR /&gt;176 if isinstance(path, str):&lt;BR /&gt;--&amp;gt; 177 return self._df(self._jreader.load(path))&lt;BR /&gt;178 elif path is not None:&lt;BR /&gt;179 if type(path) != list:&lt;BR /&gt;/databricks/spark/python/lib/py4j-0.10.9.5-src.zip/py4j/java_gateway.py in __call__(self, *args)&lt;BR /&gt;1319 &lt;BR /&gt;1320 answer = self.gateway_client.send_command(command)&lt;BR /&gt;-&amp;gt; 1321 return_value = get_return_value(&lt;BR /&gt;1322 answer, self.gateway_client, self.target_id, self.name)&lt;BR /&gt;1323 &lt;BR /&gt;/databricks/spark/python/pyspark/sql/utils.py in deco(*a, **kw)&lt;BR /&gt;194 def deco(*a: Any, **kw: Any) -&amp;gt; Any:&lt;BR /&gt;195 try:&lt;BR /&gt;--&amp;gt; 196 return f(*a, **kw)&lt;BR /&gt;197 except Py4JJavaError as e:&lt;BR /&gt;198 converted = convert_exception(e.java_exception)&lt;BR /&gt;/databricks/spark/python/lib/py4j-0.10.9.5-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)&lt;BR /&gt;324 value = OUTPUT_CONVERTER[type](answer[2:], gateway_client)&lt;BR /&gt;325 if answer[1] == REFERENCE_TYPE:&lt;BR /&gt;--&amp;gt; 326 raise Py4JJavaError(&lt;BR /&gt;327 "An error occurred while calling {0}{1}{2}.\n".&lt;BR /&gt;328 format(target_id, ".", name), value)&lt;BR /&gt;Py4JJavaError: An error occurred while calling o3864.load.&lt;BR 
/&gt;: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 10.0 failed 4 times, most recent failure: Lost task 0.3 in stage 10.0 (TID 33) (172.16.80.69 executor 0): java.io.IOException: Could not read or convert schema for file: abfss://raw@brewdatpltfrmrawbrzd.dfs.core.windows.net/data/ghq/tech/attunity_test/file.kna1/LOAD00000001.snappy.parquet&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$.readSchemaFromFooter(ParquetFileFormat.scala:880)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$.$anonfun$mergeSchemasInParallel$2(ParquetFileFormat.scala:855)&lt;BR /&gt;at scala.collection.immutable.Stream.map(Stream.scala:418)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$.$anonfun$mergeSchemasInParallel$1(ParquetFileFormat.scala:855)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$.$anonfun$mergeSchemasInParallel$1$adapted(ParquetFileFormat.scala:848)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.$anonfun$mergeSchemasInParallel$2(SchemaMergeUtils.scala:101)&lt;BR /&gt;at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:860)&lt;BR /&gt;at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:860)&lt;BR /&gt;at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:60)&lt;BR /&gt;at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:372)&lt;BR /&gt;at org.apache.spark.rdd.RDD.iterator(RDD.scala:336)&lt;BR /&gt;at org.apache.spark.scheduler.ResultTask.$anonfun$runTask$3(ResultTask.scala:75)&lt;BR /&gt;at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)&lt;BR /&gt;at org.apache.spark.scheduler.ResultTask.$anonfun$runTask$1(ResultTask.scala:75)&lt;BR /&gt;at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)&lt;BR /&gt;at 
org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:55)&lt;BR /&gt;at org.apache.spark.scheduler.Task.doRunTask(Task.scala:168)&lt;BR /&gt;at org.apache.spark.scheduler.Task.$anonfun$run$1(Task.scala:136)&lt;BR /&gt;at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)&lt;BR /&gt;at org.apache.spark.scheduler.Task.run(Task.scala:96)&lt;BR /&gt;at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$13(Executor.scala:870)&lt;BR /&gt;at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1690)&lt;BR /&gt;at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:873)&lt;BR /&gt;at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)&lt;BR /&gt;at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)&lt;BR /&gt;at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:728)&lt;BR /&gt;at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)&lt;BR /&gt;at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)&lt;BR /&gt;at java.lang.Thread.run(Thread.java:748)&lt;BR /&gt;Caused by: org.apache.spark.sql.AnalysisException: Illegal Parquet type: INT32 (TIME(MILLIS,true))&lt;BR /&gt;at org.apache.spark.sql.errors.QueryCompilationErrors$.illegalParquetTypeError(QueryCompilationErrors.scala:1466)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetToSparkSchemaConverter.illegalType$1(ParquetSchemaConverter.scala:192)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetToSparkSchemaConverter.$anonfun$convertPrimitiveField$2(ParquetSchemaConverter.scala:238)&lt;BR /&gt;at scala.Option.getOrElse(Option.scala:189)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetToSparkSchemaConverter.convertPrimitiveField(ParquetSchemaConverter.scala:210)&lt;BR /&gt;at 
org.apache.spark.sql.execution.datasources.parquet.ParquetToSparkSchemaConverter.convertField(ParquetSchemaConverter.scala:173)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetToSparkSchemaConverter.$anonfun$convertInternal$3(ParquetSchemaConverter.scala:133)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetToSparkSchemaConverter.$anonfun$convertInternal$3$adapted(ParquetSchemaConverter.scala:103)&lt;BR /&gt;at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)&lt;BR /&gt;at scala.collection.immutable.Range.foreach(Range.scala:158)&lt;BR /&gt;at scala.collection.TraversableLike.map(TraversableLike.scala:286)&lt;BR /&gt;at scala.collection.TraversableLike.map$(TraversableLike.scala:279)&lt;BR /&gt;at scala.collection.AbstractTraversable.map(Traversable.scala:108)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetToSparkSchemaConverter.convertInternal(ParquetSchemaConverter.scala:103)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetToSparkSchemaConverter.convert(ParquetSchemaConverter.scala:73)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$.$anonfun$readSchemaFromFooter$2(ParquetFileFormat.scala:876)&lt;BR /&gt;at scala.Option.getOrElse(Option.scala:189)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$.readSchemaFromFooter(ParquetFileFormat.scala:876)&lt;BR /&gt;... 
28 more&lt;BR /&gt;Driver stacktrace:&lt;BR /&gt;at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:3247)&lt;BR /&gt;at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:3181)&lt;BR /&gt;at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:3175)&lt;BR /&gt;at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)&lt;BR /&gt;at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)&lt;BR /&gt;at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)&lt;BR /&gt;at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:3175)&lt;BR /&gt;at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1412)&lt;BR /&gt;at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1412)&lt;BR /&gt;at scala.Option.foreach(Option.scala:407)&lt;BR /&gt;at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1412)&lt;BR /&gt;at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:3456)&lt;BR /&gt;at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3397)&lt;BR /&gt;at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:3385)&lt;BR /&gt;at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:51)&lt;BR /&gt;at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:1166)&lt;BR /&gt;at org.apache.spark.SparkContext.runJobInternal(SparkContext.scala:2702)&lt;BR /&gt;at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1027)&lt;BR /&gt;at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:165)&lt;BR /&gt;at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:125)&lt;BR /&gt;at 
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)&lt;BR /&gt;at org.apache.spark.rdd.RDD.withScope(RDD.scala:411)&lt;BR /&gt;at org.apache.spark.rdd.RDD.collect(RDD.scala:1025)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.mergeSchemasInParallel(SchemaMergeUtils.scala:95)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$.mergeSchemasInParallel(ParquetFileFormat.scala:859)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetUtils$.inferSchema(ParquetUtils.scala:142)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetUtils$.inferSchema(ParquetUtils.scala:50)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat.inferSchema(ParquetFileFormat.scala:173)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.DataSource.$anonfun$getOrInferFileFormatSchema$11(DataSource.scala:234)&lt;BR /&gt;at scala.Option.orElse(Option.scala:447)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.DataSource.getOrInferFileFormatSchema(DataSource.scala:227)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.DataSource.resolveRelation(DataSource.scala:460)&lt;BR /&gt;at org.apache.spark.sql.DataFrameReader.loadV1Source(DataFrameReader.scala:368)&lt;BR /&gt;at org.apache.spark.sql.DataFrameReader.$anonfun$load$2(DataFrameReader.scala:324)&lt;BR /&gt;at scala.Option.getOrElse(Option.scala:189)&lt;BR /&gt;at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:324)&lt;BR /&gt;at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:237)&lt;BR /&gt;at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)&lt;BR /&gt;at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)&lt;BR /&gt;at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)&lt;BR /&gt;at java.lang.reflect.Method.invoke(Method.java:498)&lt;BR /&gt;at 
py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)&lt;BR /&gt;at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:380)&lt;BR /&gt;at py4j.Gateway.invoke(Gateway.java:306)&lt;BR /&gt;at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)&lt;BR /&gt;at py4j.commands.CallCommand.execute(CallCommand.java:79)&lt;BR /&gt;at py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:195)&lt;BR /&gt;at py4j.ClientServerConnection.run(ClientServerConnection.java:115)&lt;BR /&gt;at java.lang.Thread.run(Thread.java:748)&lt;BR /&gt;Caused by: java.io.IOException: Could not read or convert schema for file: abfss://raw@brewdatpltfrmrawbrzd.dfs.core.windows.net/data/ghq/tech/attunity_test/file.kna1/LOAD00000001.snappy.parquet&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$.readSchemaFromFooter(ParquetFileFormat.scala:880)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$.$anonfun$mergeSchemasInParallel$2(ParquetFileFormat.scala:855)&lt;BR /&gt;at scala.collection.immutable.Stream.map(Stream.scala:418)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$.$anonfun$mergeSchemasInParallel$1(ParquetFileFormat.scala:855)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$.$anonfun$mergeSchemasInParallel$1$adapted(ParquetFileFormat.scala:848)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.$anonfun$mergeSchemasInParallel$2(SchemaMergeUtils.scala:101)&lt;BR /&gt;at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:860)&lt;BR /&gt;at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:860)&lt;BR /&gt;at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:60)&lt;BR /&gt;at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:372)&lt;BR /&gt;at org.apache.spark.rdd.RDD.iterator(RDD.scala:336)&lt;BR /&gt;at 
org.apache.spark.scheduler.ResultTask.$anonfun$runTask$3(ResultTask.scala:75)&lt;BR /&gt;at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)&lt;BR /&gt;at org.apache.spark.scheduler.ResultTask.$anonfun$runTask$1(ResultTask.scala:75)&lt;BR /&gt;at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)&lt;BR /&gt;at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:55)&lt;BR /&gt;at org.apache.spark.scheduler.Task.doRunTask(Task.scala:168)&lt;BR /&gt;at org.apache.spark.scheduler.Task.$anonfun$run$1(Task.scala:136)&lt;BR /&gt;at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)&lt;BR /&gt;at org.apache.spark.scheduler.Task.run(Task.scala:96)&lt;BR /&gt;at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$13(Executor.scala:870)&lt;BR /&gt;at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1690)&lt;BR /&gt;at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:873)&lt;BR /&gt;at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)&lt;BR /&gt;at com.databricks.spark.util.ExecutorFrameProfiler$.record(ExecutorFrameProfiler.scala:110)&lt;BR /&gt;at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:728)&lt;BR /&gt;at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)&lt;BR /&gt;at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)&lt;BR /&gt;... 
1 more&lt;BR /&gt;Caused by: org.apache.spark.sql.AnalysisException: Illegal Parquet type: INT32 (TIME(MILLIS,true))&lt;BR /&gt;at org.apache.spark.sql.errors.QueryCompilationErrors$.illegalParquetTypeError(QueryCompilationErrors.scala:1466)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetToSparkSchemaConverter.illegalType$1(ParquetSchemaConverter.scala:192)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetToSparkSchemaConverter.$anonfun$convertPrimitiveField$2(ParquetSchemaConverter.scala:238)&lt;BR /&gt;at scala.Option.getOrElse(Option.scala:189)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetToSparkSchemaConverter.convertPrimitiveField(ParquetSchemaConverter.scala:210)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetToSparkSchemaConverter.convertField(ParquetSchemaConverter.scala:173)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetToSparkSchemaConverter.$anonfun$convertInternal$3(ParquetSchemaConverter.scala:133)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetToSparkSchemaConverter.$anonfun$convertInternal$3$adapted(ParquetSchemaConverter.scala:103)&lt;BR /&gt;at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)&lt;BR /&gt;at scala.collection.immutable.Range.foreach(Range.scala:158)&lt;BR /&gt;at scala.collection.TraversableLike.map(TraversableLike.scala:286)&lt;BR /&gt;at scala.collection.TraversableLike.map$(TraversableLike.scala:279)&lt;BR /&gt;at scala.collection.AbstractTraversable.map(Traversable.scala:108)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetToSparkSchemaConverter.convertInternal(ParquetSchemaConverter.scala:103)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetToSparkSchemaConverter.convert(ParquetSchemaConverter.scala:73)&lt;BR /&gt;at 
org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$.$anonfun$readSchemaFromFooter$2(ParquetFileFormat.scala:876)&lt;BR /&gt;at scala.Option.getOrElse(Option.scala:189)&lt;BR /&gt;at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$.readSchemaFromFooter(ParquetFileFormat.scala:876)&lt;BR /&gt;... 28 more&lt;/P&gt;
&lt;P&gt;----------------------------------------------------------------------------------------------------------------------------------------&lt;/P&gt;</description>
      <pubDate>Mon, 05 Sep 2022 15:02:41 GMT</pubDate>
      <guid>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/1977043#M10287</guid>
      <dc:creator>Bhargav_Ranjit</dc:creator>
      <dc:date>2022-09-05T15:02:41Z</dc:date>
    </item>
    <item>
      <title>From now on, please track this idea from the Ideation por...</title>
      <link>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/2100793#M14042</link>
      <description>&lt;P&gt;From now on, please track this idea from the Ideation portal.&amp;nbsp;&lt;/P&gt;&lt;P&gt;&lt;STRONG&gt;&lt;A title="Link to new idea" href="https://ideation.qlik.com/app/#/case/281123" target="_blank" rel="noopener"&gt;Link to new idea&lt;/A&gt;&lt;/STRONG&gt;&lt;/P&gt;&lt;P&gt;Meghann&lt;/P&gt;&lt;P data-unlink="true"&gt;&lt;EM&gt;NOTE: Upon clicking this link 2 tabs may open - please feel free to close the one with a login page. If you &lt;STRONG&gt;only&lt;/STRONG&gt; see 1 tab with the login page, please try clicking this link first: &lt;STRONG&gt;&lt;A title="Authenticate me!" href="#" target="_blank" rel="noopener"&gt;Authenticate me!&lt;/A&gt;&lt;/STRONG&gt;&amp;nbsp;t&lt;/EM&gt;&lt;EM&gt;hen try the link above again. Ensure pop-up blocker is off.&lt;/EM&gt;&lt;/P&gt;</description>
      <pubDate>Wed, 02 Aug 2023 15:53:21 GMT</pubDate>
      <guid>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/2100793#M14042</guid>
      <dc:creator>Meghann_MacDonald</dc:creator>
      <dc:date>2023-08-02T15:53:21Z</dc:date>
    </item>
    <item>
      <title>Re: Parquet Format for ADLS gen2 Endpoint - Status changed to: Closed - Archived</title>
      <link>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/2100794#M14043</link>
      <description />
      <pubDate>Wed, 02 Aug 2023 15:53:23 GMT</pubDate>
      <guid>https://community.qlik.com/t5/Suggest-an-Idea/Parquet-Format-for-ADLS-gen2-Endpoint/idc-p/2100794#M14043</guid>
      <dc:creator>Ideation</dc:creator>
      <dc:date>2023-08-02T15:53:23Z</dc:date>
    </item>
  </channel>
</rss>

