<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic Using Delta Lake components in standard jobs with S3 storage in Talend Studio</title>
    <link>https://community.qlik.com/t5/Talend-Studio/Using-Delta-Lake-components-in-standard-jobs-with-S3-storage/m-p/2517420#M147535</link>
    <description>&lt;P&gt;Hello,&lt;/P&gt;&lt;P&gt;I'm trying to set up a data lakehouse on AWS S3 using Delta Lake tables. I don't need the Big Data framework, so I want to use a standard job, but I can't clearly understand how to set up a connection with the Delta Lake component to use S3, if that is possible. I can't find any explanation in the documentation, which is frankly very thin on this topic and left me confused.&lt;/P&gt;&lt;P&gt;Does a standard job need Databricks, or does it still use Spark? Is Talend (Java) the compute engine instead? I see that I should use a JDBC driver, and I wonder how to configure the JDBC connection string. Can anyone give me any help or point me to a guide or additional documentation?&lt;BR /&gt;&lt;BR /&gt;Thanks in advance,&lt;BR /&gt;Vincenzo.&lt;/P&gt;</description>
    <pubDate>Sat, 10 May 2025 15:11:33 GMT</pubDate>
    <dc:creator>vintac</dc:creator>
    <dc:date>2025-05-10T15:11:33Z</dc:date>
    <item>
      <title>Using Delta Lake components in standard jobs with S3 storage</title>
      <link>https://community.qlik.com/t5/Talend-Studio/Using-Delta-Lake-components-in-standard-jobs-with-S3-storage/m-p/2517420#M147535</link>
      <description>&lt;P&gt;Hello,&lt;/P&gt;&lt;P&gt;I'm trying to set up a data lakehouse on AWS S3 using Delta Lake tables. I don't need the Big Data framework, so I want to use a standard job, but I can't clearly understand how to set up a connection with the Delta Lake component to use S3, if that is possible. I can't find any explanation in the documentation, which is frankly very thin on this topic and left me confused.&lt;/P&gt;&lt;P&gt;Does a standard job need Databricks, or does it still use Spark? Is Talend (Java) the compute engine instead? I see that I should use a JDBC driver, and I wonder how to configure the JDBC connection string. Can anyone give me any help or point me to a guide or additional documentation?&lt;BR /&gt;&lt;BR /&gt;Thanks in advance,&lt;BR /&gt;Vincenzo.&lt;/P&gt;</description>
      <pubDate>Sat, 10 May 2025 15:11:33 GMT</pubDate>
      <guid>https://community.qlik.com/t5/Talend-Studio/Using-Delta-Lake-components-in-standard-jobs-with-S3-storage/m-p/2517420#M147535</guid>
      <dc:creator>vintac</dc:creator>
      <dc:date>2025-05-10T15:11:33Z</dc:date>
    </item>
    <item>
      <title>Re: Using Delta Lake components in standard jobs with S3 storage</title>
      <link>https://community.qlik.com/t5/Talend-Studio/Using-Delta-Lake-components-in-standard-jobs-with-S3-storage/m-p/2517605#M147541</link>
      <description>&lt;P&gt;Hi &lt;a href="https://community.qlik.com/t5/user/viewprofilepage/user-id/338100"&gt;@patricia845&lt;/a&gt;,&lt;BR /&gt;thank you very much for your answer, but looking at the JDBC driver documentation, it requires a connection to a Databricks instance, not just pure S3 storage: &lt;A href="https://docs.databricks.com/aws/en/integrations/jdbc/configure" target="_blank"&gt;https://docs.databricks.com/aws/en/integrations/jdbc/configure&lt;/A&gt;. Do you perhaps have a reference to the different JDBC drivers you mentioned?&lt;BR /&gt;&lt;BR /&gt;Regards,&lt;/P&gt;&lt;P&gt;Vincenzo.&lt;/P&gt;</description>
      <pubDate>Tue, 13 May 2025 07:30:51 GMT</pubDate>
      <guid>https://community.qlik.com/t5/Talend-Studio/Using-Delta-Lake-components-in-standard-jobs-with-S3-storage/m-p/2517605#M147541</guid>
      <dc:creator>vintac</dc:creator>
      <dc:date>2025-05-13T07:30:51Z</dc:date>
    </item>
  </channel>
</rss>

