Update vendored DuckDB sources to 7e40a24
duckdblabs-bot committed Nov 27, 2024
1 parent 7e40a24 commit 582658a
Showing 5 changed files with 18 additions and 18 deletions.
8 changes: 1 addition & 7 deletions src/duckdb/extension/parquet/parquet_extension.cpp
@@ -183,10 +183,7 @@ struct ParquetWriteBindData : public TableFunctionData {
duckdb_parquet::CompressionCodec::type codec = duckdb_parquet::CompressionCodec::SNAPPY;
vector<pair<string, string>> kv_metadata;
idx_t row_group_size = DEFAULT_ROW_GROUP_SIZE;

- //! If row_group_size_bytes is not set, we default to row_group_size * BYTES_PER_ROW
- static constexpr const idx_t BYTES_PER_ROW = 1024;
- idx_t row_group_size_bytes;
+ idx_t row_group_size_bytes = NumericLimits<idx_t>::Maximum();

//! How/Whether to encrypt the data
shared_ptr<ParquetEncryptionConfig> encryption_config;
@@ -1313,9 +1310,6 @@ unique_ptr<FunctionData> ParquetWriteBind(ClientContext &context, CopyFunctionBi
throw BinderException("ROW_GROUP_SIZE_BYTES does not work while preserving insertion order. Use \"SET "
"preserve_insertion_order=false;\" to disable preserving insertion order.");
}
- } else {
-     // We always set a max row group size bytes so we don't use too much memory
-     bind_data->row_group_size_bytes = bind_data->row_group_size * ParquetWriteBindData::BYTES_PER_ROW;
}

if (compression_level_set && bind_data->codec != CompressionCodec::ZSTD) {
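
Taken together, the two hunks above change how the Parquet writer defaults ROW_GROUP_SIZE_BYTES: instead of deriving a byte cap of row_group_size * 1024 when the option is not set, bind now leaves row_group_size_bytes at NumericLimits<idx_t>::Maximum(), so only an explicit ROW_GROUP_SIZE_BYTES imposes a byte limit. A minimal sketch of the resulting decision; EffectiveRowGroupSizeBytes is a hypothetical helper for illustration, not a DuckDB function:

#include <cstdint>
#include <limits>

using idx_t = uint64_t;

// Hypothetical helper: how the effective byte limit for a Parquet row group is
// chosen before and after this change.
idx_t EffectiveRowGroupSizeBytes(bool row_group_size_bytes_set, idx_t user_value, idx_t row_group_size) {
	if (row_group_size_bytes_set) {
		return user_value; // an explicit ROW_GROUP_SIZE_BYTES still wins
	}
	// Old behaviour (removed above): assume ~1 KiB per row as an implicit memory cap.
	// return row_group_size * 1024;
	// New behaviour: no implicit byte cap; only the row count limits the group.
	return std::numeric_limits<idx_t>::max();
}

The upshot, as far as this diff shows, is that row groups are sized by row count alone unless the user opts into a byte limit.
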
12 changes: 8 additions & 4 deletions src/duckdb/src/common/local_file_system.cpp
@@ -527,7 +527,8 @@ int64_t LocalFileSystem::GetFileSize(FileHandle &handle) {
int fd = handle.Cast<UnixFileHandle>().fd;
struct stat s;
if (fstat(fd, &s) == -1) {
- return -1;
+ throw IOException("Failed to get file size for file \"%s\": %s", {{"errno", std::to_string(errno)}},
+                   handle.path, strerror(errno));
}
return s.st_size;
}
@@ -536,7 +537,8 @@ time_t LocalFileSystem::GetLastModifiedTime(FileHandle &handle) {
int fd = handle.Cast<UnixFileHandle>().fd;
struct stat s;
if (fstat(fd, &s) == -1) {
- return -1;
+ throw IOException("Failed to get last modified time for file \"%s\": %s", {{"errno", std::to_string(errno)}},
+                   handle.path, strerror(errno));
}
return s.st_mtime;
}
@@ -967,7 +969,8 @@ int64_t LocalFileSystem::GetFileSize(FileHandle &handle) {
HANDLE hFile = handle.Cast<WindowsFileHandle>().fd;
LARGE_INTEGER result;
if (!GetFileSizeEx(hFile, &result)) {
- return -1;
+ auto error = LocalFileSystem::GetLastErrorAsString();
+ throw IOException("Failed to get file size for file \"%s\": %s", handle.path, error);
}
return result.QuadPart;
}
@@ -978,7 +981,8 @@ time_t LocalFileSystem::GetLastModifiedTime(FileHandle &handle) {
// https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfiletime
FILETIME last_write;
if (GetFileTime(hFile, nullptr, nullptr, &last_write) == 0) {
- return -1;
+ auto error = LocalFileSystem::GetLastErrorAsString();
+ throw IOException("Failed to get last modified time for file \"%s\": %s", handle.path, error);
}

// https://stackoverflow.com/questions/29266743/what-is-dwlowdatetime-and-dwhighdatetime
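
On both the POSIX and Windows paths, GetFileSize and GetLastModifiedTime now throw an IOException carrying the failing path and OS error text instead of returning -1. Callers that treated -1 as a soft failure would need to catch instead. A hedged caller-side sketch; the simplified IOException, GetFileSize, and GetFileSizeOrDefault below are POSIX-only stand-ins for illustration, not DuckDB's API:

#include <cstdint>
#include <stdexcept>
#include <string>
#include <sys/stat.h>

// Stand-in for duckdb::IOException, just for this sketch.
struct IOException : std::runtime_error {
	using std::runtime_error::runtime_error;
};

// Simplified stand-in for the vendored call: after this change it throws on
// failure instead of returning -1.
int64_t GetFileSize(const std::string &path) {
	struct stat s;
	if (stat(path.c_str(), &s) == -1) {
		throw IOException("Failed to get file size for file \"" + path + "\"");
	}
	return s.st_size;
}

// Caller-side pattern: translate the exception back into a sentinel only where
// a soft failure is genuinely acceptable.
int64_t GetFileSizeOrDefault(const std::string &path, int64_t fallback) {
	try {
		return GetFileSize(path);
	} catch (const IOException &) {
		return fallback; // previously: if (size == -1) { ... }
	}
}
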
9 changes: 5 additions & 4 deletions (file path not captured; the hunk below is in CSVSniffer::RefineCandidates)
@@ -470,13 +470,14 @@ void CSVSniffer::RefineCandidates() {
bool finished_file = cur_candidate->FinishedFile();
if (finished_file || i == options.sample_size_chunks) {
// we finished the file or our chunk sample successfully
- successful_candidates.push_back(std::move(cur_candidate));
+ if (!cur_candidate->GetResult().error) {
+     successful_candidates.push_back(std::move(cur_candidate));
+ }
done = true;
- break;
+ continue;
}
- if ((!!RefineCandidateNextChunk(*cur_candidate) || cur_candidate->GetResult().error)) {
+ if (RefineCandidateNextChunk(*cur_candidate) && !cur_candidate->GetResult().error) {
successful_candidates.push_back(std::move(cur_candidate));
break;
}
}
candidates = std::move(successful_candidates);
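
The hunk above tightens candidate selection in CSVSniffer::RefineCandidates: a dialect candidate only survives refinement if its scan result carries no error, both when the sample or file is finished and when refining the next chunk. A simplified sketch of that filtering idea; Candidate and KeepErrorFreeCandidates are illustrative stand-ins, not DuckDB's types:

#include <memory>
#include <utility>
#include <vector>

// Illustrative stand-in for a sniffer dialect candidate.
struct Candidate {
	bool finished_sample = false; // finished the file or the chunk sample
	bool has_error = false;       // the scan result recorded an error
};

// Post-change rule: only error-free candidates are kept as successful.
std::vector<std::unique_ptr<Candidate>> KeepErrorFreeCandidates(std::vector<std::unique_ptr<Candidate>> candidates) {
	std::vector<std::unique_ptr<Candidate>> successful;
	for (auto &cand : candidates) {
		if (cand->finished_sample && !cand->has_error) {
			// Before this change, a finished candidate was kept even if it had errors.
			successful.push_back(std::move(cand));
		}
	}
	return successful;
}
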
6 changes: 3 additions & 3 deletions src/duckdb/src/function/table/version/pragma_version.cpp
@@ -1,5 +1,5 @@
#ifndef DUCKDB_PATCH_VERSION
- #define DUCKDB_PATCH_VERSION "4-dev2261"
+ #define DUCKDB_PATCH_VERSION "4-dev2273"
#endif
#ifndef DUCKDB_MINOR_VERSION
#define DUCKDB_MINOR_VERSION 1
@@ -8,10 +8,10 @@
#define DUCKDB_MAJOR_VERSION 1
#endif
#ifndef DUCKDB_VERSION
- #define DUCKDB_VERSION "v1.1.4-dev2261"
+ #define DUCKDB_VERSION "v1.1.4-dev2273"
#endif
#ifndef DUCKDB_SOURCE_ID
- #define DUCKDB_SOURCE_ID "ed90e384ef"
+ #define DUCKDB_SOURCE_ID "aa2fe677d6"
#endif
#include "duckdb/function/table/system_functions.hpp"
#include "duckdb/main/database.hpp"
1 change: 1 addition & 0 deletions src/duckdb/src/optimizer/join_order/relation_manager.cpp
@@ -89,6 +89,7 @@ static bool OperatorNeedsRelation(LogicalOperatorType op_type) {
case LogicalOperatorType::LOGICAL_DELIM_GET:
case LogicalOperatorType::LOGICAL_AGGREGATE_AND_GROUP_BY:
case LogicalOperatorType::LOGICAL_WINDOW:
+ case LogicalOperatorType::LOGICAL_SAMPLE:
return true;
default:
return false;
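
Adding LOGICAL_SAMPLE to OperatorNeedsRelation means the join-order optimizer now treats a sample as its own relation boundary rather than reordering joins across it. A hedged illustration of the kind of query this affects, using the DuckDB C++ client API; the tables and query are made up for the example, and the comment reflects the apparent intent of the change rather than a documented guarantee:

#include "duckdb.hpp"

int main() {
	duckdb::DuckDB db(nullptr);
	duckdb::Connection con(db);

	con.Query("CREATE TABLE t1 AS SELECT range AS id FROM range(1000)");
	con.Query("CREATE TABLE t2 AS SELECT range AS id FROM range(1000)");

	// The sample sits below a join; with LOGICAL_SAMPLE registered as needing its
	// own relation, the optimizer treats the sampled subtree as a unit instead of
	// reordering joins across it (which could change which rows get sampled).
	auto result = con.Query("SELECT count(*) "
	                        "FROM (SELECT * FROM t1 USING SAMPLE 10 ROWS) s "
	                        "JOIN t2 USING (id)");
	result->Print();
	return 0;
}
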
