Mirror of https://github.com/zeux/pugixml.git (synced 2024-12-26 21:04:25 +08:00)
tests: Only use load_file_special_folder test on macOS
The behavior on Linux varies widely between kernel versions, and it triggers an unexpected OOM during sanitizer runs because the file size is somehow reported as LONG_MAX. It's not clear that the test covers any paths we don't cover otherwise; it would be nice to test failing to load a multi-gigabyte file on a 32-bit system, but we can't easily do that at the moment anyway.
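To see why the reported size can blow up, here is a minimal sketch (not part of the commit, and assuming a POSIX-style libc) that opens a directory the same way pugixml's loader sizes its input, by seeking to the end and calling ftell; what it prints varies by OS, kernel version, and filesystem:

// Sketch: observe what ftell reports for a directory stream.
#include <cstdio>
#include <climits>

int main()
{
	FILE* file = fopen(".", "rb");
	if (!file)
	{
		// Some platforms refuse to open a directory as a stream at all.
		puts("fopen refused the directory");
		return 0;
	}

	// pugixml sizes the input with fseek/ftell; per the commit message,
	// some Linux kernels report LONG_MAX here for directories, which then
	// surfaces as status_out_of_memory when the huge allocation fails.
	if (fseek(file, 0, SEEK_END) == 0)
		printf("ftell reported %ld (LONG_MAX is %ld)\n", ftell(file), LONG_MAX);

	fclose(file);
	return 0;
}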
Commit 7664bbf9af (parent 1a9c3f66db)
@@ -590,14 +590,12 @@ TEST(document_load_file_wide_out_of_memory)
 	CHECK(result.status == status_out_of_memory || result.status == status_file_not_found);
 }
 
-#if defined(__linux__) || defined(__APPLE__)
+#if defined(__APPLE__)
 TEST(document_load_file_special_folder)
 {
 	xml_document doc;
 	xml_parse_result result = doc.load_file(".");
-	// status_out_of_memory is somewhat counter-intuitive but on Linux ftell returns LONG_MAX for directories
-	// on some Debian systems the folder is also read as empty, hence status_no_document_element check
-	CHECK(result.status == status_file_not_found || result.status == status_io_error || result.status == status_out_of_memory || result.status == status_no_document_element);
+	CHECK(result.status == status_io_error);
 }
 #endif
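For reference, a standalone sketch of what the trimmed-down test exercises: loading a directory through the public pugixml API and inspecting the resulting status. On macOS the commit now expects status_io_error; other platforms may report different statuses, which is exactly why the test was narrowed:

#include <cstdio>
#include "pugixml.hpp"

int main()
{
	pugi::xml_document doc;
	pugi::xml_parse_result result = doc.load_file(".");

	// description() maps the status enum to a human-readable string.
	printf("status %d: %s\n", (int)result.status, result.description());
	return 0;
}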