From 499f2eb9c7e58d30cb5a11ce5d88589a44a4df7b Mon Sep 17 00:00:00 2001
From: Fengting Chen
Date: Fri, 19 Oct 2018 11:35:28 -0700
Subject: [PATCH 001/488] Fix the issue by freeing the temporary LOBs created
by ReadCLob() and WriteCLob(). Put the temporary LOB release together with
OCIDescriptorFree().
---
gdal/frmts/georaster/georaster_wrapper.cpp | 16 +++---
gdal/frmts/georaster/oci_wrapper.cpp | 62 +++++++++++++++++++---
gdal/frmts/georaster/oci_wrapper.h | 1 +
3 files changed, 65 insertions(+), 14 deletions(-)
diff --git a/gdal/frmts/georaster/georaster_wrapper.cpp b/gdal/frmts/georaster/georaster_wrapper.cpp
index 0594e700fc8a..201670513f37 100644
--- a/gdal/frmts/georaster/georaster_wrapper.cpp
+++ b/gdal/frmts/georaster/georaster_wrapper.cpp
@@ -526,7 +526,7 @@ GeoRasterWrapper* GeoRasterWrapper::Open( const char* pszStringId, bool bUpdate
// Clean up
// -------------------------------------------------------------------
- OCIDescriptorFree( phLocator, OCI_DTYPE_LOB );
+ poStmt->FreeLob(phLocator);
CPLFree( pszXML );
delete poStmt;
@@ -966,7 +966,7 @@ bool GeoRasterWrapper::Create( char* pszDescription,
sDataTable = szBindRDT;
nRasterId = nBindRID;
- OCIDescriptorFree( phLocator, OCI_DTYPE_LOB );
+ poStmt->FreeLob(phLocator);
delete poStmt;
@@ -3185,13 +3185,13 @@ bool GeoRasterWrapper::SetNoData( int nLayer, const char* pszValue )
if( ! poStmt->Execute() )
{
- OCIDescriptorFree( phLocatorR, OCI_DTYPE_LOB );
- OCIDescriptorFree( phLocatorW, OCI_DTYPE_LOB );
+ poStmt->FreeLob(phLocatorR);
+ poStmt->FreeLob(phLocatorW);
delete poStmt;
return false;
}
- OCIDescriptorFree( phLocatorW, OCI_DTYPE_LOB );
+ poStmt->FreeLob(phLocatorW);
// ------------------------------------------------------------
// Read the XML metadata from db to memory with nodata updates
@@ -3206,7 +3206,7 @@ bool GeoRasterWrapper::SetNoData( int nLayer, const char* pszValue )
CPLFree( pszXML );
}
- OCIDescriptorFree( phLocatorR, OCI_DTYPE_LOB );
+ poStmt->FreeLob(phLocatorR);
bFlushMetadata = true;
delete poStmt;
@@ -3596,12 +3596,12 @@ bool GeoRasterWrapper::FlushMetadata()
if( ! poStmt->Execute() )
{
- OCIDescriptorFree( phLocator, OCI_DTYPE_LOB );
+ poStmt->FreeLob(phLocator);
delete poStmt;
return false;
}
- OCIDescriptorFree( phLocator, OCI_DTYPE_LOB );
+ poStmt->FreeLob(phLocator);
delete poStmt;
diff --git a/gdal/frmts/georaster/oci_wrapper.cpp b/gdal/frmts/georaster/oci_wrapper.cpp
index da47d888e472..c83daa2709ef 100644
--- a/gdal/frmts/georaster/oci_wrapper.cpp
+++ b/gdal/frmts/georaster/oci_wrapper.cpp
@@ -1117,15 +1117,19 @@ void OWStatement::WriteCLob( OCILobLocator** pphLocator, char* pszData )
{
nNextCol++;
- CheckError( OCIDescriptorAlloc(
+ if (CheckError( OCIDescriptorAlloc(
poConnection->hEnv,
(void**) pphLocator,
OCI_DTYPE_LOB,
(size_t) 0,
(dvoid **) nullptr),
- hError );
+ hError ))
+ {
+ CPLDebug("OCI", "Error in WriteCLob");
+ return;
+ }
- CheckError( OCILobCreateTemporary(
+ if (CheckError( OCILobCreateTemporary(
poConnection->hSvcCtx,
poConnection->hError,
(OCILobLocator*) *pphLocator,
@@ -1134,11 +1138,15 @@ void OWStatement::WriteCLob( OCILobLocator** pphLocator, char* pszData )
(ub1) OCI_TEMP_CLOB,
false,
OCI_DURATION_SESSION ),
- hError );
+ hError ))
+ {
+ CPLDebug("OCI", "Error in WriteCLob creating temporary lob");
+ return;
+ }
ub4 nAmont = (ub4) strlen(pszData);
- CheckError( OCILobWrite(
+ if (CheckError( OCILobWrite(
poConnection->hSvcCtx,
hError,
*pphLocator,
@@ -1151,7 +1159,11 @@ void OWStatement::WriteCLob( OCILobLocator** pphLocator, char* pszData )
nullptr,
(ub2) 0,
(ub1) SQLCS_IMPLICIT ),
- hError );
+ hError ))
+ {
+ CPLDebug("OCI", "Error in WriteCLob writing the lob");
+ return;
+ }
}
void OWStatement::Define( OCIArray** pphData )
@@ -1583,6 +1595,44 @@ char* OWStatement::ReadCLob( OCILobLocator* phLocator )
return pszBuffer;
}
+// Free the OCIDescriptor for the LOB; if it is a temporary LOB, it is freed too.
+void OWStatement::FreeLob(OCILobLocator* phLocator)
+{
+ boolean is_temporary;
+
+ if (phLocator == nullptr)
+ return;
+
+ if( CheckError( OCILobIsTemporary(
+ poConnection->hEnv,
+ hError,
+ phLocator,
+ &is_temporary),
+ hError))
+ {
+ CPLDebug("OCI", "OCILobIsTemporary failed");
+ OCIDescriptorFree( phLocator, OCI_DTYPE_LOB );
+ return;
+ }
+
+ if(is_temporary)
+ {
+ if( CheckError( OCILobFreeTemporary(
+ poConnection->hSvcCtx,
+ hError,
+ phLocator),
+ hError))
+ {
+ CPLDebug("OCI", "OCILobFreeTemporary failed");
+ OCIDescriptorFree( phLocator, OCI_DTYPE_LOB );
+ return;
+ }
+
+ }
+
+ OCIDescriptorFree( phLocator, OCI_DTYPE_LOB );
+}
+
void OWStatement::BindName( const char* pszName, int* pnData )
{
OCIBind* hBind = nullptr;
diff --git a/gdal/frmts/georaster/oci_wrapper.h b/gdal/frmts/georaster/oci_wrapper.h
index cdb519ae6e9f..03314fb40ff4 100644
--- a/gdal/frmts/georaster/oci_wrapper.h
+++ b/gdal/frmts/georaster/oci_wrapper.h
@@ -417,6 +417,7 @@ class OWStatement
unsigned long ReadBlob( OCILobLocator* phLocator,
void* pBuffer, unsigned long nOffset,
unsigned long nSize );
+ void FreeLob(OCILobLocator* phLocator);
char* ReadCLob( OCILobLocator* phLocator );
void WriteCLob( OCILobLocator** pphLocator, char* pszData );
bool WriteBlob( OCILobLocator* phLocator,
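
For reference, a minimal usage sketch of the new FreeLob() helper, based only on the OWStatement declarations shown above. The statement and locator are assumed to have been set up by the caller, as in GeoRasterWrapper::Open(); this is illustrative and not part of the patch.

    #include "oci_wrapper.h"   // OWStatement, OCILobLocator
    #include "cpl_conv.h"      // CPLFree()

    // Read CLOB metadata, then release the locator with FreeLob(), which frees
    // the temporary LOB (if any) before freeing the descriptor, instead of a
    // bare OCIDescriptorFree().
    static void ReadMetadataAndCleanup( OWStatement* poStmt,
                                        OCILobLocator* phLocator )
    {
        char* pszXML = poStmt->ReadCLob( phLocator );  // buffer allocated by ReadCLob()
        /* ... parse pszXML ... */
        CPLFree( pszXML );
        poStmt->FreeLob( phLocator );  // replaces OCIDescriptorFree( phLocator, OCI_DTYPE_LOB )
        delete poStmt;
    }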
From b10430acb1303d18052fc20ebc36de01e01398fd Mon Sep 17 00:00:00 2001
From: Sander Jansen
Date: Thu, 25 Oct 2018 14:49:58 -0500
Subject: [PATCH 002/488] VRT: Fix issue #1048 opening VRT with large number of
bands
---
gdal/frmts/vrt/vrtdataset.cpp | 59 +++++++++++++++++++++++++++++++++++
gdal/frmts/vrt/vrtdataset.h | 2 ++
gdal/frmts/vrt/vrtsources.cpp | 10 ++++--
gdal/gcore/gdal_proxy.h | 5 +++
gdal/gcore/gdalproxypool.cpp | 10 ++++++
5 files changed, 84 insertions(+), 2 deletions(-)
diff --git a/gdal/frmts/vrt/vrtdataset.cpp b/gdal/frmts/vrt/vrtdataset.cpp
index 0a6207976fbd..219978c5f9b8 100644
--- a/gdal/frmts/vrt/vrtdataset.cpp
+++ b/gdal/frmts/vrt/vrtdataset.cpp
@@ -36,6 +36,8 @@
#include
#include
+#include "gdal_proxy.h"
+
/*! @cond Doxygen_Suppress */
@@ -1384,6 +1386,59 @@ int VRTDataset::CheckCompatibleForDatasetIO()
return nSources != 0;
}
+
+/************************************************************************/
+/* ExpandProxyBands() */
+/************************************************************************/
+/* In ProxyPoolDatasets, by default only one band is initialized. When using
+ * VRTDataset::IRasterIO and CheckCompatibleForDatasetIO is True, we need to have
+ * all bands initialized (but only for the last band in the VRTDataset). This function
+ * assumes CheckCompatibleForDatasetIO() has already been run and returned successfully.
+ */
+void VRTDataset::ExpandProxyBands()
+{
+ VRTSourcedRasterBand * poLastBand = reinterpret_cast<VRTSourcedRasterBand *>(papoBands[nBands - 1]);
+
+ CPLAssert(poLastBand != nullptr); // CheckCompatibleForDatasetIO()
+
+ int nSources = poLastBand->nSources;
+
+ for (int iSource = 0; iSource < nSources; iSource++)
+ {
+ VRTSimpleSource* poSource = reinterpret_cast<VRTSimpleSource*>(poLastBand->papoSources[iSource]);
+
+ CPLAssert(poSource != nullptr); // CheckCompatibleForDatasetIO()
+
+ GDALProxyPoolDataset * dataset = dynamic_cast<GDALProxyPoolDataset *>(poSource->GetBand()->GetDataset());
+
+ if (dataset == nullptr)
+ {
+ continue; // only GDALProxyPoolDataset needs to be expanded
+ }
+
+ if (dataset->GetBands()[0] != nullptr)
+ {
+ continue; // first band already set, so just assume all the others are set as well
+ }
+
+ for (int iBand = 1; iBand <= nBands - 1; iBand++ )
+ {
+ VRTSourcedRasterBand * srcband = reinterpret_cast<VRTSourcedRasterBand *>(papoBands[iBand - 1]);
+ VRTSimpleSource* src = reinterpret_cast<VRTSimpleSource*>(srcband->papoSources[iSource]);
+ GDALRasterBand * rasterband = src->GetBand();
+
+ int nBlockXSize, nBlockYSize;
+
+ rasterband->GetBlockSize(&nBlockXSize, &nBlockYSize);
+
+ dataset->AddSrcBand(iBand, rasterband->GetRasterDataType(), nBlockXSize, nBlockYSize);
+ }
+ }
+}
+
+
+
+
/************************************************************************/
/* GetSingleSimpleSource() */
/* */
@@ -1593,6 +1648,10 @@ CPLErr VRTDataset::IRasterIO( GDALRWFlag eRWFlag,
if( bLocalCompatibleForDatasetIO && eRWFlag == GF_Read )
{
+
+ // Make sure to expand the proxy bands before using them below
+ ExpandProxyBands();
+
for(int iBandIndex=0; iBandIndex m_apoOverviews;
std::vector m_apoOverviewsBak;
char **m_papszXMLVRTMetadata;
diff --git a/gdal/frmts/vrt/vrtsources.cpp b/gdal/frmts/vrt/vrtsources.cpp
index d4f7a02a73ed..5475ec28a0c5 100644
--- a/gdal/frmts/vrt/vrtsources.cpp
+++ b/gdal/frmts/vrt/vrtsources.cpp
@@ -691,8 +691,14 @@ CPLErr VRTSimpleSource::XMLInit( CPLXMLNode *psSrc, const char *pszVRTPath,
// Only the information of rasterBand nSrcBand will be accurate
// but that's OK since we only use that band afterwards.
- for( int i = 1; i <= nSrcBand; i++ )
- proxyDS->AddSrcBandDescription(eDataType, nBlockXSize, nBlockYSize);
+ //
+ // Previously this added a src band for every band <= nSrcBand, but this becomes
+ // prohibitively expensive for files with a large number of bands. This optimization
+ // only adds the desired band; the rest of the bands are simply left initialized to nullptr.
+ // This assumes no other code here accesses any of the lower bands in the GDALProxyPoolDataset.
+ // It has been suggested that, in addition, we should try to share a GDALProxyPoolDataset between
+ // multiple Simple Sources, which would save memory for papoBands. For now, that's not implemented.
+ proxyDS->AddSrcBand(nSrcBand, eDataType, nBlockXSize, nBlockYSize);
if( bGetMaskBand )
{
diff --git a/gdal/gcore/gdal_proxy.h b/gdal/gcore/gdal_proxy.h
index 1d96fe1c5828..d1a8fae689ad 100644
--- a/gdal/gcore/gdal_proxy.h
+++ b/gdal/gcore/gdal_proxy.h
@@ -245,6 +245,11 @@ class CPL_DLL GDALProxyPoolDataset : public GDALProxyDataset
void AddSrcBandDescription( GDALDataType eDataType, int nBlockXSize,
int nBlockYSize );
+ // Used by VRT SimpleSource to add a single GDALProxyPoolRasterBand while
+ // keeping all other bands initialized to nullptr. This is under the assumption that
+ // VRT SimpleSource will not have to access any bands other than the one added.
+ void AddSrcBand(int nBand, GDALDataType eDataType, int nBlockXSize,
+ int nBlockYSize );
void FlushCache() override;
const char *GetProjectionRef() override;
diff --git a/gdal/gcore/gdalproxypool.cpp b/gdal/gcore/gdalproxypool.cpp
index 7180b60533be..19d1cf16a936 100644
--- a/gdal/gcore/gdalproxypool.cpp
+++ b/gdal/gcore/gdalproxypool.cpp
@@ -693,6 +693,16 @@ void GDALProxyPoolDataset::AddSrcBandDescription( GDALDataType eDataType, int nB
SetBand(nBands + 1, new GDALProxyPoolRasterBand(this, nBands + 1, eDataType, nBlockXSize, nBlockYSize));
}
+/************************************************************************/
+/* AddSrcBand() */
+/************************************************************************/
+
+void GDALProxyPoolDataset::AddSrcBand(int nBand, GDALDataType eDataType, int nBlockXSize, int nBlockYSize)
+{
+ SetBand(nBand, new GDALProxyPoolRasterBand(this, nBand, eDataType, nBlockXSize, nBlockYSize));
+}
+
+
/************************************************************************/
/* RefUnderlyingDataset() */
/************************************************************************/
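
Below is a standalone model of the sparse band initialization introduced by this patch, using simplified stand-in types rather than the real GDAL classes: AddSrcBand() creates only the slot that the VRT source actually references, and a later expansion step (what VRTDataset::ExpandProxyBands() triggers before whole-dataset IO) fills the remaining slots on demand.

    #include <cstddef>
    #include <memory>
    #include <vector>

    struct ProxyBand { int nBand; };    // stand-in for GDALProxyPoolRasterBand

    class SparseProxyDataset             // stand-in for GDALProxyPoolDataset
    {
    public:
        // Create only band nBand; bands 1..nBand-1 stay nullptr.
        void AddSrcBand( int nBand )
        {
            if( m_bands.size() < static_cast<size_t>(nBand) )
                m_bands.resize( nBand );
            m_bands[nBand - 1].reset( new ProxyBand{ nBand } );
        }
        // Fill in any band that is still missing.
        void Expand()
        {
            for( size_t i = 0; i < m_bands.size(); ++i )
                if( !m_bands[i] )
                    m_bands[i].reset( new ProxyBand{ static_cast<int>(i) + 1 } );
        }
    private:
        std::vector<std::unique_ptr<ProxyBand>> m_bands;
    };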
From d2b9ec539a395c7d64b8cbdf3d8b8892f957522d Mon Sep 17 00:00:00 2001
From: Tamas Szekeres
Date: Fri, 26 Oct 2018 21:32:32 +0200
Subject: [PATCH 003/488] MSSQLSpatial: Fix geometry parser with M values
(#1051)
---
.../mssqlspatial/ogrmssqlgeometryparser.cpp | 99 ++++++++++++++++---
1 file changed, 83 insertions(+), 16 deletions(-)
diff --git a/gdal/ogr/ogrsf_frmts/mssqlspatial/ogrmssqlgeometryparser.cpp b/gdal/ogr/ogrsf_frmts/mssqlspatial/ogrmssqlgeometryparser.cpp
index 16271610c318..bd70c0fb1e28 100644
--- a/gdal/ogr/ogrsf_frmts/mssqlspatial/ogrmssqlgeometryparser.cpp
+++ b/gdal/ogr/ogrsf_frmts/mssqlspatial/ogrmssqlgeometryparser.cpp
@@ -139,15 +139,31 @@ OGRPoint* OGRMSSQLGeometryParser::ReadPoint(int iShape)
{
if (nColType == MSSQLCOLTYPE_GEOGRAPHY)
{
- if ( chProps & SP_HASZVALUES )
+ if ( (chProps & SP_HASZVALUES) && (chProps & SP_HASMVALUES) )
+ return new OGRPoint(ReadY(iPoint), ReadX(iPoint), ReadZ(iPoint), ReadM(iPoint) );
+ else if ( chProps & SP_HASZVALUES )
return new OGRPoint( ReadY(iPoint), ReadX(iPoint), ReadZ(iPoint) );
+ else if ( chProps & SP_HASMVALUES )
+ {
+ OGRPoint* poPoint = new OGRPoint( ReadY(iPoint), ReadX(iPoint) );
+ poPoint->setM( ReadZ(iPoint) );
+ return poPoint;
+ }
else
return new OGRPoint( ReadY(iPoint), ReadX(iPoint) );
}
else
{
- if ( chProps & SP_HASZVALUES )
+ if ( (chProps & SP_HASZVALUES) && (chProps & SP_HASMVALUES) )
+ return new OGRPoint( ReadX(iPoint), ReadY(iPoint), ReadZ(iPoint), ReadM(iPoint) );
+ else if ( chProps & SP_HASZVALUES )
return new OGRPoint( ReadX(iPoint), ReadY(iPoint), ReadZ(iPoint) );
+ else if ( chProps & SP_HASMVALUES )
+ {
+ OGRPoint* poPoint = new OGRPoint( ReadX(iPoint), ReadY(iPoint) );
+ poPoint->setM( ReadZ(iPoint) );
+ return poPoint;
+ }
else
return new OGRPoint( ReadX(iPoint), ReadY(iPoint) );
}
@@ -199,15 +215,23 @@ OGRLineString* OGRMSSQLGeometryParser::ReadLineString(int iShape)
{
if (nColType == MSSQLCOLTYPE_GEOGRAPHY)
{
- if ( chProps & SP_HASZVALUES )
+ if ( (chProps & SP_HASZVALUES) && (chProps & SP_HASMVALUES) )
+ poLineString->setPoint(i, ReadY(iPoint), ReadX(iPoint), ReadZ(iPoint), ReadM(iPoint) );
+ else if ( chProps & SP_HASZVALUES )
poLineString->setPoint(i, ReadY(iPoint), ReadX(iPoint), ReadZ(iPoint) );
+ else if ( chProps & SP_HASMVALUES )
+ poLineString->setPointM(i, ReadY(iPoint), ReadX(iPoint), ReadZ(iPoint) );
else
poLineString->setPoint(i, ReadY(iPoint), ReadX(iPoint) );
}
else
{
- if ( chProps & SP_HASZVALUES )
+ if ( (chProps & SP_HASZVALUES) && (chProps & SP_HASMVALUES) )
+ poLineString->setPoint(i, ReadX(iPoint), ReadY(iPoint), ReadZ(iPoint), ReadM(iPoint) );
+ else if ( chProps & SP_HASZVALUES )
poLineString->setPoint(i, ReadX(iPoint), ReadY(iPoint), ReadZ(iPoint) );
+ else if ( chProps & SP_HASMVALUES )
+ poLineString->setPointM(i, ReadX(iPoint), ReadY(iPoint), ReadZ(iPoint) );
else
poLineString->setPoint(i, ReadX(iPoint), ReadY(iPoint) );
}
@@ -265,15 +289,23 @@ OGRPolygon* OGRMSSQLGeometryParser::ReadPolygon(int iShape)
{
if (nColType == MSSQLCOLTYPE_GEOGRAPHY)
{
- if ( chProps & SP_HASZVALUES )
+ if ( (chProps & SP_HASZVALUES) && (chProps & SP_HASMVALUES) )
+ poRing->setPoint(i, ReadY(iPoint), ReadX(iPoint), ReadZ(iPoint), ReadM(iPoint) );
+ else if ( chProps & SP_HASZVALUES )
poRing->setPoint(i, ReadY(iPoint), ReadX(iPoint), ReadZ(iPoint) );
+ else if (chProps & SP_HASMVALUES)
+ poRing->setPointM(i, ReadY(iPoint), ReadX(iPoint), ReadZ(iPoint) );
else
poRing->setPoint(i, ReadY(iPoint), ReadX(iPoint) );
}
else
{
- if ( chProps & SP_HASZVALUES )
+ if ( (chProps & SP_HASZVALUES) && (chProps & SP_HASMVALUES) )
+ poRing->setPoint(i, ReadX(iPoint), ReadY(iPoint), ReadZ(iPoint), ReadM(iPoint) );
+ else if ( chProps & SP_HASZVALUES )
poRing->setPoint(i, ReadX(iPoint), ReadY(iPoint), ReadZ(iPoint) );
+ else if ( chProps & SP_HASMVALUES )
+ poRing->setPointM(i, ReadX(iPoint), ReadY(iPoint), ReadZ(iPoint) );
else
poRing->setPoint(i, ReadX(iPoint), ReadY(iPoint) );
}
@@ -380,12 +412,13 @@ OGRErr OGRMSSQLGeometryParser::ParseSqlGeometry(unsigned char* pszInput,
chProps = ReadByte(5);
- if ( chProps & SP_HASMVALUES )
- nPointSize = 32;
- else if ( chProps & SP_HASZVALUES )
- nPointSize = 24;
- else
- nPointSize = 16;
+ nPointSize = 16;
+
+ if (chProps & SP_HASZVALUES)
+ nPointSize += 8;
+
+ if (chProps & SP_HASMVALUES)
+ nPointSize += 8;
if ( chProps & SP_ISSINGLEPOINT )
{
@@ -400,15 +433,29 @@ OGRErr OGRMSSQLGeometryParser::ParseSqlGeometry(unsigned char* pszInput,
if (nColType == MSSQLCOLTYPE_GEOGRAPHY)
{
- if (chProps & SP_HASZVALUES)
+ if ((chProps & SP_HASZVALUES) && (chProps & SP_HASMVALUES))
+ *poGeom = new OGRPoint(ReadY(0), ReadX(0), ReadZ(0), ReadM(0));
+ else if (chProps & SP_HASZVALUES)
*poGeom = new OGRPoint(ReadY(0), ReadX(0), ReadZ(0));
+ else if (chProps & SP_HASMVALUES)
+ {
+ *poGeom = new OGRPoint(ReadY(0), ReadX(0));
+ ((OGRPoint*)(*poGeom))->setM(ReadZ(0));
+ }
else
*poGeom = new OGRPoint(ReadY(0), ReadX(0));
}
else
{
- if (chProps & SP_HASZVALUES)
+ if ((chProps & SP_HASZVALUES) && (chProps & SP_HASMVALUES))
+ *poGeom = new OGRPoint(ReadX(0), ReadY(0), ReadZ(0), ReadM(0));
+ else if (chProps & SP_HASZVALUES)
*poGeom = new OGRPoint(ReadX(0), ReadY(0), ReadZ(0));
+ else if (chProps & SP_HASMVALUES)
+ {
+ *poGeom = new OGRPoint(ReadX(0), ReadY(0));
+ ((OGRPoint*)(*poGeom))->setM(ReadZ(0));
+ }
else
*poGeom = new OGRPoint(ReadX(0), ReadY(0));
}
@@ -429,11 +476,21 @@ OGRErr OGRMSSQLGeometryParser::ParseSqlGeometry(unsigned char* pszInput,
if (nColType == MSSQLCOLTYPE_GEOGRAPHY)
{
- if ( chProps & SP_HASZVALUES )
+ if ( (chProps & SP_HASZVALUES) && (chProps & SP_HASMVALUES) )
+ {
+ line->setPoint(0, ReadY(0), ReadX(0), ReadZ(0), ReadM(0));
+ line->setPoint(1, ReadY(1), ReadX(1), ReadZ(1), ReadM(1));
+ }
+ else if ( chProps & SP_HASZVALUES )
{
line->setPoint(0, ReadY(0), ReadX(0), ReadZ(0));
line->setPoint(1, ReadY(1), ReadX(1), ReadZ(1));
}
+ else if ( chProps & SP_HASMVALUES )
+ {
+ line->setPointM(0, ReadY(0), ReadX(0), ReadZ(0));
+ line->setPointM(1, ReadY(1), ReadX(1), ReadZ(1));
+ }
else
{
line->setPoint(0, ReadY(0), ReadX(0));
@@ -442,11 +499,21 @@ OGRErr OGRMSSQLGeometryParser::ParseSqlGeometry(unsigned char* pszInput,
}
else
{
- if ( chProps & SP_HASZVALUES )
+ if ( (chProps & SP_HASZVALUES) && (chProps & SP_HASMVALUES) )
+ {
+ line->setPoint(0, ReadX(0), ReadY(0), ReadZ(0), ReadM(0));
+ line->setPoint(1, ReadX(1), ReadY(1), ReadZ(1), ReadM(1));
+ }
+ else if ( chProps & SP_HASZVALUES )
{
line->setPoint(0, ReadX(0), ReadY(0), ReadZ(0));
line->setPoint(1, ReadX(1), ReadY(1), ReadZ(1));
}
+ else if ( chProps & SP_HASMVALUES )
+ {
+ line->setPointM(0, ReadX(0), ReadY(0), ReadZ(0));
+ line->setPointM(1, ReadX(1), ReadY(1), ReadZ(1));
+ }
else
{
line->setPoint(0, ReadX(0), ReadY(0));
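
To make the size bookkeeping above concrete, here is a standalone sketch of the per-point record size rule the parser now uses: 16 bytes for X/Y, plus 8 for Z and 8 for M when the corresponding property bits are set. The flag values are placeholders for the driver's SP_HASZVALUES / SP_HASMVALUES constants. Note also that for M-only geometries the M coordinate occupies the slot that ReadZ() reads, which is why the code above calls setM(ReadZ(...)).

    #include <cstdio>

    // Placeholder bit values; the real constants are defined by the driver.
    constexpr int HAS_Z = 0x01;
    constexpr int HAS_M = 0x02;

    static int PointSize( int chProps )
    {
        int nPointSize = 16;        // X + Y (two doubles)
        if( chProps & HAS_Z )
            nPointSize += 8;        // one more double for Z
        if( chProps & HAS_M )
            nPointSize += 8;        // one more double for M
        return nPointSize;
    }

    int main()
    {
        std::printf( "XY=%d XYZ=%d XYM=%d XYZM=%d\n",
                     PointSize(0), PointSize(HAS_Z),
                     PointSize(HAS_M), PointSize(HAS_Z | HAS_M) );  // 16 24 24 32
        return 0;
    }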
From 4b906b2dfd49c259e2bd3c53aece4b5a6139e2c9 Mon Sep 17 00:00:00 2001
From: Jerry Faust
Date: Sun, 28 Oct 2018 10:08:53 -0700
Subject: [PATCH 004/488] MSSQLSpatial: driver does not assign new ID following
an INSERT (fixes #1052) (#1053)
---
autotest/ogr/ogr_mssqlspatial.py | 8 ++++++++
.../ogrmssqlspatialtablelayer.cpp | 20 +++++++++++++++++--
2 files changed, 26 insertions(+), 2 deletions(-)
diff --git a/autotest/ogr/ogr_mssqlspatial.py b/autotest/ogr/ogr_mssqlspatial.py
index a403b196cb65..733c1e70e07b 100755
--- a/autotest/ogr/ogr_mssqlspatial.py
+++ b/autotest/ogr/ogr_mssqlspatial.py
@@ -225,6 +225,14 @@ def ogr_mssqlspatial_4():
'from file "' + wkt_filename + '"')
return 'fail'
+ ######################################################################
+ # Before reading back the record, verify that the newly added feature
+ # is returned from the CreateFeature method with a newly assigned FID.
+
+ if dst_feat.GetFID() == -1:
+ gdaltest.post_reason('Assigned FID was not returned in the new feature')
+ return 'fail'
+
######################################################################
# Read back the feature and get the geometry.
diff --git a/gdal/ogr/ogrsf_frmts/mssqlspatial/ogrmssqlspatialtablelayer.cpp b/gdal/ogr/ogrsf_frmts/mssqlspatial/ogrmssqlspatialtablelayer.cpp
index ce93c58e8129..0cbf181929cd 100644
--- a/gdal/ogr/ogrsf_frmts/mssqlspatial/ogrmssqlspatialtablelayer.cpp
+++ b/gdal/ogr/ogrsf_frmts/mssqlspatial/ogrmssqlspatialtablelayer.cpp
@@ -2060,11 +2060,18 @@ OGRErr OGRMSSQLSpatialTableLayer::ICreateFeature( OGRFeature *poFeature )
if (oStatement.GetCommand()[strlen(oStatement.GetCommand()) - 1] != ']')
{
/* no fields were added */
- oStatement.Appendf( "DEFAULT VALUES;" );
+ if (nFID == OGRNullFID && pszFIDColumn != nullptr && bIsIdentityFid)
+ oStatement.Appendf(" OUTPUT INSERTED.[%s] DEFAULT VALUES;", GetFIDColumn());
+ else
+ oStatement.Appendf( "DEFAULT VALUES;" );
}
else
{
- oStatement.Appendf( ") VALUES (" );
+ /* prepend VALUES section */
+ if (nFID == OGRNullFID && pszFIDColumn != nullptr && bIsIdentityFid)
+ oStatement.Appendf(") OUTPUT INSERTED.[%s] VALUES (", GetFIDColumn());
+ else
+ oStatement.Appendf( ") VALUES (" );
/* Set the geometry */
bNeedComma = FALSE;
@@ -2259,6 +2266,15 @@ OGRErr OGRMSSQLSpatialTableLayer::ICreateFeature( OGRFeature *poFeature )
return OGRERR_FAILURE;
}
+ else if(nFID == OGRNullFID && pszFIDColumn != nullptr && bIsIdentityFid)
+ {
+ // fetch new ID and set it into the feature
+ if (oStatement.Fetch())
+ {
+ GIntBig newID = atoll(oStatement.GetColData(0));
+ poFeature->SetFID(newID);
+ }
+ }
for( i = 0; i < bind_num; i++ )
CPLFree(bind_buffer[i]);
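
For readers unfamiliar with the OUTPUT clause, a small illustration of the statement shapes generated after this change when the FID column is an identity column and no FID was supplied. Table and column names here are examples only, not the driver's actual output.

    #include <cstdio>

    int main()
    {
        // No attribute fields were added:
        std::puts( "INSERT INTO [dbo].[mytable] OUTPUT INSERTED.[fid] DEFAULT VALUES;" );
        // Fields present; OUTPUT sits between the column list and the VALUES section:
        std::puts( "INSERT INTO [dbo].[mytable] ([geom], [name]) "
                   "OUTPUT INSERTED.[fid] VALUES (?, ?);" );
        return 0;
    }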
From 59411f1decb8a12fb34a595470e51df735b0692e Mon Sep 17 00:00:00 2001
From: Hiroshi Miura
Date: Mon, 29 Oct 2018 14:18:11 +0900
Subject: [PATCH 005/488] autotest: gdriver_loslas: see inside jar file for
downloaded data
Fix the loslas test failure caused by the test data not being found.
The downloaded NADCON.zip contains nadcon.jar, which holds the grid data.
The test script now unzips NADCON.zip and nadcon.jar to extract the test data.
Signed-off-by: Hiroshi Miura
---
autotest/gdrivers/loslas.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/autotest/gdrivers/loslas.py b/autotest/gdrivers/loslas.py
index 0eea8f932533..f0e7d19e5dea 100755
--- a/autotest/gdrivers/loslas.py
+++ b/autotest/gdrivers/loslas.py
@@ -48,7 +48,15 @@ def loslas_online_1():
except OSError:
return 'skip'
- tst = gdaltest.GDALTest('LOSLAS', '/vsizip/tmp/cache/NADCON.zip/wyhpgn.los', 1, 0, filename_absolute=1)
+ try:
+ gdaltest.unzip('tmp/cache/NADCON', 'tmp/cache/NADCON.zip')
+ os.stat('tmp/cache/NADCON/nadcon.jar')
+ gdaltest.unzip('tmp/cache/NADCON', 'tmp/cache/NADCON/nadcon.jar')
+ os.stat('tmp/cache/NADCON/grids/wyhpgn.los')
+ except OSError:
+ return 'skip'
+
+ tst = gdaltest.GDALTest('LOSLAS', 'tmp/cache/NADCON/grids/wyhpgn.los', 1, 0, filename_absolute=1)
gt = (-111.625, 0.25, 0.0, 45.625, 0.0, -0.25)
stats = (-0.0080000003799796, 0.031125999987125001, 0.0093017323318172005, 0.0075646520354096004)
return tst.testOpen(check_gt=gt, check_stat=stats, check_prj='WGS84')
From d998b440f368dd43bde5db99445542c31604eb41 Mon Sep 17 00:00:00 2001
From: Hiroshi Miura
Date: Mon, 29 Oct 2018 09:21:32 +0900
Subject: [PATCH 006/488] autotest: update environment variable requirements
- Require GDAL_DOWNLOAD_TEST_DATA=YES to enable downloads.
  It also accepts NO to explicitly disable downloads,
  which is consistent with the behavior of GDAL_RUN_SLOW_TESTS.
- Update the messages shown when GDAL_RUN_SLOW_TESTS and/or GDAL_DOWNLOAD_TEST_DATA
  is set to NO or left undefined, to say so.
- introduce gdaltest.download_test_data() for consistent env detection
- update netcdf test notice for environment variable
Signed-off-by: Hiroshi Miura
---
autotest/gdrivers/netcdf_cf.py | 4 ++--
autotest/pymod/gdaltest.py | 26 +++++++++++++++++---------
2 files changed, 19 insertions(+), 11 deletions(-)
diff --git a/autotest/gdrivers/netcdf_cf.py b/autotest/gdrivers/netcdf_cf.py
index 686be05b51fd..87276857a1e8 100755
--- a/autotest/gdrivers/netcdf_cf.py
+++ b/autotest/gdrivers/netcdf_cf.py
@@ -102,9 +102,9 @@ def netcdf_cf_setup():
return 'success'
# skip http method if GDAL_DOWNLOAD_TEST_DATA and GDAL_RUN_SLOW_TESTS are not defined
- if 'GDAL_DOWNLOAD_TEST_DATA' not in os.environ:
+ if not gdaltest.download_test_data():
print('NOTICE: skipping netcdf CF compliance checks')
- print('to enable remote http checker script, define GDAL_DOWNLOAD_TEST_DATA')
+ print('to enable remote http checker script, define GDAL_DOWNLOAD_TEST_DATA=YES')
return 'success'
if not gdaltest.run_slow_tests():
diff --git a/autotest/pymod/gdaltest.py b/autotest/pymod/gdaltest.py
index 72b078d37bc0..4f78f1b146de 100644
--- a/autotest/pymod/gdaltest.py
+++ b/autotest/pymod/gdaltest.py
@@ -247,9 +247,9 @@ def summarize():
else:
print('Duration: %02.2fs' % duration)
if count_skipped_tests_download != 0:
- print('As GDAL_DOWNLOAD_TEST_DATA environment variable is not defined, %d tests relying on data to downloaded from the Web have been skipped' % count_skipped_tests_download)
+ print('As GDAL_DOWNLOAD_TEST_DATA environment variable is not defined or set to NO, %d tests relying on data to be downloaded from the Web have been skipped' % count_skipped_tests_download)
if count_skipped_tests_slow != 0:
- print('As GDAL_RUN_SLOW_TESTS environment variable is not defined, %d "slow" tests have been skipped' % count_skipped_tests_slow)
+ print('As GDAL_RUN_SLOW_TESTS environment variable is not defined or set to NO, %d "slow" tests have been skipped' % count_skipped_tests_slow)
print('')
sys.path.append('gcore')
@@ -1318,12 +1318,11 @@ def download_file(url, filename=None, download_size=-1, force_download=False, ma
elif filename.startswith(base_dir + '/'):
filename = filename[len(base_dir + '/'):]
- global count_skipped_tests_download
try:
os.stat(base_dir + '/' + filename)
return True
except OSError:
- if 'GDAL_DOWNLOAD_TEST_DATA' in os.environ or force_download:
+ if force_download or download_test_data():
val = None
start_time = time.time()
try:
@@ -1390,9 +1389,6 @@ def download_file(url, filename=None, download_size=-1, force_download=False, ma
print('Cannot write %s' % (filename))
return False
else:
- if count_skipped_tests_download == 0:
- print('As GDAL_DOWNLOAD_TEST_DATA environment variable is not defined, some tests relying on data to downloaded from the Web will be skipped')
- count_skipped_tests_download = count_skipped_tests_download + 1
return False
@@ -1659,16 +1655,28 @@ def neginf():
return -1e400
###############################################################################
-# Has the user requested to run the slow tests
+# Has the user requested to download test data
+def download_test_data():
+ global count_skipped_tests_download
+ val = gdal.GetConfigOption('GDAL_DOWNLOAD_TEST_DATA', None)
+ if val != 'yes' and val != 'YES':
+ if count_skipped_tests_download == 0:
+ print('As GDAL_DOWNLOAD_TEST_DATA environment variable is not defined or set to NO, some tests relying on data to be downloaded from the Web will be skipped')
+ count_skipped_tests_download = count_skipped_tests_download + 1
+ return False
+ return True
+
+###############################################################################
+# Has the user requested to run the slow tests
def run_slow_tests():
global count_skipped_tests_slow
val = gdal.GetConfigOption('GDAL_RUN_SLOW_TESTS', None)
if val != 'yes' and val != 'YES':
if count_skipped_tests_slow == 0:
- print('As GDAL_RUN_SLOW_TESTS environment variable is not defined, some "slow" tests will be skipped')
+ print('As GDAL_RUN_SLOW_TESTS environment variable is not defined or set to NO, some "slow" tests will be skipped')
count_skipped_tests_slow = count_skipped_tests_slow + 1
return False
From 98906f740473968e60649eaf73b0138d814062bb Mon Sep 17 00:00:00 2001
From: Hugo Mercier
Date: Mon, 29 Oct 2018 13:53:48 +0100
Subject: [PATCH 007/488] Fix an off-by-one
---
autotest/cpp/test_marching_squares_contour.cpp | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/autotest/cpp/test_marching_squares_contour.cpp b/autotest/cpp/test_marching_squares_contour.cpp
index ee53dfccbb97..a682e4d49459 100644
--- a/autotest/cpp/test_marching_squares_contour.cpp
+++ b/autotest/cpp/test_marching_squares_contour.cpp
@@ -113,11 +113,11 @@ class TestRingAppender
// we represent them with a vector, we need to find a common "first" point
Point pfirst = aRing[0];
size_t offset = 0;
- for ( ; pfirst != bRing[offset]; offset++ ) {
- if ( offset >= bRing.size() ) {
- // can't find a common point
- return false;
- }
+ while ( offset < bRing.size() && pfirst != bRing[offset] )
+ offset++;
+ if ( offset >= bRing.size() ) {
+ // can't find a common point
+ return false;
}
// now compare each point of the two rings
for ( size_t i = 0; i < aRing.size(); i++ ) {
From 03663aa9cc098380fe5420faa4f424c3dac45a08 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Mon, 29 Oct 2018 23:18:12 +0100
Subject: [PATCH 008/488] GeoJSON: add partial support for field names
differing by case (fixes #1057)
- Should work on the reading and writing side of the GeoJSON driver itself
- Some support on the writing side of ogr2ogr, sufficient for GeoJSON->GeoJSON
conversion case
- ... but a lot/most other places in GDAL assume case insensitivity
---
autotest/ogr/ogr_geojson.py | 22 +++++++++++++
gdal/apps/ogr2ogr_lib.cpp | 22 +++++++++----
gdal/ogr/ogr_feature.h | 1 +
gdal/ogr/ogrfeaturedefn.cpp | 31 +++++++++++++++++++
.../ogrsf_frmts/geojson/ogrgeojsonreader.cpp | 24 +++++++-------
.../geojson/ogrgeojsonwritelayer.cpp | 2 +-
.../ogrsf_frmts/geojson/ogrgeojsonwriter.cpp | 6 ++--
7 files changed, 87 insertions(+), 21 deletions(-)
diff --git a/autotest/ogr/ogr_geojson.py b/autotest/ogr/ogr_geojson.py
index 0615b49d832e..2116d0db7596 100755
--- a/autotest/ogr/ogr_geojson.py
+++ b/autotest/ogr/ogr_geojson.py
@@ -4187,6 +4187,27 @@ def ogr_esrijson_without_geometryType():
return 'success'
+
+###############################################################################
+
+
+def ogr_geojson_read_fields_with_different_case():
+
+ ds = ogr.Open("""{
+"type": "FeatureCollection",
+"features": [
+{ "type": "Feature", "id": "my_id", "geometry": null, "properties":
+ { "ID": "MY_ID", "x": "foo", "X": "FOO"} }
+]}""")
+
+ lyr = ds.GetLayer(0)
+ f = lyr.GetNextFeature()
+ if f.GetField(0) != 'my_id' or f.GetField(1) != 'MY_ID' or f.GetField(2) != 'foo' or f.GetField(3) != 'FOO':
+ f.DumpReadable()
+ return 'fail'
+
+ return 'success'
+
###############################################################################
@@ -4300,6 +4321,7 @@ def ogr_geojson_cleanup():
ogr_geojson_append_flush,
ogr_geojson_empty_geometrycollection,
ogr_esrijson_without_geometryType,
+ ogr_geojson_read_fields_with_different_case,
ogr_geojson_cleanup]
if __name__ == '__main__':
diff --git a/gdal/apps/ogr2ogr_lib.cpp b/gdal/apps/ogr2ogr_lib.cpp
index 7726989d8f2d..cb357f5802db 100644
--- a/gdal/apps/ogr2ogr_lib.cpp
+++ b/gdal/apps/ogr2ogr_lib.cpp
@@ -3930,6 +3930,16 @@ TargetLayerInfo* SetupTargetLayer::Setup(OGRLayer* poSrcLayer,
{
int nDstFieldCount = poDstFDefn ? poDstFDefn->GetFieldCount() : 0;
+ const bool caseInsensitive =
+ !EQUAL(m_poDstDS->GetDriver()->GetDescription(), "GeoJSON");
+ auto formatName = [caseInsensitive](const char* name) {
+ if( caseInsensitive ) {
+ return CPLString(name).toupper();
+ } else {
+ return CPLString(name);
+ }
+ };
+
/* Save the map of existing fields, before creating new ones */
/* This helps when converting a source layer that has duplicated field names */
/* which is a bad idea */
@@ -3938,7 +3948,7 @@ TargetLayerInfo* SetupTargetLayer::Setup(OGRLayer* poSrcLayer,
for( int iField = 0; iField < nDstFieldCount; iField++ )
{
const char* pszFieldName = poDstFDefn->GetFieldDefn(iField)->GetNameRef();
- CPLString osUpperFieldName(CPLString(pszFieldName).toupper());
+ CPLString osUpperFieldName(formatName(pszFieldName));
oSetDstFieldNames.insert(osUpperFieldName);
if( oMapPreExistingFields.find(osUpperFieldName) ==
oMapPreExistingFields.end() )
@@ -3976,7 +3986,7 @@ TargetLayerInfo* SetupTargetLayer::Setup(OGRLayer* poSrcLayer,
for( int i = 0; i < poSrcFDefn->GetFieldCount(); i++ )
{
oSetSrcFieldNames.insert(
- CPLString(poSrcFDefn->GetFieldDefn(i)->GetNameRef()).toupper());
+ formatName(poSrcFDefn->GetFieldDefn(i)->GetNameRef()));
}
for( size_t i = 0; i < anSrcFieldIndices.size(); i++ )
@@ -4003,7 +4013,7 @@ TargetLayerInfo* SetupTargetLayer::Setup(OGRLayer* poSrcLayer,
/* The field may have been already created at layer creation */
std::map<CPLString, int>::iterator oIter =
- oMapPreExistingFields.find(CPLString(oFieldDefn.GetNameRef()).toupper());
+ oMapPreExistingFields.find(formatName(oFieldDefn.GetNameRef()));
if( oIter != oMapPreExistingFields.end() )
{
panMap[iField] = oIter->second;
@@ -4014,12 +4024,12 @@ TargetLayerInfo* SetupTargetLayer::Setup(OGRLayer* poSrcLayer,
/* In case the field name already exists in the target layer, */
/* build a unique field name */
if( oSetDstFieldNames.find(
- CPLString(oFieldDefn.GetNameRef()).toupper()) !=
+ formatName(oFieldDefn.GetNameRef())) !=
oSetDstFieldNames.end() )
{
int nTry = 1;
CPLString osTmpNameRaddixUC(oFieldDefn.GetNameRef());
- osTmpNameRaddixUC.toupper();
+ osTmpNameRaddixUC = formatName(osTmpNameRaddixUC);
CPLString osTmpNameUC = osTmpNameRaddixUC;
osTmpNameUC.reserve(osTmpNameUC.size() + 10);
while( true )
@@ -4069,7 +4079,7 @@ TargetLayerInfo* SetupTargetLayer::Setup(OGRLayer* poSrcLayer,
"Field '%s' already exists. Renaming it as '%s'",
poSrcFieldDefn->GetNameRef(), pszNewFieldName);
}
- oSetDstFieldNames.insert(CPLString(pszNewFieldName).toupper());
+ oSetDstFieldNames.insert(formatName(pszNewFieldName));
}
panMap[iField] = nDstFieldCount;
diff --git a/gdal/ogr/ogr_feature.h b/gdal/ogr/ogr_feature.h
index be3f7856d34a..572a5cff487a 100644
--- a/gdal/ogr/ogr_feature.h
+++ b/gdal/ogr/ogr_feature.h
@@ -284,6 +284,7 @@ class CPL_DLL OGRFeatureDefn
virtual OGRFieldDefn *GetFieldDefn( int i );
virtual const OGRFieldDefn *GetFieldDefn( int i ) const;
virtual int GetFieldIndex( const char * ) const;
+ int GetFieldIndexCaseSensitive( const char * ) const;
virtual void AddFieldDefn( OGRFieldDefn * );
virtual OGRErr DeleteFieldDefn( int iField );
diff --git a/gdal/ogr/ogrfeaturedefn.cpp b/gdal/ogr/ogrfeaturedefn.cpp
index 2b926bae15c4..ea0f9423c955 100644
--- a/gdal/ogr/ogrfeaturedefn.cpp
+++ b/gdal/ogr/ogrfeaturedefn.cpp
@@ -1228,6 +1228,37 @@ int OGRFeatureDefn::GetFieldIndex( const char * pszFieldName ) const
return -1;
}
+/************************************************************************/
+/* GetFieldIndexCaseSensitive() */
+/************************************************************************/
+
+/**
+ * \brief Find field by name, in a case sensitive way.
+ *
+ * The field index of the first field matching the passed field name is returned.
+ *
+ * @param pszFieldName the field name to search for.
+ *
+ * @return the field index, or -1 if no match found.
+ */
+
+int OGRFeatureDefn::GetFieldIndexCaseSensitive( const char * pszFieldName ) const
+
+{
+ GetFieldCount();
+ for( int i = 0; i < nFieldCount; i++ )
+ {
+ const OGRFieldDefn* poFDefn = GetFieldDefn(i);
+ if( poFDefn != nullptr &&
+ strcmp(pszFieldName, poFDefn->GetNameRef() ) == 0 )
+ {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
/************************************************************************/
/* OGR_FD_GetFieldIndex() */
/************************************************************************/
diff --git a/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonreader.cpp b/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonreader.cpp
index cb8c62d23d06..1ff9234711b8 100644
--- a/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonreader.cpp
+++ b/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonreader.cpp
@@ -1476,7 +1476,7 @@ void OGRGeoJSONBaseReader::FinalizeLayerDefn(OGRLayer* poLayer,
if( !bFeatureLevelIdAsFID_ )
{
- const int idx = poLayerDefn->GetFieldIndex( "id" );
+ const int idx = poLayerDefn->GetFieldIndexCaseSensitive( "id" );
if( idx >= 0 )
{
OGRFieldDefn* poFDefn = poLayerDefn->GetFieldDefn(idx);
@@ -1535,7 +1535,7 @@ void OGRGeoJSONReaderAddOrUpdateField(
return;
}
- int nIndex = poDefn->GetFieldIndex(pszKey);
+ int nIndex = poDefn->GetFieldIndexCaseSensitive(pszKey);
if( nIndex < 0 )
{
OGRFieldSubType eSubType;
@@ -1787,7 +1787,7 @@ bool OGRGeoJSONBaseReader::GenerateFeatureDefn( OGRLayer* poLayer,
json_object* poObjId = OGRGeoJSONFindMemberByName( poObj, "id" );
if( poObjId )
{
- const int nIdx = poDefn->GetFieldIndex( "id" );
+ const int nIdx = poDefn->GetFieldIndexCaseSensitive( "id" );
if( nIdx < 0 )
{
if( json_object_get_type(poObjId) == json_type_int )
@@ -1937,7 +1937,7 @@ bool OGRGeoJSONBaseReader::GenerateFeatureDefn( OGRLayer* poLayer,
it.entry = nullptr;
json_object_object_foreachC( poObjProps, it )
{
- int nFldIndex = poDefn->GetFieldIndex( it.key );
+ int nFldIndex = poDefn->GetFieldIndexCaseSensitive( it.key );
if( -1 == nFldIndex && !bIsGeocouchSpatiallistFormat )
{
// Detect the special kind of GeoJSON output by a spatiallist of
@@ -1970,7 +1970,7 @@ bool OGRGeoJSONBaseReader::GenerateFeatureDefn( OGRLayer* poLayer,
"TRUE"));
if( bFlattenGeocouchSpatiallistFormat )
{
- poDefn->DeleteFieldDefn(poDefn->GetFieldIndex("type"));
+ poDefn->DeleteFieldDefn(poDefn->GetFieldIndexCaseSensitive("type"));
bIsGeocouchSpatiallistFormat = true;
return GenerateFeatureDefn(poLayer, poObj);
}
@@ -2008,7 +2008,7 @@ bool OGRGeoJSONBaseReader::GenerateFeatureDefn( OGRLayer* poLayer,
strcmp(it.key, "bbox") != 0 &&
strcmp(it.key, "center") != 0 )
{
- int nFldIndex = poDefn->GetFieldIndex( it.key );
+ int nFldIndex = poDefn->GetFieldIndexCaseSensitive( it.key );
if( -1 == nFldIndex )
{
OGRGeoJSONReaderAddOrUpdateField(poDefn, it.key, it.val,
@@ -2124,7 +2124,8 @@ static void OGRGeoJSONReaderSetFieldNestedAttribute( OGRLayer* poLayer,
}
else
{
- const int nField = poFeature->GetFieldIndex(osAttrName);
+ const int nField = poFeature->GetDefnRef()->
+ GetFieldIndexCaseSensitive(osAttrName);
OGRGeoJSONReaderSetField(poLayer, poFeature, nField,
osAttrName, it.val, false, 0);
}
@@ -2292,7 +2293,8 @@ OGRFeature* OGRGeoJSONBaseReader::ReadFeature( OGRLayer* poLayer,
{
CPLAssert( nullptr != poObj );
- OGRFeature* poFeature = new OGRFeature( poLayer->GetLayerDefn() );
+ OGRFeatureDefn* poFDefn = poLayer->GetLayerDefn();
+ OGRFeature* poFeature = new OGRFeature( poFDefn );
if( bStoreNativeData_ )
{
@@ -2337,7 +2339,7 @@ OGRFeature* OGRGeoJSONBaseReader::ReadFeature( OGRLayer* poLayer,
it.entry = nullptr;
json_object_object_foreachC( poObjProps, it )
{
- const int nField = poFeature->GetFieldIndex(it.key);
+ const int nField = poFDefn->GetFieldIndexCaseSensitive(it.key);
if( nField < 0 &&
!( bFlattenNestedAttributes_ && it.val != nullptr &&
json_object_get_type(it.val) == json_type_object) )
@@ -2362,7 +2364,7 @@ OGRFeature* OGRGeoJSONBaseReader::ReadFeature( OGRLayer* poLayer,
it.entry = nullptr;
json_object_object_foreachC( poObj, it )
{
- const int nFldIndex = poFeature->GetFieldIndex(it.key);
+ const int nFldIndex = poFDefn->GetFieldIndexCaseSensitive(it.key);
if( nFldIndex >= 0 )
{
if( it.val )
@@ -2390,7 +2392,7 @@ OGRFeature* OGRGeoJSONBaseReader::ReadFeature( OGRLayer* poLayer,
/* -------------------------------------------------------------------- */
else if( nullptr != poObjId )
{
- const int nIdx = poLayer->GetLayerDefn()->GetFieldIndex( "id" );
+ const int nIdx = poFDefn->GetFieldIndexCaseSensitive( "id" );
if( nIdx >= 0 && !poFeature->IsFieldSet(nIdx) )
{
poFeature->SetField(nIdx, json_object_get_string(poObjId));
diff --git a/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwritelayer.cpp b/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwritelayer.cpp
index ea2525d5e5a3..8dcd788e8e1c 100644
--- a/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwritelayer.cpp
+++ b/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwritelayer.cpp
@@ -290,7 +290,7 @@ OGRErr OGRGeoJSONWriteLayer::ICreateFeature( OGRFeature* poFeature )
OGRErr OGRGeoJSONWriteLayer::CreateField( OGRFieldDefn* poField,
int /* bApproxOK */ )
{
- if( poFeatureDefn_->GetFieldIndex(poField->GetNameRef()) >= 0 )
+ if( poFeatureDefn_->GetFieldIndexCaseSensitive(poField->GetNameRef()) >= 0 )
{
CPLDebug( "GeoJSON", "Field '%s' already present in schema",
poField->GetNameRef() );
diff --git a/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwriter.cpp b/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwriter.cpp
index 8ea788192411..89e3d12f0bb8 100644
--- a/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwriter.cpp
+++ b/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwriter.cpp
@@ -636,7 +636,7 @@ json_object* OGRGeoJSONWriteFeature( OGRFeature* poFeature,
if( it.val != nullptr )
{
- int nIdx = poFeature->GetFieldIndex("id");
+ int nIdx = poFeature->GetDefnRef()->GetFieldIndexCaseSensitive("id");
if( eType == json_type_string &&
nIdx >= 0 &&
poFeature->GetFieldDefnRef(nIdx)->GetType() == OFTString &&
@@ -678,7 +678,7 @@ json_object* OGRGeoJSONWriteFeature( OGRFeature* poFeature,
/* -------------------------------------------------------------------- */
if( !oOptions.osIDField.empty() )
{
- int nIdx = poFeature->GetFieldIndex(oOptions.osIDField);
+ int nIdx = poFeature->GetDefnRef()->GetFieldIndexCaseSensitive(oOptions.osIDField);
if( nIdx >= 0 )
{
if( (oOptions.bForceIDFieldType &&
@@ -797,7 +797,7 @@ json_object* OGRGeoJSONWriteAttributes( OGRFeature* poFeature,
OGRFeatureDefn* poDefn = poFeature->GetDefnRef();
const int nIDField = !oOptions.osIDField.empty() ?
- poFeature->GetFieldIndex(oOptions.osIDField) : -1;
+ poDefn->GetFieldIndexCaseSensitive(oOptions.osIDField) : -1;
const int nFieldCount = poDefn->GetFieldCount();
for( int nField = 0; nField < nFieldCount; ++nField )
From e6e6ac477f8cbb8c817135caaf1154c3c6989b0c Mon Sep 17 00:00:00 2001
From: Hiroshi Miura
Date: Tue, 30 Oct 2018 21:33:31 +0900
Subject: [PATCH 009/488] autotest: fix UnicodeEncodeError when test fails in
python2
When the ogr_gmlas.py test runs and a test fails,
Python raises a UnicodeEncodeError instead of showing
the test result.
Signed-off-by: Hiroshi Miura
---
autotest/ogr/ogr_gmlas.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/autotest/ogr/ogr_gmlas.py b/autotest/ogr/ogr_gmlas.py
index d5dc2acf00e9..de19582f2890 100755
--- a/autotest/ogr/ogr_gmlas.py
+++ b/autotest/ogr/ogr_gmlas.py
@@ -75,7 +75,7 @@ def compare_ogrinfo_output(gmlfile, reffile, options=''):
if ret != expected:
gdaltest.post_reason('fail')
print('Got:')
- print(ret)
+ print(ret.encode('utf-8'))
open(tmpfilename, 'wb').write(ret.encode('utf-8'))
print('Diff:')
os.system('diff -u ' + reffile + ' ' + tmpfilename)
From 5ed09343b361b78ca5dc2fb55ec882be926e515e Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Tue, 30 Oct 2018 18:29:51 +0100
Subject: [PATCH 010/488] Shape: avoid being dependent on correctness of file
size field in .shp. Fixes
https://lists.osgeo.org/pipermail/gdal-dev/2018-October/049218.html
---
gdal/ogr/ogrsf_frmts/shape/shpopen.c | 52 +++++++++++++++-------------
1 file changed, 27 insertions(+), 25 deletions(-)
diff --git a/gdal/ogr/ogrsf_frmts/shape/shpopen.c b/gdal/ogr/ogrsf_frmts/shape/shpopen.c
index 81b245e57cd0..b2d0f5c2f2fc 100644
--- a/gdal/ogr/ogrsf_frmts/shape/shpopen.c
+++ b/gdal/ogr/ogrsf_frmts/shape/shpopen.c
@@ -2151,33 +2151,35 @@ SHPReadObject( SHPHandle psSHP, int hEntity )
/* Before allocating too much memory, check that the file is big enough */
/* and do not trust the file size in the header the first time we */
/* need to allocate more than 10 MB */
- if( nNewBufSize >= 10 * 1024 * 1024 &&
- psSHP->nBufSize < 10 * 1024 * 1024 )
- {
- SAOffset nFileSize;
- psSHP->sHooks.FSeek( psSHP->fpSHP, 0, 2 );
- nFileSize = psSHP->sHooks.FTell(psSHP->fpSHP);
- if( nFileSize >= UINT_MAX )
- psSHP->nFileSize = UINT_MAX;
- else
- psSHP->nFileSize = STATIC_CAST(unsigned int, nFileSize);
- }
-
- if( psSHP->panRecOffset[hEntity] >= psSHP->nFileSize ||
- /* We should normally use nEntitySize instead of*/
- /* psSHP->panRecSize[hEntity] in the below test, but because of */
- /* the case of non conformant .shx files detailed a bit below, */
- /* let be more tolerant */
- psSHP->panRecSize[hEntity] > psSHP->nFileSize - psSHP->panRecOffset[hEntity] )
+ if( nNewBufSize >= 10 * 1024 * 1024 )
{
- char str[128];
- snprintf( str, sizeof(str),
- "Error in fread() reading object of size %d at offset %u from .shp file",
- nEntitySize, psSHP->panRecOffset[hEntity] );
- str[sizeof(str)-1] = '\0';
+ if( psSHP->nBufSize < 10 * 1024 * 1024 )
+ {
+ SAOffset nFileSize;
+ psSHP->sHooks.FSeek( psSHP->fpSHP, 0, 2 );
+ nFileSize = psSHP->sHooks.FTell(psSHP->fpSHP);
+ if( nFileSize >= UINT_MAX )
+ psSHP->nFileSize = UINT_MAX;
+ else
+ psSHP->nFileSize = STATIC_CAST(unsigned int, nFileSize);
+ }
- psSHP->sHooks.Error( str );
- return SHPLIB_NULLPTR;
+ if( psSHP->panRecOffset[hEntity] >= psSHP->nFileSize ||
+ /* We should normally use nEntitySize instead of*/
+ /* psSHP->panRecSize[hEntity] in the below test, but because of */
+ /* the case of non conformant .shx files detailed a bit below, */
+ /* let be more tolerant */
+ psSHP->panRecSize[hEntity] > psSHP->nFileSize - psSHP->panRecOffset[hEntity] )
+ {
+ char str[128];
+ snprintf( str, sizeof(str),
+ "Error in fread() reading object of size %d at offset %u from .shp file",
+ nEntitySize, psSHP->panRecOffset[hEntity] );
+ str[sizeof(str)-1] = '\0';
+
+ psSHP->sHooks.Error( str );
+ return SHPLIB_NULLPTR;
+ }
}
pabyRecNew = STATIC_CAST(uchar *, SfRealloc(psSHP->pabyRec,nNewBufSize));
From a91aa00823f19d395f52296b58196459100b616a Mon Sep 17 00:00:00 2001
From: Hiroshi Miura
Date: Wed, 31 Oct 2018 02:53:45 +0900
Subject: [PATCH 011/488] Vagrant: workaround for #1058 (#1059)
---
Vagrantfile | 2 +-
gdal/scripts/vagrant/gdal-mingw.sh | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/Vagrantfile b/Vagrantfile
index caf03c057318..5c185a2cbbc0 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -151,7 +151,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
"mingw-w64-tools",
"gdb-mingw-w64-target",
"libgeos-mingw-w64-dev",
- "libproj-mingw-w64-dev",
+ #"libproj-mingw-w64-dev",
"cmake3-curses-gui",
"gdb",
"gdbserver",
diff --git a/gdal/scripts/vagrant/gdal-mingw.sh b/gdal/scripts/vagrant/gdal-mingw.sh
index 9719fb113787..791bd921b310 100755
--- a/gdal/scripts/vagrant/gdal-mingw.sh
+++ b/gdal/scripts/vagrant/gdal-mingw.sh
@@ -33,7 +33,7 @@ ln -sf /usr/x86_64-w64-mingw32/lib/libgeos-3-5-0.dll "$HOME/.wine/drive_c/window
(cd /home/vagrant/gnumake-build-mingw-w64
CC="ccache x86_64-w64-mingw32-gcc" CXX="ccache x86_64-w64-mingw32-g++" LD=x86_64-w64-mingw32-ld \
- ./configure --prefix=/usr/x86_64-w64-mingw32 --host=x86_64-w64-mingw32 --with-geos --with-proj-static
+ ./configure --prefix=/usr/x86_64-w64-mingw32 --host=x86_64-w64-mingw32 --with-geos #--with-proj
ln -sf "$PWD/.libs/libgdal-20.dll" "$HOME/.wine/drive_c/windows"
# Python bindings
From 87f2ab2a6813231ec87acf5e47dca60f4ca95c08 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Tue, 30 Oct 2018 19:01:32 +0100
Subject: [PATCH 012/488] CONTRIBUTING.md: enhance backporting section [ci
skip]
---
CONTRIBUTING.md | 3 +++
1 file changed, 3 insertions(+)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5a78b50f1f22..22d516b84436 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -103,7 +103,10 @@ Backporting bugfixes from master to a stable branch
git checkout master
With git log, identify the sha1sum of the commit you want to backport
git checkout 2.2 (if you want to backport to 2.2)
+git pull origin 2.2
+(git checkout -b branch_name: if you intend to submit the backport as a pull request)
git cherry-pick the_sha1_sum
+git push ...
```
If changes are needed, do them and ```git commit -a --amend```
From 48e64636d25ecb2b76fdbe1985a19ce78bc50bcc Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Tue, 30 Oct 2018 19:10:16 +0100
Subject: [PATCH 013/488] Spatialite: read table name in its original case
(fixes #1060, from suggestion by @aledonati)
---
.../ogrsf_frmts/sqlite/ogrsqlitedatasource.cpp | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/gdal/ogr/ogrsf_frmts/sqlite/ogrsqlitedatasource.cpp b/gdal/ogr/ogrsf_frmts/sqlite/ogrsqlitedatasource.cpp
index 5f034fa3cc10..09b06d54eb8e 100644
--- a/gdal/ogr/ogrsf_frmts/sqlite/ogrsqlitedatasource.cpp
+++ b/gdal/ogr/ogrsf_frmts/sqlite/ogrsqlitedatasource.cpp
@@ -1653,9 +1653,11 @@ int OGRSQLiteDataSource::Open( GDALOpenInfo* poOpenInfo)
/* -------------------------------------------------------------------- */
sqlite3_free( pszErrMsg );
rc = sqlite3_get_table( hDB,
- "SELECT f_table_name, f_geometry_column, "
- "type, coord_dimension, srid, "
- "spatial_index_enabled FROM geometry_columns "
+ "SELECT sm.name, gc.f_geometry_column, "
+ "gc.type, gc.coord_dimension, gc.srid, "
+ "gc.spatial_index_enabled FROM geometry_columns gc "
+ "JOIN sqlite_master sm ON "
+ "LOWER(gc.f_table_name)=LOWER(sm.name) "
"LIMIT 10000",
&papszResult, &nRowCount,
&nColCount, &pszErrMsg );
@@ -1664,9 +1666,11 @@ int OGRSQLiteDataSource::Open( GDALOpenInfo* poOpenInfo)
/* Test with SpatiaLite 4.0 schema */
sqlite3_free( pszErrMsg );
rc = sqlite3_get_table( hDB,
- "SELECT f_table_name, f_geometry_column, "
- "geometry_type, coord_dimension, srid, "
- "spatial_index_enabled FROM geometry_columns "
+ "SELECT sm.name, gc.f_geometry_column, "
+ "gc.geometry_type, gc.coord_dimension, gc.srid, "
+ "gc.spatial_index_enabled FROM geometry_columns gc "
+ "JOIN sqlite_master sm ON "
+ "LOWER(gc.f_table_name)=LOWER(sm.name) "
"LIMIT 10000",
&papszResult, &nRowCount,
&nColCount, &pszErrMsg );
From bb795227fc4177d5c7424a6c411f6b802fdfc22a Mon Sep 17 00:00:00 2001
From: Sander Jansen
Date: Tue, 30 Oct 2018 13:56:23 -0500
Subject: [PATCH 014/488] grib: interpret missing data values correctly for
complex packing when original data is integer
---
gdal/frmts/grib/gribdataset.cpp | 45 +++++++++++++++++++++++----------
1 file changed, 31 insertions(+), 14 deletions(-)
diff --git a/gdal/frmts/grib/gribdataset.cpp b/gdal/frmts/grib/gribdataset.cpp
index 221dfcbf5c0e..632185eb2b2e 100644
--- a/gdal/frmts/grib/gribdataset.cpp
+++ b/gdal/frmts/grib/gribdataset.cpp
@@ -516,22 +516,39 @@ void GRIBRasterBand::FindNoDataGrib2(bool bSeekToStart)
const int nMiss = pabyBody[23-1];
if( nMiss == 1 || nMiss == 2 )
{
- float fTemp;
- memcpy(&fTemp, &pabyBody[24-1], 4);
- CPL_MSBPTR32(&fTemp);
- m_dfNoData = fTemp;
- m_bHasNoData = true;
-
- if( nMiss == 2 )
+ const int original_field_type = pabyBody[21-1];
+ if ( original_field_type == 0 ) // Floating Point
{
- memcpy(&fTemp, &pabyBody[28-1], 4);
+ float fTemp;
+ memcpy(&fTemp, &pabyBody[24-1], 4);
CPL_MSBPTR32(&fTemp);
- double dfSecondaryNoData = fTemp;
-
- // What TODO?
- CPLDebug("GRIB",
- "Secondary missing value also set for band %d : %f",
- nBand, dfSecondaryNoData);
+ m_dfNoData = fTemp;
+ m_bHasNoData = true;
+ if( nMiss == 2 )
+ {
+ memcpy(&fTemp, &pabyBody[28-1], 4);
+ CPL_MSBPTR32(&fTemp);
+ CPLDebug("GRIB","Secondary missing value also set for band %d : %f", nBand, fTemp);
+ }
+ }
+ else if ( original_field_type == 1 ) // Integer
+ {
+ int iTemp;
+ memcpy(&iTemp, &pabyBody[24-1], 4);
+ CPL_MSBPTR32(&iTemp);
+ m_dfNoData = iTemp;
+ m_bHasNoData = true;
+ if( nMiss == 2 )
+ {
+ memcpy(&iTemp, &pabyBody[28-1], 4);
+ CPL_MSBPTR32(&iTemp);
+ CPLDebug("GRIB","Secondary missing value also set for band %d : %d", nBand, iTemp);
+ }
+ }
+ else
+ {
+ // FIXME What to do? Blindly convert to float?
+ CPLDebug("GRIB","Complex Packing - Type of Original Field Values for band %d: %u", nBand, original_field_type);
}
}
}
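
A standalone sketch of what the change above does when decoding the substitute for missing values from the complex-packing template: the same four big-endian bytes are reinterpreted either as an IEEE float (original field type 0) or as a signed integer (original field type 1). GDAL uses memcpy plus CPL_MSBPTR32 for the byte swap; a manual swap is used here only to keep the example self-contained.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static uint32_t LoadBE32( const unsigned char* p )
    {
        return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
               (uint32_t(p[2]) << 8)  |  uint32_t(p[3]);
    }

    static double DecodeMissingValue( const unsigned char* pabyField,
                                      int originalFieldType )
    {
        const uint32_t nBits = LoadBE32( pabyField );
        if( originalFieldType == 0 )        // floating point
        {
            float fVal;
            std::memcpy( &fVal, &nBits, 4 );
            return fVal;
        }
        else if( originalFieldType == 1 )   // integer
        {
            int32_t nVal;
            std::memcpy( &nVal, &nBits, 4 );
            return nVal;
        }
        return 0.0;  // other types are left undecided, as in the FIXME above
    }

    int main()
    {
        const unsigned char asInt[4] = { 0x00, 0x00, 0x00, 0x7B };  // 123
        std::printf( "integer nodata: %g\n", DecodeMissingValue( asInt, 1 ) );
        return 0;
    }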
From 3ab3b331094aadf2a4dbf9aae9cc378406f31112 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Wed, 31 Oct 2018 00:13:11 +0100
Subject: [PATCH 015/488] GRIB: fix writing of nodata value for complex value
when original data is integer (refs #1063)
---
autotest/gdrivers/grib.py | 31 ++++++++++++++++++++++++
gdal/frmts/grib/gribcreatecopy.cpp | 39 ++++++++++++++++++++++++------
2 files changed, 62 insertions(+), 8 deletions(-)
diff --git a/autotest/gdrivers/grib.py b/autotest/gdrivers/grib.py
index eaa6bbabe68a..d55f31298fe8 100755
--- a/autotest/gdrivers/grib.py
+++ b/autotest/gdrivers/grib.py
@@ -1532,6 +1532,36 @@ def grib_grib2_write_temperatures():
return 'success'
###############################################################################
+
+
+def grib_grib2_write_nodata():
+
+ if gdaltest.grib_drv is None:
+ return 'skip'
+
+ for src_type in [ gdal.GDT_Byte, gdal.GDT_Float32 ]:
+ src_ds = gdal.GetDriverByName('MEM').Create('', 2, 2, 1, src_type)
+ src_ds.SetGeoTransform([2, 1, 0, 49, 0, -1])
+ sr = osr.SpatialReference()
+ sr.SetFromUserInput('WGS84')
+ src_ds.SetProjection(sr.ExportToWkt())
+ src_ds.GetRasterBand(1).SetNoDataValue(123)
+ tmpfilename = '/vsimem/out.grb2'
+ options = [
+ 'DATA_ENCODING=COMPLEX_PACKING'
+ ]
+ gdaltest.grib_drv.CreateCopy(tmpfilename, src_ds, options=options)
+
+ ds = gdal.Open(tmpfilename)
+ if ds.GetRasterBand(1).GetNoDataValue() != 123:
+ gdaltest.post_reason('fail')
+ return 'fail'
+ ds = None
+ gdal.Unlink(tmpfilename)
+
+ return 'success'
+
+###############################################################################
# Test GRIB2 file with JPEG2000 codestream on a single line (#6719)
@@ -1600,6 +1630,7 @@ def grib_online_grib2_jpeg2000_single_line():
grib_grib2_write_data_encodings,
grib_grib2_write_data_encodings_warnings_and_errors,
grib_grib2_write_temperatures,
+ grib_grib2_write_nodata,
grib_online_grib2_jpeg2000_single_line
]
diff --git a/gdal/frmts/grib/gribcreatecopy.cpp b/gdal/frmts/grib/gribcreatecopy.cpp
index 8d7aca6e90be..ac7184b591ca 100644
--- a/gdal/frmts/grib/gribcreatecopy.cpp
+++ b/gdal/frmts/grib/gribcreatecopy.cpp
@@ -33,6 +33,7 @@
#include "cpl_port.h"
#include "gribdataset.h"
+#include "gdal_priv_templates.hpp"
#include
@@ -700,6 +701,7 @@ class GRIB2Section567Writer
bool Write(float fValOffset,
char** papszOptions,
GDALProgressFunc pfnProgress, void * pProgressData);
+ void WriteComplexPackingNoData();
};
/************************************************************************/
@@ -926,6 +928,33 @@ bool GRIB2Section567Writer::WriteSimplePacking()
return true;
}
+/************************************************************************/
+/* WriteComplexPackingNoData() */
+/************************************************************************/
+
+void GRIB2Section567Writer::WriteComplexPackingNoData()
+{
+ if( !m_bHasNoData )
+ {
+ WriteUInt32(m_fp, GRIB2MISSING_u4);
+ }
+ else if( GDALDataTypeIsFloating(m_eDT) )
+ {
+ WriteFloat32(m_fp, static_cast<float>(m_dfNoData));
+ }
+ else
+ {
+ if( GDALIsValueInRange<int>(m_dfNoData) )
+ {
+ WriteInt32(m_fp, static_cast<int>(m_dfNoData));
+ }
+ else
+ {
+ WriteUInt32(m_fp, GRIB2MISSING_u4);
+ }
+ }
+}
+
/************************************************************************/
/* WriteComplexPacking() */
/************************************************************************/
@@ -965,10 +994,7 @@ bool GRIB2Section567Writer::WriteComplexPacking(int nSpatialDifferencingOrder)
WriteByte(m_fp, GDALDataTypeIsFloating(m_eDT) ? 0 : 1);
WriteByte(m_fp, 0);
WriteByte(m_fp, m_bHasNoData ? 1 : 0); // 1 missing value
- if( !m_bHasNoData )
- WriteUInt32(m_fp, GRIB2MISSING_u4);
- else
- WriteFloat32(m_fp, fNoData);
+ WriteComplexPackingNoData();
WriteUInt32(m_fp, GRIB2MISSING_u4);
WriteUInt32(m_fp, 0);
WriteByte(m_fp, 0);
@@ -1092,10 +1118,7 @@ bool GRIB2Section567Writer::WriteComplexPacking(int nSpatialDifferencingOrder)
WriteByte(m_fp, GDALDataTypeIsFloating(m_eDT) ? 0 : 1);
WriteByte(m_fp, idrstmpl[TMPL5_GROUP_SPLITTING_IDX]);
WriteByte(m_fp, idrstmpl[TMPL5_MISSING_VALUE_MGNT_IDX]);
- if( !m_bHasNoData )
- WriteUInt32(m_fp, GRIB2MISSING_u4);
- else
- WriteFloat32(m_fp, fNoData);
+ WriteComplexPackingNoData();
WriteUInt32(m_fp, GRIB2MISSING_u4);
WriteUInt32(m_fp, idrstmpl[TMPL5_NG_IDX]);
WriteByte(m_fp, idrstmpl[TMPL5_REF_GROUP_WIDTHS_IDX]);
From 5bd506d5691e5c68da5bb7ab451fa0e99b5e91cf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mateusz=20=C5=81oskot?=
Date: Wed, 31 Oct 2018 09:58:44 +0100
Subject: [PATCH 016/488] Remove support for pre-VS2013 from
generate_vcxproj.bat [ci skip]
---
gdal/generate_vcxproj.bat | 13 +++----------
1 file changed, 3 insertions(+), 10 deletions(-)
diff --git a/gdal/generate_vcxproj.bat b/gdal/generate_vcxproj.bat
index 3e4686309f0c..9bc120c59277 100644
--- a/gdal/generate_vcxproj.bat
+++ b/gdal/generate_vcxproj.bat
@@ -5,7 +5,7 @@
::
:: Name: generate_vcxproj.bat
:: Project: GDAL
-:: Purpose: Generate MS Visual C++ => 10.0 project files
+:: Purpose: Generate MS Visual C++ => 12.0 project files
:: Author: Ivan Lucena, [ivan lucena at outlook dot com]
::
:: ****************************************************************************
@@ -61,12 +61,6 @@ if "%_vcver_%"=="15.0" (
) else ( if "%_vcver_%"=="12.0" (
set _clver_=1800
set _vstoolset_=v120
-) else ( if "%_vcver_%"=="11.0" (
- set _clver_=1700
- set _vstoolset_=v110
-) else ( if "%_vcver_%"=="10.0" (
- set _clver_=1600
- set _vstoolset_=v100
) else (
echo Wrong value for parameter 1. See usage:
goto :usage
@@ -102,15 +96,14 @@ goto :continue
echo Usage: generate_vcxproj ^<Visual C++ version^> [32^|64] ^<^(*^) project file name^>
echo Parameters:
-echo 1 : Visual C++ version is not the same as Visual Studio version ^( =^> 10.0 ^)
+echo 1 : Visual C++ version is not the same as Visual Studio version ^( =^> 14.0 ^)
echo 2 : Windows platform 32 for Win32 and 64 for Win64
echo 3 : Base file name, with no path and no extension ^(*^)
echo Examples:
-echo generate_vcxproj 10.1 32 gdal_vs2010
-echo generate_vcxproj 11.0 64 gdal_vs2012
echo generate_vcxproj 12.0 64 gdal_vs2013
echo generate_vcxproj 14.0 64 gdal_vs2015
echo generate_vcxproj 15.0 64 gdal_vs2017
+echo WARNING: GDAL requires C++11. It is not guaranteed to build with VS2013.
goto :end
From a3c0caeab77354c23a237905cc451dfc89f7f06f Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Thu, 1 Nov 2018 14:28:43 +0100
Subject: [PATCH 017/488] ogr_merge.py: fix issue with non-ASCII characters
(fixes #1067)
---
autotest/pyscripts/test_ogrmerge.py | 29 +++++++++++++++++-
gdal/swig/python/scripts/ogrmerge.py | 45 ++++++++++++++++++----------
2 files changed, 58 insertions(+), 16 deletions(-)
diff --git a/autotest/pyscripts/test_ogrmerge.py b/autotest/pyscripts/test_ogrmerge.py
index 7b27f061bc27..6b728af940b4 100755
--- a/autotest/pyscripts/test_ogrmerge.py
+++ b/autotest/pyscripts/test_ogrmerge.py
@@ -404,6 +404,32 @@ def test_ogrmerge_11():
return 'success'
+###############################################################################
+# Test layer names with accents
+
+
+def test_ogrmerge_12():
+ script_path = test_py_scripts.get_py_script('ogrmerge')
+ if script_path is None:
+ return 'skip'
+
+ with open('tmp/tmp.json', 'wt') as f:
+ f.write("""{ "type": "FeatureCollection", "name": "\xc3\xa9ven", "features": [ { "type": "Feature", "properties": {}, "geometry": null} ]}""")
+
+ test_py_scripts.run_py_script(script_path, 'ogrmerge',
+ '-f VRT -o /vsimem/out.vrt tmp/tmp.json')
+
+ ds = ogr.Open('/vsimem/out.vrt')
+ if ds is None:
+ gdaltest.post_reason('fail')
+ return 'fail'
+ ds = None
+
+ gdal.Unlink('tmp/tmp.json')
+ gdal.Unlink('/vsimem/out.vrt')
+
+ return 'success'
+
gdaltest_list = [
test_ogrmerge_1,
@@ -416,7 +442,8 @@ def test_ogrmerge_11():
test_ogrmerge_8,
test_ogrmerge_9,
test_ogrmerge_10,
- test_ogrmerge_11
+ test_ogrmerge_11,
+ test_ogrmerge_12
]
diff --git a/gdal/swig/python/scripts/ogrmerge.py b/gdal/swig/python/scripts/ogrmerge.py
index 85bb31e3d2ad..c5af21de9647 100755
--- a/gdal/swig/python/scripts/ogrmerge.py
+++ b/gdal/swig/python/scripts/ogrmerge.py
@@ -163,8 +163,10 @@ def open_element(self, name, attrs=None):
xml_attrs = ''
if attrs is not None:
for key in attrs:
- xml_attrs = xml_attrs + ' %s=\"%s\"' % (key, _Esc(attrs[key]))
- _VSIFPrintfL(self.f, '%s<%s%s>\n' % (self._indent(), name, xml_attrs))
+ xml_attrs = xml_attrs + ' %s=\"%s\"' % (key, _Esc(attrs[key].encode('utf-8')))
+ x = '%s<%s%s>\n' % (self._indent(), name, xml_attrs)
+ x = x.encode('utf-8')
+ _VSIFPrintfL(self.f, x)
self.inc = self.inc + 1
self.elements.append(name)
@@ -172,10 +174,11 @@ def write_element_value(self, name, value, attrs=None):
xml_attrs = ''
if attrs is not None:
for key in attrs:
- xml_attrs = xml_attrs + ' %s=\"%s\"' % (key, _Esc(attrs[key]))
- _VSIFPrintfL(self.f, '%s<%s%s>%s%s>\n' %
- (self._indent(), name, xml_attrs,
- _Esc(value), name))
+ xml_attrs = xml_attrs + ' %s=\"%s\"' % (key, _Esc(attrs[key].encode('utf-8')))
+ x = '%s<%s%s>%s%s>\n' % (self._indent(), name, xml_attrs,
+ _Esc(value.encode('utf-8')), name)
+ x = x.encode('utf-8')
+ _VSIFPrintfL(self.f, x)
def close_element(self, closing_name=None):
self.inc = self.inc - 1
@@ -406,21 +409,27 @@ def process(argv, progress=None, progress_arg=None):
layer_name = src_layer_field_content
+ src_lyr_name = src_lyr.GetName()
+ try:
+ src_lyr_name = src_lyr_name.decode('utf-8')
+ except AttributeError:
+ pass
+
basename = None
if os.path.exists(src_dsname):
basename = os.path.basename(src_dsname)
if basename.find('.') >= 0:
basename = '.'.join(basename.split(".")[0:-1])
- if basename == src_lyr.GetName():
+ if basename == src_lyr_name:
layer_name = layer_name.replace('{AUTO_NAME}', basename)
elif basename is None:
layer_name = layer_name.replace(
'{AUTO_NAME}',
- 'Dataset%d_%s' % (src_ds_idx, src_lyr.GetName()))
+ 'Dataset%d_%s' % (src_ds_idx, src_lyr_name))
else:
layer_name = layer_name.replace(
- '{AUTO_NAME}', basename + '_' + src_lyr.GetName())
+ '{AUTO_NAME}', basename + '_' + src_lyr_name)
if basename is not None:
layer_name = layer_name.replace('{DS_BASENAME}', basename)
@@ -432,7 +441,7 @@ def process(argv, progress=None, progress_arg=None):
layer_name = layer_name.replace('{DS_INDEX}', '%d' %
src_ds_idx)
layer_name = layer_name.replace('{LAYER_NAME}',
- src_lyr.GetName())
+ src_lyr_name)
layer_name = layer_name.replace('{LAYER_INDEX}', '%d' %
src_lyr_idx)
@@ -485,6 +494,12 @@ def process(argv, progress=None, progress_arg=None):
if gt not in src_geom_types:
continue
+ src_lyr_name = src_lyr.GetName()
+ try:
+ src_lyr_name = src_lyr_name.decode('utf-8')
+ except AttributeError:
+ pass
+
layer_name = layer_name_template
basename = None
if os.path.exists(src_dsname):
@@ -492,15 +507,15 @@ def process(argv, progress=None, progress_arg=None):
if basename.find('.') >= 0:
basename = '.'.join(basename.split(".")[0:-1])
- if basename == src_lyr.GetName():
+ if basename == src_lyr_name:
layer_name = layer_name.replace('{AUTO_NAME}', basename)
elif basename is None:
layer_name = layer_name.replace(
'{AUTO_NAME}',
- 'Dataset%d_%s' % (src_ds_idx, src_lyr.GetName()))
+ 'Dataset%d_%s' % (src_ds_idx, src_lyr_name))
else:
layer_name = layer_name.replace(
- '{AUTO_NAME}', basename + '_' + src_lyr.GetName())
+ '{AUTO_NAME}', basename + '_' + src_lyr_name)
if basename is not None:
layer_name = layer_name.replace('{DS_BASENAME}', basename)
@@ -523,7 +538,7 @@ def process(argv, progress=None, progress_arg=None):
layer_name = layer_name.replace('{DS_INDEX}', '%d' %
src_ds_idx)
layer_name = layer_name.replace('{LAYER_NAME}',
- src_lyr.GetName())
+ src_lyr_name)
layer_name = layer_name.replace('{LAYER_INDEX}', '%d' %
src_lyr_idx)
@@ -541,7 +556,7 @@ def process(argv, progress=None, progress_arg=None):
attrs = {'relativeToVRT': '1'}
writer.write_element_value('SrcDataSource', src_dsname,
attrs=attrs)
- writer.write_element_value('SrcLayer', src_lyr.GetName())
+ writer.write_element_value('SrcLayer', src_lyr_name)
if a_srs is not None:
writer.write_element_value('LayerSRS', a_srs)
From 81816bf316a03f75fba348d84daac1416f7c218d Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Thu, 1 Nov 2018 18:42:21 +0100
Subject: [PATCH 018/488] ogr_merge.py: fix issue with non-ASCII filenames on
Windows (fixes #1067, derived from fix by @tobwen)
---
gdal/swig/python/scripts/ogrmerge.py | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/gdal/swig/python/scripts/ogrmerge.py b/gdal/swig/python/scripts/ogrmerge.py
index c5af21de9647..5859c46098e2 100755
--- a/gdal/swig/python/scripts/ogrmerge.py
+++ b/gdal/swig/python/scripts/ogrmerge.py
@@ -288,7 +288,10 @@ def process(argv, progress=None, progress_arg=None):
return Usage()
else:
if '*' in arg:
- src_datasets += glob.glob(arg)
+ if sys.version_info < (3,0,0):
+ src_datasets += [unicode(fn, sys.getfilesystemencoding()) for fn in glob.glob(arg)]
+ else:
+ src_datasets += glob.glob(arg)
else:
src_datasets.append(arg)
i = i + 1
@@ -601,7 +604,10 @@ def process(argv, progress=None, progress_arg=None):
def main():
- argv = ogr.GeneralCmdLineProcessor(sys.argv)
+ argv = sys.argv
+ if sys.version_info < (3,0,0):
+ argv = [unicode(fn, sys.getfilesystemencoding()) for fn in argv]
+ argv = ogr.GeneralCmdLineProcessor(argv)
if argv is None:
return 1
return process(argv[1:])
From ef65d88251a15e9f28d01a98a666238a761159c2 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Thu, 1 Nov 2018 19:28:00 +0100
Subject: [PATCH 019/488] ogrmerge.py: make pyflakes3 happy (refs #1067)
---
gdal/swig/python/scripts/ogrmerge.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/gdal/swig/python/scripts/ogrmerge.py b/gdal/swig/python/scripts/ogrmerge.py
index 5859c46098e2..06762e15ee9c 100755
--- a/gdal/swig/python/scripts/ogrmerge.py
+++ b/gdal/swig/python/scripts/ogrmerge.py
@@ -289,7 +289,7 @@ def process(argv, progress=None, progress_arg=None):
else:
if '*' in arg:
if sys.version_info < (3,0,0):
- src_datasets += [unicode(fn, sys.getfilesystemencoding()) for fn in glob.glob(arg)]
+ src_datasets += [fn.decode(sys.getfilesystemencoding()) for fn in glob.glob(arg)]
else:
src_datasets += glob.glob(arg)
else:
@@ -606,7 +606,7 @@ def process(argv, progress=None, progress_arg=None):
def main():
argv = sys.argv
if sys.version_info < (3,0,0):
- argv = [unicode(fn, sys.getfilesystemencoding()) for fn in argv]
+ argv = [fn.decode(sys.getfilesystemencoding()) for fn in argv]
argv = ogr.GeneralCmdLineProcessor(argv)
if argv is None:
return 1
From 93bd2427dbca537cfa6aa37319ec9e1e0b193934 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Thu, 1 Nov 2018 19:37:07 +0100
Subject: [PATCH 020/488] GeoJSON RFC7946 writer: clip and offset geometries
outside [-180,180] (fixes #1068)
---
autotest/ogr/ogr_geojson.py | 46 +++++++++++++++++++++++++++++++++
gdal/ogr/ogrgeometryfactory.cpp | 10 ++++---
2 files changed, 52 insertions(+), 4 deletions(-)
diff --git a/autotest/ogr/ogr_geojson.py b/autotest/ogr/ogr_geojson.py
index 2116d0db7596..17937a380135 100755
--- a/autotest/ogr/ogr_geojson.py
+++ b/autotest/ogr/ogr_geojson.py
@@ -4208,6 +4208,51 @@ def ogr_geojson_read_fields_with_different_case():
return 'success'
+###############################################################################
+# Test bugfix for https://github.com/OSGeo/gdal/issues/1068
+
+
+def ogr_geojson_clip_geometries_rfc7946():
+
+ if not ogrtest.have_geos():
+ return 'skip'
+
+ tmpfilename = '/vsimem/out.json'
+ gdal.VectorTranslate(tmpfilename, """{
+ "type": "FeatureCollection",
+ "features": [
+ { "type": "Feature", "geometry": {"type":"Polygon","coordinates":[[[-220,-20],[-220,30],[16,30],[16,-20],[-220,-20]]]} },
+ { "type": "Feature", "geometry": {"type":"Polygon","coordinates":[[[220,40],[220,70],[-16,70],[-16,40],[220,40]]]} },
+ { "type": "Feature", "geometry": {"type":"Polygon","coordinates":[[[170,-40],[170,-70],[-16,70],[-16,-40],[170,-40]]]} }
+ ]
+}""", options='-f GeoJSON -lco RFC7946=YES')
+
+ ds = ogr.Open(tmpfilename)
+ lyr = ds.GetLayer(0)
+
+ f = lyr.GetNextFeature()
+ ref_geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON (((-180 30,-180 -20,16 -20,16 30,-180 30)),((140 -20,180 -20,180 30,140 30,140 -20)))')
+ if ogrtest.check_feature_geometry(f, ref_geom) != 0:
+ f.DumpReadable()
+ return 'fail'
+
+ f = lyr.GetNextFeature()
+ ref_geom = ogr.CreateGeometryFromWkt('MULTIPOLYGON (((180 40,180 70,-16 70,-16 40,180 40)),((-180 70,-180 40,-140 40,-140 70,-180 70)))')
+ if ogrtest.check_feature_geometry(f, ref_geom) != 0:
+ f.DumpReadable()
+ return 'fail'
+
+ f = lyr.GetNextFeature()
+ ref_geom = ogr.CreateGeometryFromWkt('POLYGON ((170 -40,-16 -40,-16 70,170 -70,170 -40))')
+ if ogrtest.check_feature_geometry(f, ref_geom) != 0:
+ f.DumpReadable()
+ return 'fail'
+ ds = None
+
+ gdal.Unlink(tmpfilename)
+ return 'success'
+
+
###############################################################################
@@ -4322,6 +4367,7 @@ def ogr_geojson_cleanup():
ogr_geojson_empty_geometrycollection,
ogr_esrijson_without_geometryType,
ogr_geojson_read_fields_with_different_case,
+ ogr_geojson_clip_geometries_rfc7946,
ogr_geojson_cleanup]
if __name__ == '__main__':
diff --git a/gdal/ogr/ogrgeometryfactory.cpp b/gdal/ogr/ogrgeometryfactory.cpp
index c57edd70bc16..8cfa48ee7d7b 100644
--- a/gdal/ogr/ogrgeometryfactory.cpp
+++ b/gdal/ogr/ogrgeometryfactory.cpp
@@ -2801,8 +2801,10 @@ static void CutGeometryOnDateLineAndAddToMulti( OGRGeometryCollection* poMulti,
const double dfDiffSpace = 360 - dfDateLineOffset;
const double dfXOffset = (bAroundMinus180) ? 360.0 : 0.0;
- if( oEnvelope.MinX + dfXOffset > dfLeftBorderX &&
- oEnvelope.MaxX + dfXOffset > 180 )
+ if( oEnvelope.MinX < -180 ||
+ oEnvelope.MaxX > 180 ||
+ (oEnvelope.MinX + dfXOffset > dfLeftBorderX &&
+ oEnvelope.MaxX + dfXOffset > 180) )
{
#ifndef HAVE_GEOS
CPLError( CE_Failure, CPLE_NotSupported,
@@ -2869,8 +2871,8 @@ static void CutGeometryOnDateLineAndAddToMulti( OGRGeometryCollection* poMulti,
OGRGeometry* poRectangle1 = nullptr;
OGRGeometry* poRectangle2 = nullptr;
const char* pszWKT1 = !bAroundMinus180 ?
- "POLYGON((0 90,180 90,180 -90,0 -90,0 90))" :
- "POLYGON((0 90,-180 90,-180 -90,0 -90,0 90))";
+ "POLYGON((-180 90,180 90,180 -90,-180 -90,-180 90))" :
+ "POLYGON((180 90,-180 90,-180 -90,180 -90,180 90))";
const char* pszWKT2 = !bAroundMinus180 ?
"POLYGON((180 90,360 90,360 -90,180 -90,180 90))" :
"POLYGON((-180 90,-360 90,-360 -90,-180 -90,-180 90))";
From f679c3ce6939206a5ce7ed264d2d6ffbdc027869 Mon Sep 17 00:00:00 2001
From: drons
Date: Wed, 31 Oct 2018 23:06:46 +0300
Subject: [PATCH 021/488] DBF: Add CP1251 codepage name synonym (ANSI 1251) for
DBF files. This synonym is used by Russian-language versions of Windows.
---
gdal/ogr/ogrsf_frmts/shape/ogrshapelayer.cpp | 2 ++
1 file changed, 2 insertions(+)
diff --git a/gdal/ogr/ogrsf_frmts/shape/ogrshapelayer.cpp b/gdal/ogr/ogrsf_frmts/shape/ogrshapelayer.cpp
index 99781665db72..6702ba3a8c27 100644
--- a/gdal/ogr/ogrsf_frmts/shape/ogrshapelayer.cpp
+++ b/gdal/ogr/ogrsf_frmts/shape/ogrshapelayer.cpp
@@ -445,6 +445,8 @@ CPLString OGRShapeLayer::ConvertCodePage( const char *pszCodePage )
}
if( STARTS_WITH_CI(pszCodePage, "UTF-8") )
return CPL_ENC_UTF8;
+ if( STARTS_WITH_CI(pszCodePage, "ANSI 1251") )
+ return "CP1251";
// Try just using the CPG value directly. Works for stuff like Big5.
return pszCodePage;
From 301268d2142ba741de1814540d044a45f1f597f4 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Fri, 2 Nov 2018 09:48:05 +0100
Subject: [PATCH 022/488] SIGDEM: avoid floating point division by zero. Fixes
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11220. Credit to OSS
Fuzz
---
gdal/frmts/sigdem/sigdemdataset.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/gdal/frmts/sigdem/sigdemdataset.cpp b/gdal/frmts/sigdem/sigdemdataset.cpp
index 0d8b4d769872..d188ac9323e6 100644
--- a/gdal/frmts/sigdem/sigdemdataset.cpp
+++ b/gdal/frmts/sigdem/sigdemdataset.cpp
@@ -520,14 +520,14 @@ CPLErr SIGDEMRasterBand::IReadBlock(
const int32_t* pnSourceValues = pBlockBuffer;
double* padfDestValues = static_cast<double*>(pImage);
double dfOffset = this->dfOffsetZ;
- double dfScaleFactor = this->dfScaleFactorZ;
+ const double dfInvScaleFactor = dfScaleFactorZ ? 1.0 / dfScaleFactorZ : 0.0;
int nCellCount = this->nRasterXSize;
for (int i = 0; i < nCellCount; i++) {
int32_t nValue = CPL_MSBWORD32(*pnSourceValues);
if (nValue == NO_DATA) {
*padfDestValues = -9999;
} else {
- *padfDestValues = dfOffset + nValue / dfScaleFactor;
+ *padfDestValues = dfOffset + nValue * dfInvScaleFactor;
}
pnSourceValues++;
From 2ace03ec48c36dae8ba74089d85617c095643428 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Fri, 2 Nov 2018 18:02:33 +0100
Subject: [PATCH 023/488] SDTS: avoid long processing time on corrupted
dataset. Fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11219.
Credit to OSS Fuzz
---
gdal/frmts/sdts/sdts_al.h | 1 +
gdal/frmts/sdts/sdtscatd.cpp | 13 +++++++++++++
gdal/frmts/sdts/sdtstransfer.cpp | 12 ++++++------
3 files changed, 20 insertions(+), 6 deletions(-)
diff --git a/gdal/frmts/sdts/sdts_al.h b/gdal/frmts/sdts/sdts_al.h
index dbb621699d88..df0d8d03848f 100644
--- a/gdal/frmts/sdts/sdts_al.h
+++ b/gdal/frmts/sdts/sdts_al.h
@@ -149,6 +149,7 @@ class SDTS_CATD
const char * GetEntryTypeDesc(int) const;
const char * GetEntryFilePath(int) const;
SDTSLayerType GetEntryType(int) const;
+ void SetEntryTypeUnknown(int);
};
/************************************************************************/
diff --git a/gdal/frmts/sdts/sdtscatd.cpp b/gdal/frmts/sdts/sdtscatd.cpp
index 19c9a4578eb2..093814936e1b 100644
--- a/gdal/frmts/sdts/sdtscatd.cpp
+++ b/gdal/frmts/sdts/sdtscatd.cpp
@@ -302,6 +302,19 @@ SDTSLayerType SDTS_CATD::GetEntryType( int iEntry ) const
return SLTUnknown;
}
+/************************************************************************/
+/* SetEntryTypeUnknown() */
+/************************************************************************/
+
+void SDTS_CATD::SetEntryTypeUnknown(int iEntry)
+{
+ if( iEntry >= 0 && iEntry < nEntries )
+ {
+ CPLFree(papoEntries[iEntry]->pszType);
+ papoEntries[iEntry]->pszType = CPLStrdup("Unknown");
+ }
+}
+
/************************************************************************/
/* GetEntryFilePath() */
/************************************************************************/
diff --git a/gdal/frmts/sdts/sdtstransfer.cpp b/gdal/frmts/sdts/sdtstransfer.cpp
index e176e954e237..606817e36655 100644
--- a/gdal/frmts/sdts/sdtstransfer.cpp
+++ b/gdal/frmts/sdts/sdtstransfer.cpp
@@ -238,7 +238,7 @@ SDTSLineReader *SDTSTransfer::GetLayerLineReader( int iEntry )
if( !poLineReader->Open(
oCATD.GetEntryFilePath( panLayerCATDEntry[iEntry] ) ) )
{
- panLayerCATDEntry[iEntry] = SLTUnknown; // to prevent further attempt
+ oCATD.SetEntryTypeUnknown(iEntry) ; // to prevent further attempt
delete poLineReader;
return nullptr;
}
@@ -265,7 +265,7 @@ SDTSPointReader *SDTSTransfer::GetLayerPointReader( int iEntry )
if( !poPointReader->Open(
oCATD.GetEntryFilePath( panLayerCATDEntry[iEntry] ) ) )
{
- panLayerCATDEntry[iEntry] = SLTUnknown; // to prevent further attempt
+ oCATD.SetEntryTypeUnknown(iEntry) ; // to prevent further attempt
delete poPointReader;
return nullptr;
}
@@ -292,7 +292,7 @@ SDTSPolygonReader *SDTSTransfer::GetLayerPolygonReader( int iEntry )
if( !poPolyReader->Open(
oCATD.GetEntryFilePath( panLayerCATDEntry[iEntry] ) ) )
{
- panLayerCATDEntry[iEntry] = SLTUnknown; // to prevent further attempt
+ oCATD.SetEntryTypeUnknown(iEntry) ; // to prevent further attempt
delete poPolyReader;
return nullptr;
}
@@ -319,7 +319,7 @@ SDTSAttrReader *SDTSTransfer::GetLayerAttrReader( int iEntry )
if( !poAttrReader->Open(
oCATD.GetEntryFilePath( panLayerCATDEntry[iEntry] ) ) )
{
- panLayerCATDEntry[iEntry] = SLTUnknown; // to prevent further attempt
+ oCATD.SetEntryTypeUnknown(iEntry) ; // to prevent further attempt
delete poAttrReader;
return nullptr;
}
@@ -361,7 +361,7 @@ SDTSRasterReader *SDTSTransfer::GetLayerRasterReader( int iEntry )
if( !poRasterReader->Open( &oCATD, &oIREF,
oCATD.GetEntryModule(panLayerCATDEntry[iEntry] ) ) )
{
- panLayerCATDEntry[iEntry] = SLTUnknown; // to prevent further attempt
+ oCATD.SetEntryTypeUnknown(iEntry) ; // to prevent further attempt
delete poRasterReader;
return nullptr;
}
@@ -386,7 +386,7 @@ DDFModule *SDTSTransfer::GetLayerModuleReader( int iEntry )
if( !poModuleReader->Open(
oCATD.GetEntryFilePath( panLayerCATDEntry[iEntry] ) ) )
{
- panLayerCATDEntry[iEntry] = SLTUnknown; // to prevent further attempt
+ oCATD.SetEntryTypeUnknown(iEntry) ; // to prevent further attempt
delete poModuleReader;
return nullptr;
}
From 3f206c1e3684ee1ba033e58791b37608f418b00c Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Fri, 2 Nov 2018 18:13:44 +0100
Subject: [PATCH 024/488] Fix warning
---
gdal/frmts/sigdem/sigdemdataset.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/gdal/frmts/sigdem/sigdemdataset.cpp b/gdal/frmts/sigdem/sigdemdataset.cpp
index d188ac9323e6..5ffc6e6ec61b 100644
--- a/gdal/frmts/sigdem/sigdemdataset.cpp
+++ b/gdal/frmts/sigdem/sigdemdataset.cpp
@@ -520,7 +520,7 @@ CPLErr SIGDEMRasterBand::IReadBlock(
const int32_t* pnSourceValues = pBlockBuffer;
double* padfDestValues = static_cast<double*>(pImage);
double dfOffset = this->dfOffsetZ;
- const double dfInvScaleFactor = dfScaleFactorZ ? 1.0 / dfScaleFactorZ : 0.0;
+ const double dfInvScaleFactor = dfScaleFactorZ != 0.0 ? 1.0 / dfScaleFactorZ : 0.0;
int nCellCount = this->nRasterXSize;
for (int i = 0; i < nCellCount; i++) {
int32_t nValue = CPL_MSBWORD32(*pnSourceValues);
From e5e7b313540f0ff913fadfe6a273fb7c356a22cb Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Fri, 2 Nov 2018 22:54:20 +0100
Subject: [PATCH 025/488] VRT: deal with serialized nodata value that is
slightly outside Float32 validity range (fixes #1071)
---
.../minfloat_nodata_slightly_out_of_float.vrt | 11 +++++
autotest/gcore/vrt_read.py | 42 +++++++++++++++++++
gdal/frmts/vrt/vrtrasterband.cpp | 22 ++++++++++
gdal/frmts/vrt/vrtsources.cpp | 26 ++++++++++--
gdal/gcore/gdal_misc.cpp | 14 +++++++
gdal/gcore/gdal_priv.h | 2 +
6 files changed, 114 insertions(+), 3 deletions(-)
create mode 100644 autotest/gcore/data/minfloat_nodata_slightly_out_of_float.vrt
diff --git a/autotest/gcore/data/minfloat_nodata_slightly_out_of_float.vrt b/autotest/gcore/data/minfloat_nodata_slightly_out_of_float.vrt
new file mode 100644
index 000000000000..d238f08bc7d8
--- /dev/null
+++ b/autotest/gcore/data/minfloat_nodata_slightly_out_of_float.vrt
@@ -0,0 +1,11 @@
+
+
+ -3.402823466385289e+38
+ Gray
+
+ minfloat.tif
+ 1
+ -3.402823466385289e+38
+
+
+
diff --git a/autotest/gcore/vrt_read.py b/autotest/gcore/vrt_read.py
index 476e6432dedc..495f342179d6 100755
--- a/autotest/gcore/vrt_read.py
+++ b/autotest/gcore/vrt_read.py
@@ -1385,6 +1385,47 @@ def vrt_read_31():
return 'success'
+###############################################################################
+# Test reading a VRT where the NODATA & NoDataValue are slightly below the
+# minimum float value (https://github.com/OSGeo/gdal/issues/1071)
+
+def vrt_float32_with_nodata_slightly_below_float_min():
+
+ shutil.copyfile('data/minfloat.tif', 'tmp/minfloat.tif')
+ shutil.copyfile('data/minfloat_nodata_slightly_out_of_float.vrt',
+ 'tmp/minfloat_nodata_slightly_out_of_float.vrt')
+ gdal.Unlink('tmp/minfloat_nodata_slightly_out_of_float.vrt.aux.xml')
+
+ ds = gdal.Open('tmp/minfloat_nodata_slightly_out_of_float.vrt')
+ nodata = ds.GetRasterBand(1).GetNoDataValue()
+ stats = ds.GetRasterBand(1).ComputeStatistics(False)
+ ds = None
+
+ vrt_content = open('tmp/minfloat_nodata_slightly_out_of_float.vrt', 'rt').read()
+
+ gdal.Unlink('tmp/minfloat.tif')
+ gdal.Unlink('tmp/minfloat_nodata_slightly_out_of_float.vrt')
+
+ # Check that the values were 'normalized' when regenerating the VRT
+ if vrt_content.find('-3.402823466385289') >= 0:
+ gdaltest.post_reason('did not get expected nodata in rewritten VRT')
+ print(vrt_content)
+ return 'fail'
+
+ if nodata != -3.4028234663852886e+38:
+ gdaltest.post_reason('did not get expected nodata')
+ print("%.18g" % nodata)
+ return 'fail'
+
+ if stats != [-3.0, 5.0, 1.0, 4.0]:
+ gdaltest.post_reason('did not get expected stats')
+ print(stats)
+ return 'fail'
+
+ return 'success'
+
+
+
for item in init_list:
ut = gdaltest.GDALTest('VRT', item[0], item[1], item[2])
if ut is None:
@@ -1423,6 +1464,7 @@ def vrt_read_31():
gdaltest_list.append(vrt_read_29)
gdaltest_list.append(vrt_read_30)
gdaltest_list.append(vrt_read_31)
+gdaltest_list.append(vrt_float32_with_nodata_slightly_below_float_min)
if __name__ == '__main__':
diff --git a/gdal/frmts/vrt/vrtrasterband.cpp b/gdal/frmts/vrt/vrtrasterband.cpp
index 826216481ba7..489a1ffba2d1 100644
--- a/gdal/frmts/vrt/vrtrasterband.cpp
+++ b/gdal/frmts/vrt/vrtrasterband.cpp
@@ -33,6 +33,7 @@
#include
#include
#include
+#include <limits>
#include
#include
@@ -620,10 +621,26 @@ CPLXMLNode *VRTRasterBand::SerializeToXML( const char *pszVRTPath )
if( m_bNoDataValueSet )
{
if (CPLIsNan(m_dfNoDataValue))
+ {
CPLSetXMLValue( psTree, "NoDataValue", "nan");
+ }
+ else if( eDataType == GDT_Float32 &&
+ m_dfNoDataValue == -std::numeric_limits<float>::max() )
+ {
+ // To avoid rounding out of the range of float
+ CPLSetXMLValue( psTree, "NoDataValue", "-3.4028234663852886e+38");
+ }
+ else if( eDataType == GDT_Float32 &&
+ m_dfNoDataValue == std::numeric_limits<float>::max() )
+ {
+ // To avoid rounding out of the range of float
+ CPLSetXMLValue( psTree, "NoDataValue", "3.4028234663852886e+38");
+ }
else
+ {
CPLSetXMLValue( psTree, "NoDataValue",
CPLSPrintf( "%.16g", m_dfNoDataValue ) );
+ }
}
if( m_bHideNoDataValue )
@@ -775,6 +792,11 @@ CPLXMLNode *VRTRasterBand::SerializeToXML( const char *pszVRTPath )
CPLErr VRTRasterBand::SetNoDataValue( double dfNewValue )
{
+ if( eDataType == GDT_Float32 )
+ {
+ dfNewValue = GDALAdjustNoDataCloseToFloatMax(dfNewValue);
+ }
+
m_bNoDataValueSet = TRUE;
m_dfNoDataValue = dfNewValue;
diff --git a/gdal/frmts/vrt/vrtsources.cpp b/gdal/frmts/vrt/vrtsources.cpp
index 5475ec28a0c5..53dc86c46903 100644
--- a/gdal/frmts/vrt/vrtsources.cpp
+++ b/gdal/frmts/vrt/vrtsources.cpp
@@ -38,6 +38,7 @@
#include
#include
#include
+#include <limits>
#include
#include "cpl_conv.h"
@@ -2097,9 +2098,23 @@ CPLXMLNode *VRTComplexSource::SerializeToXML( const char *pszVRTPath )
{
if( CPLIsNan(m_dfNoDataValue) )
CPLSetXMLValue( psSrc, "NODATA", "nan");
+ else if( m_poRasterBand->GetRasterDataType() == GDT_Float32 &&
+ m_dfNoDataValue == -std::numeric_limits<float>::max() )
+ {
+ // To avoid rounding out of the range of float
+ CPLSetXMLValue( psSrc, "NODATA", "-3.4028234663852886e+38");
+ }
+ else if( m_poRasterBand->GetRasterDataType() == GDT_Float32 &&
+ m_dfNoDataValue == std::numeric_limits<float>::max() )
+ {
+ // To avoid rounding out of the range of float
+ CPLSetXMLValue( psSrc, "NODATA", "3.4028234663852886e+38");
+ }
else
+ {
CPLSetXMLValue( psSrc, "NODATA",
CPLSPrintf("%.16g", m_dfNoDataValue) );
+ }
}
switch( m_eScalingType )
@@ -2235,6 +2250,10 @@ CPLErr VRTComplexSource::XMLInit( CPLXMLNode *psSrc, const char *pszVRTPath,
{
m_bNoDataSet = TRUE;
m_dfNoDataValue = CPLAtofM( CPLGetXMLValue(psSrc, "NODATA", "0") );
+ if( m_poRasterBand->GetRasterDataType() == GDT_Float32 )
+ {
+ m_dfNoDataValue = GDALAdjustNoDataCloseToFloatMax(m_dfNoDataValue);
+ }
}
if( CPLGetXMLValue(psSrc, "LUT", nullptr) != nullptr )
@@ -2506,7 +2525,9 @@ CPLErr VRTComplexSource::RasterIOInternal( int nReqXOff, int nReqYOff,
const bool bIsComplex = CPL_TO_BOOL( GDALDataTypeIsComplex(eBufType) );
const int nWordSize = GDALGetDataTypeSizeBytes(eWrkDataType);
const bool bNoDataSetIsNan = m_bNoDataSet && CPLIsNan(m_dfNoDataValue);
- const bool bNoDataSetAndNotNan = m_bNoDataSet && !CPLIsNan(m_dfNoDataValue);
+ const bool bNoDataSetAndNotNan = m_bNoDataSet && !CPLIsNan(m_dfNoDataValue) &&
+ GDALIsValueInRange<WorkingDT>(m_dfNoDataValue);
+ const auto fWorkingDataTypeNoData = static_cast<WorkingDT>(m_dfNoDataValue);
WorkingDT *pafData = nullptr;
if( m_eScalingType == VRT_SCALING_LINEAR &&
@@ -2588,8 +2609,7 @@ CPLErr VRTComplexSource::RasterIOInternal( int nReqXOff, int nReqYOff,
if( bNoDataSetIsNan && CPLIsNan(fResult) )
continue;
if( bNoDataSetAndNotNan &&
- GDALIsValueInRange<WorkingDT>(m_dfNoDataValue) &&
- ARE_REAL_EQUAL(fResult, static_cast<WorkingDT>(m_dfNoDataValue)) )
+ ARE_REAL_EQUAL(fResult, fWorkingDataTypeNoData) )
continue;
if( m_nColorTableComponent )
diff --git a/gdal/gcore/gdal_misc.cpp b/gdal/gcore/gdal_misc.cpp
index 109d4d3b3207..602bf350bb75 100644
--- a/gdal/gcore/gdal_misc.cpp
+++ b/gdal/gcore/gdal_misc.cpp
@@ -4019,3 +4019,17 @@ int GDALCanFileAcceptSidecarFile(const char* pszFilename)
return FALSE;
return TRUE;
}
+
+/************************************************************************/
+/* GDALAdjustNoDataCloseToFloatMax() */
+/************************************************************************/
+
+double GDALAdjustNoDataCloseToFloatMax(double dfVal)
+{
+ const auto kMaxFloat = std::numeric_limits<float>::max();
+ if( std::fabs(dfVal - -kMaxFloat) < 1e-10 * kMaxFloat )
+ return -kMaxFloat;
+ if( std::fabs(dfVal - kMaxFloat) < 1e-10 * kMaxFloat )
+ return kMaxFloat;
+ return dfVal;
+}
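For illustration (a sketch, not part of the patch): a Float32 nodata of -FLT_MAX serialized with "%.16g" re-parses as a double whose magnitude is slightly larger than FLT_MAX, which is exactly the case the helper above snaps back:

    #include <cfloat>
    #include <cstdio>

    int main()
    {
        // -FLT_MAX printed with %.16g and parsed back as a double:
        const double dfParsed = -3.402823466385289e+38;
        // Its magnitude now slightly exceeds FLT_MAX, so strict range checks fail:
        std::printf("%d\n", dfParsed < -static_cast<double>(FLT_MAX));  // prints 1
        // GDALAdjustNoDataCloseToFloatMax(dfParsed) returns exactly
        // -std::numeric_limits<float>::max(), making the value usable again.
        return 0;
    }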
diff --git a/gdal/gcore/gdal_priv.h b/gdal/gcore/gdal_priv.h
index 1d54d58e305e..c22a1b6bd4ba 100644
--- a/gdal/gcore/gdal_priv.h
+++ b/gdal/gcore/gdal_priv.h
@@ -1819,6 +1819,8 @@ template <class T> inline bool ARE_REAL_EQUAL(T fVal1, T fVal2, int ulp = 2)
std::abs(fVal1 - fVal2) < std::numeric_limits<T>::epsilon() * std::abs(fVal1+fVal2) * ulp;
}
+double GDALAdjustNoDataCloseToFloatMax(double dfVal);
+
#define DIV_ROUND_UP(a, b) ( ((a) % (b)) == 0 ? ((a) / (b)) : (((a) / (b)) + 1) )
// Number of data samples that will be used to compute approximate statistics
From 22eb92683f1db950c59a7f2f703f9a007ea9244f Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Fri, 2 Nov 2018 22:54:59 +0100
Subject: [PATCH 026/488] GTiff and GDALRasterBand: use
GDALAdjustNoDataCloseToFloatMax()
---
gdal/frmts/gtiff/geotiff.cpp | 11 +----------
gdal/gcore/gdalrasterband.cpp | 15 +--------------
2 files changed, 2 insertions(+), 24 deletions(-)
diff --git a/gdal/frmts/gtiff/geotiff.cpp b/gdal/frmts/gtiff/geotiff.cpp
index 11b2fd170565..50ed54dca595 100644
--- a/gdal/frmts/gtiff/geotiff.cpp
+++ b/gdal/frmts/gtiff/geotiff.cpp
@@ -13889,16 +13889,7 @@ CPLErr GTiffDataset::OpenOffset( TIFF *hTIFFIn,
dfNoDataValue = CPLAtofM( pszText );
if( nBitsPerSample == 32 && nSampleFormat == SAMPLEFORMAT_IEEEFP )
{
- if( fabs(dfNoDataValue - std::numeric_limits<float>::max()) <
- 1e-10 * std::numeric_limits<float>::max() )
- {
- dfNoDataValue = std::numeric_limits<float>::max();
- }
- else if( fabs(dfNoDataValue - (-std::numeric_limits<float>::max())) <
- 1e-10 * std::numeric_limits<float>::max() )
- {
- dfNoDataValue = -std::numeric_limits<float>::max();
- }
+ dfNoDataValue = GDALAdjustNoDataCloseToFloatMax(dfNoDataValue);
}
}
diff --git a/gdal/gcore/gdalrasterband.cpp b/gdal/gcore/gdalrasterband.cpp
index 02303ad029df..ff0874bdf9b0 100644
--- a/gdal/gcore/gdalrasterband.cpp
+++ b/gdal/gcore/gdalrasterband.cpp
@@ -2860,26 +2860,13 @@ static inline void ComputeFloatNoDataValue( GDALDataType eDataType,
{
if( eDataType == GDT_Float32 && bGotNoDataValue )
{
+ dfNoDataValue = GDALAdjustNoDataCloseToFloatMax(dfNoDataValue);
if (GDALIsValueInRange<float>(dfNoDataValue) )
{
fNoDataValue = static_cast<float>(dfNoDataValue);
bGotFloatNoDataValue = true;
bGotNoDataValue = false;
}
- else if( fabs(dfNoDataValue - std::numeric_limits<float>::max()) <
- 1e-10 * std::numeric_limits<float>::max() )
- {
- fNoDataValue = std::numeric_limits<float>::max();
- bGotFloatNoDataValue = true;
- bGotNoDataValue = false;
- }
- else if( fabs(dfNoDataValue - (-std::numeric_limits<float>::max())) <
- 1e-10 * std::numeric_limits<float>::max() )
- {
- fNoDataValue = -std::numeric_limits<float>::max();
- bGotFloatNoDataValue = true;
- bGotNoDataValue = false;
- }
}
}
From 22058ee3036cda03e00e0a6c73fb6802c46dab14 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Sat, 3 Nov 2018 19:46:30 +0100
Subject: [PATCH 027/488] Internal libtiff: resync with upstream
---
gdal/frmts/gtiff/libtiff/tif_config.h | 2 ++
gdal/frmts/gtiff/libtiff/tif_dir.h | 4 ++++
gdal/frmts/gtiff/libtiff/tif_webp.c | 5 +++--
gdal/frmts/gtiff/libtiff/tiffio.h | 2 +-
gdal/frmts/gtiff/libtiff/tiffiop.h | 1 +
5 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/gdal/frmts/gtiff/libtiff/tif_config.h b/gdal/frmts/gtiff/libtiff/tif_config.h
index 938d785e7bad..8f2cb37c0135 100644
--- a/gdal/frmts/gtiff/libtiff/tif_config.h
+++ b/gdal/frmts/gtiff/libtiff/tif_config.h
@@ -62,9 +62,11 @@
#ifdef _WIN64
# define TIFF_SSIZE_T GIntBig
# define TIFF_SSIZE_FORMAT CPL_FRMT_GIB
+# define TIFF_SIZE_FORMAT CPL_FRMT_GUIB
#else
# define TIFF_SSIZE_T signed long
# define TIFF_SSIZE_FORMAT "%ld"
+# define TIFF_SIZE_FORMAT "%lu"
#endif
/* Unsigned 16-bit type */
diff --git a/gdal/frmts/gtiff/libtiff/tif_dir.h b/gdal/frmts/gtiff/libtiff/tif_dir.h
index bb508addb401..b2f5e694883b 100644
--- a/gdal/frmts/gtiff/libtiff/tif_dir.h
+++ b/gdal/frmts/gtiff/libtiff/tif_dir.h
@@ -24,6 +24,10 @@
#ifndef _TIFFDIR_
#define _TIFFDIR_
+
+#include "tiff.h"
+#include "tiffio.h"
+
/*
* ``Library-private'' Directory-related Definitions.
*/
diff --git a/gdal/frmts/gtiff/libtiff/tif_webp.c b/gdal/frmts/gtiff/libtiff/tif_webp.c
index 0753c00f709e..a002f481daa6 100644
--- a/gdal/frmts/gtiff/libtiff/tif_webp.c
+++ b/gdal/frmts/gtiff/libtiff/tif_webp.c
@@ -79,8 +79,8 @@ int TWebPDatasetWriter(const uint8_t* data, size_t data_size,
if ( (tif->tif_rawcc + (tmsize_t)data_size) > tif->tif_rawdatasize ) {
TIFFErrorExt(tif->tif_clientdata, module,
- "Buffer too small by %lu bytes.",
- tif->tif_rawcc + data_size - tif->tif_rawdatasize);
+ "Buffer too small by " TIFF_SIZE_FORMAT " bytes.",
+ (size_t) (tif->tif_rawcc + data_size - tif->tif_rawdatasize));
return 0;
} else {
_TIFFmemcpy(tif->tif_rawcp, data, data_size);
@@ -594,6 +594,7 @@ TWebPVGetField(TIFF* tif, uint32 tag, va_list ap)
break;
case TIFFTAG_WEBP_LOSSLESS:
*va_arg(ap, int*) = sp->lossless;
+ break;
default:
return (*sp->vgetparent)(tif, tag, ap);
}
diff --git a/gdal/frmts/gtiff/libtiff/tiffio.h b/gdal/frmts/gtiff/libtiff/tiffio.h
index 670cd426c9cb..31c2e676e759 100644
--- a/gdal/frmts/gtiff/libtiff/tiffio.h
+++ b/gdal/frmts/gtiff/libtiff/tiffio.h
@@ -50,7 +50,7 @@ typedef struct tiff TIFF;
* promoted type (i.e. one of int, unsigned int, pointer,
* or double) and because we defined pseudo-tags that are
* outside the range of legal Aldus-assigned tags.
- * NB: tsize_t is int32 and not uint32 because some functions
+ * NB: tsize_t is signed and not unsigned because some functions
* return -1.
* NB: toff_t is not off_t for many reasons; TIFFs max out at
* 32-bit file offsets, and BigTIFF maxes out at 64-bit
diff --git a/gdal/frmts/gtiff/libtiff/tiffiop.h b/gdal/frmts/gtiff/libtiff/tiffiop.h
index 9e96d36e5cae..186c291f5d7a 100644
--- a/gdal/frmts/gtiff/libtiff/tiffiop.h
+++ b/gdal/frmts/gtiff/libtiff/tiffiop.h
@@ -70,6 +70,7 @@ extern int snprintf(char* str, size_t size, const char* format, ...);
#endif
#define streq(a,b) (strcmp(a,b) == 0)
+#define strneq(a,b,n) (strncmp(a,b,n) == 0)
#ifndef TRUE
#define TRUE 1
From 01b286eda6e94da5f5b3fb14d16583ad282690e8 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Sat, 3 Nov 2018 20:31:46 +0100
Subject: [PATCH 028/488] Fix warning on trusty_32bit
---
gdal/frmts/gtiff/libtiff/tif_config.h | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/gdal/frmts/gtiff/libtiff/tif_config.h b/gdal/frmts/gtiff/libtiff/tif_config.h
index 8f2cb37c0135..b691b5bf36bb 100644
--- a/gdal/frmts/gtiff/libtiff/tif_config.h
+++ b/gdal/frmts/gtiff/libtiff/tif_config.h
@@ -66,7 +66,11 @@
#else
# define TIFF_SSIZE_T signed long
# define TIFF_SSIZE_FORMAT "%ld"
-# define TIFF_SIZE_FORMAT "%lu"
+# if SIZEOF_VOIDP == 8
+# define TIFF_SIZE_FORMAT "%lu"
+# else
+# define TIFF_SIZE_FORMAT "%u"
+# endif
#endif
/* Unsigned 16-bit type */
From fd6cf3bad9192247dff77dbf055eb387a05c321e Mon Sep 17 00:00:00 2001
From: jratike80
Date: Mon, 5 Nov 2018 11:54:16 +0200
Subject: [PATCH 029/488] Fix wrong double quote characters in OCI manual
---
gdal/ogr/ogrsf_frmts/oci/drv_oci.html | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/gdal/ogr/ogrsf_frmts/oci/drv_oci.html b/gdal/ogr/ogrsf_frmts/oci/drv_oci.html
index 432e25fc95a9..27a903079152 100644
--- a/gdal/ogr/ogrsf_frmts/oci/drv_oci.html
+++ b/gdal/ogr/ogrsf_frmts/oci/drv_oci.html
@@ -102,11 +102,11 @@
Layer Creation Options
OVERWRITE: This may be "YES" to force an existing layer (=table) of the
same desired name to be destroyed before creating the requested layer.
-The default value is “NO"
+The default value is "NO"
TRUNCATE: This may be "YES" to force the existing table to
be reused, but to first truncate all records in the table, preserving
-indexes or dependencies. The default value is “NO".
+indexes or dependencies. The default value is "NO".
LAUNDER: This may be "YES" to force new fields created on this
layer to have their field names "laundered" into a form more compatible with
From b6fd449f8a8f3ec96180eb81ded783e0e8309361 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Mon, 5 Nov 2018 23:20:04 +0100
Subject: [PATCH 030/488] ESRIJson driver doc: mention orderByFields option for
paged requests (fixes #1073) [ci skip]
---
gdal/ogr/ogrsf_frmts/geojson/drv_esrijson.html | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/gdal/ogr/ogrsf_frmts/geojson/drv_esrijson.html b/gdal/ogr/ogrsf_frmts/geojson/drv_esrijson.html
index 0c46b69252d0..89ef82ef5050 100644
--- a/gdal/ogr/ogrsf_frmts/geojson/drv_esrijson.html
+++ b/gdal/ogr/ogrsf_frmts/geojson/drv_esrijson.html
@@ -21,6 +21,10 @@
ESRIJSON / FeatureService driver
is subject to a server limit). If it is not set, OGR will set it to the maximum
value allowed by the server.
+
Note: for paged requests to work properly, it is generally necessary to add
+a sort clause on a field, typically the OBJECTID with a "&orderByFields=OBJECTID+ASC"
+parameter in the URL, so that the server returns the results in a reliable way.
+
Datasource
The driver accepts three types of sources of data:
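For example (a sketch; the service URL is hypothetical), a FeatureService layer can be opened with an explicit sort so that paging is stable:

    #include "gdal.h"

    int main()
    {
        GDALAllRegister();
        // Hypothetical endpoint, with the orderByFields clause recommended above.
        const char* pszURL =
            "ESRIJSON:https://example.com/arcgis/rest/services/Roads/FeatureServer/0/"
            "query?where=1%3D1&outfields=*&f=json&orderByFields=OBJECTID+ASC";
        GDALDatasetH hDS = GDALOpenEx(pszURL, GDAL_OF_VECTOR, nullptr, nullptr, nullptr);
        if( hDS == nullptr )
            return 1;
        GDALClose(hDS);
        return 0;
    }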
SYNC_STRATEGY=TIMESTAMP/ETAG. Determines which criterion is used to
+ * determine if a target file must be replaced when it already exists and
+ * has the same file size as the source.
+ * Only apply for a source being a network filesystem.
+ *
+ * The default is TIMESTAMP (similarly to how 'aws s3 sync' works), that is
+ * to say that for an upload operation, a remote file is
+ * replaced if it has a different size or if it is older than the source.
+ * For a download operation, a local file is replaced if it has a different
+ * size or if it is newer than the remote file.
+ *
+ * The ETAG strategy assumes that the ETag metadata of the remote file is
+ * the MD5Sum of the file content, which is only true in the case of /vsis3/
+ * for files not using KMS server side encryption and uploaded in a single
+ * PUT operation (so smaller than 50 MB given the default used by GDAL).
+ *
SYNC_STRATEGY=TIMESTAMP/ETAG. Determines which criterion is used to
* determine if a target file must be replaced when it already exists and
* has the same file size as the source.
- * Only apply for a source being a network filesystem.
+ * Only applies for a source or target being a network filesystem.
*
* The default is TIMESTAMP (similarly to how 'aws s3 sync' works), that is
* to say that for an upload operation, a remote file is
@@ -490,6 +490,8 @@ int VSIRename( const char * oldpath, const char * newpath )
* the MD5Sum of the file content, which is only true in the case of /vsis3/
* for files not using KMS server side encryption and uploaded in a single
* PUT operation (so smaller than 50 MB given the default used by GDAL).
+ * Only to be used for /vsis3/, /vsigs/ or other filesystems using a
+ * MD5Sum as ETAG.
*
*
* @param pProgressFunc Progress callback, or NULL.
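A minimal usage sketch of the option documented above (assuming the VSISync() signature from cpl_vsi.h; the local and bucket paths are hypothetical):

    #include "cpl_vsi.h"

    int main()
    {
        // Upload a local tree to S3; with ETAG, a same-size remote file is only
        // replaced when the local MD5 differs from the remote ETag.
        const char* const apszOptions[] = { "SYNC_STRATEGY=ETAG", nullptr };
        const int bOK = VSISync( "/home/user/data/",        // hypothetical source
                                 "/vsis3/my-bucket/data/",  // hypothetical target
                                 apszOptions,
                                 nullptr,    // progress callback
                                 nullptr,    // progress user data
                                 nullptr );  // per-file output list not needed
        return bOK ? 0 : 1;
    }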
From a5343ed6a8af2495681940a9d1cc57c074bddbf9 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Sat, 10 Nov 2018 09:49:04 +0100
Subject: [PATCH 047/488] OpenFileGDB: fix potential crash on corrupted
datasets. Fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11313.
Credit to OSS Fuzz
---
.../openfilegdb/ogropenfilegdbdatasource.cpp | 56 +++++++++++--------
1 file changed, 32 insertions(+), 24 deletions(-)
diff --git a/gdal/ogr/ogrsf_frmts/openfilegdb/ogropenfilegdbdatasource.cpp b/gdal/ogr/ogrsf_frmts/openfilegdb/ogropenfilegdbdatasource.cpp
index f448cfe478be..ba1f7e557a23 100644
--- a/gdal/ogr/ogrsf_frmts/openfilegdb/ogropenfilegdbdatasource.cpp
+++ b/gdal/ogr/ogrsf_frmts/openfilegdb/ogropenfilegdbdatasource.cpp
@@ -183,6 +183,7 @@ int OGROpenFileGDBDataSource::Open( const char* pszFilename )
}
if( !(oTable.GetFieldCount() >= 2 &&
+ oTable.GetTotalRecordCount() < 100000 &&
oTable.GetField(0)->GetName() == "Name" &&
oTable.GetField(0)->GetType() == FGFT_STRING &&
oTable.GetField(1)->GetName() == "FileFormat" &&
@@ -197,40 +198,47 @@ int OGROpenFileGDBDataSource::Open( const char* pszFilename )
int iGDBObjectClasses = -1; /* V9.X */
std::vector<std::string> aosTableNames;
- for( int i=0;i<oTable.GetTotalRecordCount();i++)
+ try
{
- if( !oTable.SelectRow(i) )
+ for( int i=0;i<oTable.GetTotalRecordCount();i++)
{
- if( oTable.HasGotError() )
- break;
- aosTableNames.push_back("");
- continue;
- }
-
- const OGRField* psField = oTable.GetFieldValue(0);
- if( psField != nullptr )
- {
- aosTableNames.push_back(psField->String);
-
- if( strcmp(psField->String, "GDB_Items") == 0 )
+ if( !oTable.SelectRow(i) )
{
- iGDBItems = i;
+ if( oTable.HasGotError() )
+ break;
+ aosTableNames.push_back("");
+ continue;
}
- else if( strcmp(psField->String, "GDB_FeatureClasses") == 0 )
+
+ const OGRField* psField = oTable.GetFieldValue(0);
+ if( psField != nullptr )
{
- iGDBFeatureClasses = i;
+ aosTableNames.push_back(psField->String);
+
+ if( strcmp(psField->String, "GDB_Items") == 0 )
+ {
+ iGDBItems = i;
+ }
+ else if( strcmp(psField->String, "GDB_FeatureClasses") == 0 )
+ {
+ iGDBFeatureClasses = i;
+ }
+ else if( strcmp(psField->String, "GDB_ObjectClasses") == 0 )
+ {
+ iGDBObjectClasses = i;
+ }
+ m_osMapNameToIdx[psField->String] = 1 + i;
}
- else if( strcmp(psField->String, "GDB_ObjectClasses") == 0 )
+ else
{
- iGDBObjectClasses = i;
+ aosTableNames.push_back("");
}
- m_osMapNameToIdx[psField->String] = 1 + i;
- }
- else
- {
- aosTableNames.push_back("");
}
}
+ catch( const std::exception& )
+ {
+ return FALSE;
+ }
oTable.Close();
From c4b4dfe1b5b03622575bd88104977ea52d08ddb7 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Sat, 10 Nov 2018 16:21:34 +0100
Subject: [PATCH 048/488] Internal libtiff: resync with change of value for
COMPRESSION_ZSTD and COMPRESSION_WEBP (breaking change)
---
autotest/gcore/data/byte_zstd.tif | Bin 576 -> 576 bytes
autotest/gcore/data/byte_zstd_corrupted.tif | Bin 576 -> 576 bytes
autotest/gcore/data/byte_zstd_corrupted2.tif | Bin 576 -> 576 bytes
autotest/gcore/data/tif_webp.tif | Bin 1774 -> 1774 bytes
.../gcore/data/tif_webp_huge_single_strip.tif | Bin 850 -> 850 bytes
gdal/frmts/gtiff/gtiff.h | 4 ++--
gdal/frmts/gtiff/libtiff/tiff.h | 4 ++--
7 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/autotest/gcore/data/byte_zstd.tif b/autotest/gcore/data/byte_zstd.tif
index ec19af8efa35301e347cc2a746fe20febfa275c0..ca5e1de3c270a326bd95a21603d8fb1a6d6b5f8f 100644
GIT binary patch
delta 13
UcmX@Wa)4!m8B@UFjTRhC03u)nR{#J2
delta 13
UcmX@Wa)4!m8B<=zMhgxm03q!JIsgCw
diff --git a/autotest/gcore/data/byte_zstd_corrupted.tif b/autotest/gcore/data/byte_zstd_corrupted.tif
index 980e88a26b7d921ade77bd3fb3118ea10c1d85b1..d2d7378ce249aa65ae78a09029df2bb77f9e6582 100644
GIT binary patch
delta 13
UcmX@Wa)4!m8B@UFjTRhC03u)nR{#J2
delta 13
UcmX@Wa)4!m8B<=zMhgxm03q!JIsgCw
diff --git a/autotest/gcore/data/byte_zstd_corrupted2.tif b/autotest/gcore/data/byte_zstd_corrupted2.tif
index 26de0a304cd8bff8ca2e45c34cd4d4a487ed5d46..d3a1c3c7eacc92bf629cfb83c28f65f4bfb89575 100644
GIT binary patch
delta 13
UcmX@Wa)4!m8B@UFjTRhC03u)nR{#J2
delta 13
UcmX@Wa)4!m8B<=zMhgxm03q!JIsgCw
diff --git a/autotest/gcore/data/tif_webp.tif b/autotest/gcore/data/tif_webp.tif
index 8ec4995a9392bfa60990cb9b7226a7da29726bad..ebefb014a53f301ead860c7aba32be9902d962b3 100644
GIT binary patch
delta 13
UcmaFI`;K>l8B^fljTYP304UrAcmMzZ
delta 13
UcmaFI`;K>l8B>18MvHB104Qk%TL1t6
diff --git a/autotest/gcore/data/tif_webp_huge_single_strip.tif b/autotest/gcore/data/tif_webp_huge_single_strip.tif
index 0ea6c28b7f48e8cce66b41026d155404211e18f0..297110aa1fae489ab69d399fa6553260d1d0bd23 100644
GIT binary patch
delta 13
Ucmcb_c8P6*8B^fljTTbO03>GwlK=n!
delta 13
Ucmcb_c8P6*8B>18Mhhus03-ASb^rhX
diff --git a/gdal/frmts/gtiff/gtiff.h b/gdal/frmts/gtiff/gtiff.h
index ad75610bd267..ce0f8ed39ceb 100644
--- a/gdal/frmts/gtiff/gtiff.h
+++ b/gdal/frmts/gtiff/gtiff.h
@@ -97,7 +97,7 @@ uint16 GTiffGetAlphaValue(const char* pszValue, uint16 nDefault);
#endif
#if !defined(COMPRESSION_ZSTD)
-#define COMPRESSION_ZSTD 34926 /* ZSTD */
+#define COMPRESSION_ZSTD 50000 /* ZSTD */
#endif
#if !defined(TIFFTAG_ZSTD_LEVEL)
@@ -109,7 +109,7 @@ uint16 GTiffGetAlphaValue(const char* pszValue, uint16 nDefault);
#endif
#if !defined(COMPRESSION_WEBP)
-#define COMPRESSION_WEBP 34927 /* WebP */
+#define COMPRESSION_WEBP 50001 /* WebP */
#endif
#if !defined(TIFFTAG_WEBP_LEVEL)
diff --git a/gdal/frmts/gtiff/libtiff/tiff.h b/gdal/frmts/gtiff/libtiff/tiff.h
index 116ac34effe4..5b0a0c90f67a 100644
--- a/gdal/frmts/gtiff/libtiff/tiff.h
+++ b/gdal/frmts/gtiff/libtiff/tiff.h
@@ -190,8 +190,8 @@ typedef enum {
#define COMPRESSION_LERC 34887 /* ESRI Lerc codec: https://github.com/Esri/lerc */
/* compression codes 34887-34889 are reserved for ESRI */
#define COMPRESSION_LZMA 34925 /* LZMA2 */
-#define COMPRESSION_ZSTD 34926 /* ZSTD: WARNING not registered in Adobe-maintained registry */
-#define COMPRESSION_WEBP 34927 /* WEBP: WARNING not registered in Adobe-maintained registry */
+#define COMPRESSION_ZSTD 50000 /* ZSTD: WARNING not registered in Adobe-maintained registry */
+#define COMPRESSION_WEBP 50001 /* WEBP: WARNING not registered in Adobe-maintained registry */
#define TIFFTAG_PHOTOMETRIC 262 /* photometric interpretation */
#define PHOTOMETRIC_MINISWHITE 0 /* min value is white */
#define PHOTOMETRIC_MINISBLACK 1 /* min value is black */
From ca7aa5677aab8221ad4251ae7aa044a3b761a79c Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Mon, 12 Nov 2018 08:53:03 +0100
Subject: [PATCH 049/488] Internal libtiff: now at v4.0.10
---
gdal/frmts/gtiff/libtiff/tiffvers.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/gdal/frmts/gtiff/libtiff/tiffvers.h b/gdal/frmts/gtiff/libtiff/tiffvers.h
index 7c415740f655..403d61be04b5 100644
--- a/gdal/frmts/gtiff/libtiff/tiffvers.h
+++ b/gdal/frmts/gtiff/libtiff/tiffvers.h
@@ -1,4 +1,4 @@
-#define TIFFLIB_VERSION_STR "LIBTIFF, Version 4.0.9\nCopyright (c) 1988-1996 Sam Leffler\nCopyright (c) 1991-1996 Silicon Graphics, Inc."
+#define TIFFLIB_VERSION_STR "LIBTIFF, Version 4.0.10\nCopyright (c) 1988-1996 Sam Leffler\nCopyright (c) 1991-1996 Silicon Graphics, Inc."
/*
* This define can be used in code that requires
* compilation-related definitions specific to a
@@ -6,4 +6,4 @@
* version checking should be done based on the
* string returned by TIFFGetVersion.
*/
-#define TIFFLIB_VERSION 20171118
+#define TIFFLIB_VERSION 20181110
From 76b0e9fd8e1149b8a412c07a964167c19100271b Mon Sep 17 00:00:00 2001
From: drons
Date: Mon, 12 Nov 2018 22:43:36 +0300
Subject: [PATCH 050/488] PRF: Add georeference shift from pixel's center to
top-left
---
gdal/frmts/prf/phprfdataset.cpp | 3 +++
1 file changed, 3 insertions(+)
diff --git a/gdal/frmts/prf/phprfdataset.cpp b/gdal/frmts/prf/phprfdataset.cpp
index 445a02a0f112..58a8498acea5 100644
--- a/gdal/frmts/prf/phprfdataset.cpp
+++ b/gdal/frmts/prf/phprfdataset.cpp
@@ -615,6 +615,9 @@ GDALDataset* PhPrfDataset::Open( GDALOpenInfo* poOpenInfo )
adfGeoTrans[4] = 0;
adfGeoTrans[5] = (adfDemMetadata[2] - adfDemMetadata[3])/(nSizeY - 1);
+ adfGeoTrans[0] -= 0.5 * adfGeoTrans[1];
+ adfGeoTrans[3] -= 0.5 * adfGeoTrans[5];
+
if( bDemShiftOk )
{
adfGeoTrans[0] += adfDemShift[0];
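The half-pixel shift added above follows GDAL's geotransform convention, in which (GT[0], GT[3]) is the outer corner of the top-left pixel. A small sketch of the idea for a north-up transform (GT[2] = GT[4] = 0), not the driver code itself:

    // Pixel (col, row) maps to georeferenced coordinates as:
    //   X = GT[0] + col * GT[1] + row * GT[2]
    //   Y = GT[3] + col * GT[4] + row * GT[5]
    // so when the source metadata gives the coordinate of the first pixel
    // *center*, the origin must move back by half a pixel in each direction.
    void CenterOriginToCornerOrigin( double adfGeoTrans[6] )
    {
        adfGeoTrans[0] -= 0.5 * adfGeoTrans[1];
        adfGeoTrans[3] -= 0.5 * adfGeoTrans[5];
    }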
From f358ef36ce3adb7ed85ec59a6cbad99f78de3463 Mon Sep 17 00:00:00 2001
From: drons
Date: Mon, 12 Nov 2018 23:00:30 +0300
Subject: [PATCH 051/488] PRF: Add georeference test for x-dem files
---
autotest/gdrivers/data/PRF/dem.x-dem | 8 ++++----
autotest/gdrivers/prf.py | 10 +++++++++-
2 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/autotest/gdrivers/data/PRF/dem.x-dem b/autotest/gdrivers/data/PRF/dem.x-dem
index 03588b00d8b8..9b3d7711b05f 100644
--- a/autotest/gdrivers/data/PRF/dem.x-dem
+++ b/autotest/gdrivers/data/PRF/dem.x-dem
@@ -18,10 +18,10 @@
-
-
-
-
+
+
+
+
diff --git a/autotest/gdrivers/prf.py b/autotest/gdrivers/prf.py
index cbe21c4509e5..03338af0339c 100755
--- a/autotest/gdrivers/prf.py
+++ b/autotest/gdrivers/prf.py
@@ -98,13 +98,21 @@ def prf_3():
return 'success'
+
+def prf_4():
+
+ tst = gdaltest.GDALTest('prf', './PRF/dem.x-dem', 1, 0)
+ return tst.testOpen(check_gt=(1.5, 1.0, 0.0, 9329.0, 0.0, -2.0))
+
+
###############################################################################
gdaltest_list = [
prf_1,
prf_2,
- prf_3
+ prf_3,
+ prf_4
]
if __name__ == '__main__':
From e8c9bea5dbe8e90d01d575f94a040fcaee27c24f Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Tue, 13 Nov 2018 12:13:41 +0100
Subject: [PATCH 052/488] GDALDestroy(): no longer call it automatically on
GCC/CLang (non-MSVC) builds
Experiments have shown that a __attribute__((destructor )) function is called
*AFTER* the destructor of static C++ objects. Which is prone to crash is
code called by this destructor function then uses those C++ objects.
There could have been a solution by making the destructor function a C++
object itself and playing with gcc __attribute__((init_priority(XXXX))),
unfortunately the destructor of static C++ objects in functions/methods
is always called before the destructor of objects with explicit priority,
so there is no workaround.
With MSVC, apparently according to
https://stackoverflow.com/questions/4496233/which-is-called-first-dllmain-or-global-static-object-constructor
DllMain(DLL_PROCESS_DETACH) is called before C++ object destruction,
so it seems safe to keep it.
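A minimal sketch of the ordering problem described above (GCC/Clang behaviour as observed in this commit message; single translation unit assumed):

    #include <cstdio>

    struct StaticResource
    {
        ~StaticResource() { std::printf("~StaticResource()\n"); }
    };

    static StaticResource gResource;  // destroyed during static C++ destruction

    static void OnLibraryUnload() __attribute__((destructor));
    static void OnLibraryUnload()
    {
        // Per the experiments mentioned above, this runs *after* gResource has
        // already been destroyed, so touching gResource here would be a
        // use-after-destruction; hence the automatic GDALDestroy() call is dropped.
        std::printf("OnLibraryUnload()\n");
    }

    int main() { return 0; }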
---
gdal/gcore/gdaldllmain.cpp | 24 +++++++-----------------
1 file changed, 7 insertions(+), 17 deletions(-)
diff --git a/gdal/gcore/gdaldllmain.cpp b/gdal/gcore/gdaldllmain.cpp
index 9e0383e43a71..fac9b6da54da 100644
--- a/gdal/gcore/gdaldllmain.cpp
+++ b/gdal/gcore/gdaldllmain.cpp
@@ -56,10 +56,15 @@ void CPLFinalizeTLS();
* This function calls GDALDestroyDriverManager() and OGRCleanupAll() and
* finalize Thread Local Storage variables.
*
- * This function should *not* usually be explicitly called by application code
- * if GDAL is dynamically linked, since it is automatically called through
+ * Prior to GDAL 2.4.0, this function should normally not be explicitly called by
+ * application code if GDAL is dynamically linked (but that does not hurt),
+ * since it was automatically called through
* the unregistration mechanisms of dynamic library loading.
*
+ * Since GDAL 2.4.0, this function may be called by application code, since
+ * it is no longer called automatically, on non-MSVC builds, due to ordering
+ * problems with respect to automatic destruction of global C++ objects.
+ *
* Note: no GDAL/OGR code should be called after this call!
*
* @since GDAL 2.0
@@ -100,7 +105,6 @@ void GDALDestroy(void)
#ifdef __GNUC__
static void GDALInitialize() __attribute__ ((constructor)) ;
-static void GDALDestructor() __attribute__ ((destructor)) ;
/************************************************************************/
/* Called when GDAL is loaded by loader or by dlopen(), */
@@ -118,20 +122,6 @@ static void GDALInitialize()
#endif
}
-/************************************************************************/
-/* Called when GDAL is unloaded by loader or by dlclose(), */
-/* and before dlclose() returns. */
-/************************************************************************/
-
-static void GDALDestructor()
-{
- if( bGDALDestroyAlreadyCalled )
- return;
- if( !CPLTestBool(CPLGetConfigOption("GDAL_DESTROY", "YES")) )
- return;
- GDALDestroy();
-}
-
#endif // __GNUC__
/************************************************************************/
From 41babc2c7cb61ab76c6a3772735b20e6610eca52 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Wed, 14 Nov 2018 00:15:52 +0100
Subject: [PATCH 053/488] Overview / RasterIO resampling: do not use nodata
value as a valid output value
---
autotest/gcore/rasterio.py | 81 +++++++++++++
autotest/gdrivers/vrtpansharpen.py | 4 +-
gdal/gcore/overview.cpp | 180 ++++++++++++++++++++++++++---
3 files changed, 249 insertions(+), 16 deletions(-)
diff --git a/autotest/gcore/rasterio.py b/autotest/gcore/rasterio.py
index 273badb3303a..76e15a805188 100755
--- a/autotest/gcore/rasterio.py
+++ b/autotest/gcore/rasterio.py
@@ -1127,6 +1127,86 @@ def rasterio_lanczos_nodata():
return 'success'
+###############################################################################
+
+
+def rasterio_resampled_value_is_nodata():
+
+ gdal.FileFromMemBuffer('/vsimem/in.asc',
+"""ncols 4
+nrows 4
+xllcorner 440720.000000000000
+yllcorner 3750120.000000000000
+cellsize 60.000000000000
+nodata_value 0
+ -1.1 -1.1 1.1 1.1
+ -1.1 -1.1 1.1 1.1
+ -1.1 -1.1 1.1 1.1
+ -1.1 -1.1 1.1 1.1""")
+
+ ds = gdal.Open('/vsimem/in.asc')
+
+ data = ds.GetRasterBand(1).ReadRaster(buf_xsize=1,
+ buf_ysize=1,
+ resample_alg=gdal.GRIORA_Lanczos)
+ data_ar = struct.unpack('f' * 1, data)
+ expected_ar = (1.1754943508222875e-38, )
+ if data_ar != expected_ar:
+ gdaltest.post_reason('fail')
+ print(data_ar)
+ return 'fail'
+
+ data = ds.GetRasterBand(1).ReadRaster(buf_xsize=1,
+ buf_ysize=1,
+ resample_alg=gdal.GRIORA_Average)
+ data_ar = struct.unpack('f' * 1, data)
+ expected_ar = (1.1754943508222875e-38, )
+ if data_ar != expected_ar:
+ gdaltest.post_reason('fail')
+ print(data_ar)
+ return 'fail'
+
+ gdal.Unlink('/vsimem/in.asc')
+
+
+ gdal.FileFromMemBuffer('/vsimem/in.asc',
+"""ncols 4
+nrows 4
+xllcorner 440720.000000000000
+yllcorner 3750120.000000000000
+cellsize 60.000000000000
+nodata_value 0
+ -1 -1 1 1
+ -1 -1 1 1
+ -1 -1 1 1
+ -1 -1 1 1""")
+
+ ds = gdal.Open('/vsimem/in.asc')
+
+ data = ds.GetRasterBand(1).ReadRaster(buf_xsize=1,
+ buf_ysize=1,
+ resample_alg=gdal.GRIORA_Lanczos)
+ data_ar = struct.unpack('I' * 1, data)
+ expected_ar = (1, )
+ if data_ar != expected_ar:
+ gdaltest.post_reason('fail')
+ print(data_ar)
+ return 'fail'
+
+ data = ds.GetRasterBand(1).ReadRaster(buf_xsize=1,
+ buf_ysize=1,
+ resample_alg=gdal.GRIORA_Average)
+ data_ar = struct.unpack('I' * 1, data)
+ expected_ar = (1, )
+ if data_ar != expected_ar:
+ gdaltest.post_reason('fail')
+ print(data_ar)
+ return 'fail'
+
+ gdal.Unlink('/vsimem/in.asc')
+
+ return 'success'
+
gdaltest_list = [
rasterio_1,
rasterio_2,
@@ -1145,6 +1225,7 @@ def rasterio_lanczos_nodata():
rasterio_15,
rasterio_16,
rasterio_lanczos_nodata,
+ rasterio_resampled_value_is_nodata,
]
# gdaltest_list = [ rasterio_16 ]
diff --git a/autotest/gdrivers/vrtpansharpen.py b/autotest/gdrivers/vrtpansharpen.py
index 185a192dd72e..677c0ec86843 100755
--- a/autotest/gdrivers/vrtpansharpen.py
+++ b/autotest/gdrivers/vrtpansharpen.py
@@ -1681,7 +1681,7 @@ def vrtpansharpen_9():
gdaltest.post_reason('fail')
return 'fail'
cs = [vrt_ds.GetRasterBand(i + 1).Checksum() for i in range(vrt_ds.RasterCount)]
- if cs not in([4179, 8767, 52257], [4175, 8758, 52249]):
+ if cs not in ([4640, 9158, 54450],):
gdaltest.post_reason('fail')
print(cs)
return 'fail'
@@ -1723,7 +1723,7 @@ def vrtpansharpen_9():
gdaltest.post_reason('fail')
return 'fail'
cs = [vrt_ds.GetRasterBand(i + 1).Checksum() for i in range(vrt_ds.RasterCount)]
- if cs not in([4179, 8767, 52257], [4175, 8758, 52249]):
+ if cs not in ([4640, 9158, 54450],):
gdaltest.post_reason('fail')
print(cs)
return 'fail'
diff --git a/gdal/gcore/overview.cpp b/gdal/gcore/overview.cpp
index 62b46e3c2d56..cc0c2b6636e5 100644
--- a/gdal/gcore/overview.cpp
+++ b/gdal/gcore/overview.cpp
@@ -249,6 +249,100 @@ static bool ReadColorTableAsArray( const GDALColorTable* poColorTable,
return true;
}
+/************************************************************************/
+/* GetReplacementValueIfNoData() */
+/************************************************************************/
+
+static float GetReplacementValueIfNoData(GDALDataType dt, int bHasNoData,
+ float fNoDataValue)
+{
+ float fReplacementVal = 0.0f;
+ if( bHasNoData )
+ {
+ if( dt == GDT_Byte )
+ {
+ if( fNoDataValue == std::numeric_limits<GByte>::max() )
+ fReplacementVal = static_cast<float>(
+ std::numeric_limits<GByte>::max() - 1);
+ else
+ fReplacementVal = fNoDataValue + 1;
+ }
+ else if( dt == GDT_UInt16 )
+ {
+ if( fNoDataValue == std::numeric_limits<GUInt16>::max() )
+ fReplacementVal = static_cast<float>(
+ std::numeric_limits<GUInt16>::max() - 1);
+ else
+ fReplacementVal = fNoDataValue + 1;
+ }
+ else if( dt == GDT_Int16 )
+ {
+ if( fNoDataValue == std::numeric_limits<GInt16>::max() )
+ fReplacementVal = static_cast<float>(
+ std::numeric_limits<GInt16>::max() - 1);
+ else
+ fReplacementVal = fNoDataValue + 1;
+ }
+ else if( dt == GDT_UInt32 )
+ {
+ // Be careful to limited precision of float
+ fReplacementVal = fNoDataValue + 1;
+ double dfVal = fNoDataValue;
+ if( fReplacementVal >= static_cast<double>(std::numeric_limits<GUInt32>::max() - 128) )
+ {
+ while( fReplacementVal == fNoDataValue )
+ {
+ dfVal -= 1.0;
+ fReplacementVal = static_cast<float>(dfVal);
+ }
+ }
+ else
+ {
+ while( fReplacementVal == fNoDataValue )
+ {
+ dfVal += 1.0;
+ fReplacementVal = static_cast<float>(dfVal);
+ }
+ }
+ }
+ else if( dt == GDT_Int32 )
+ {
+ // Be careful to limited precision of float
+ fReplacementVal = fNoDataValue + 1;
+ double dfVal = fNoDataValue;
+ if( fReplacementVal >= static_cast<double>(std::numeric_limits<GInt32>::max() - 64) )
+ {
+ while( fReplacementVal == fNoDataValue )
+ {
+ dfVal -= 1.0;
+ fReplacementVal = static_cast<float>(dfVal);
+ }
+ }
+ else
+ {
+ while( fReplacementVal == fNoDataValue )
+ {
+ dfVal += 1.0;
+ fReplacementVal = static_cast<float>(dfVal);
+ }
+ }
+ }
+ else if( dt == GDT_Float32 || dt == GDT_Float64 )
+ {
+ if( fNoDataValue == 0 )
+ {
+ fReplacementVal = std::numeric_limits<float>::min();
+ }
+ else
+ {
+ fReplacementVal = static_cast<float>(
+ fNoDataValue + 1e-7 * fNoDataValue);
+ }
+ }
+ }
+ return fReplacementVal;
+}
+
/************************************************************************/
/* GDALResampleChunk32R_Average() */
/************************************************************************/
@@ -284,6 +378,8 @@ GDALResampleChunk32R_AverageT( double dfXRatioDstToSrc,
tNoDataValue = 0;
else
tNoDataValue = static_cast(fNoDataValue);
+ const T tReplacementVal = static_cast<T>(GetReplacementValueIfNoData(
+ poOverview->GetRasterDataType(), bHasNoData, fNoDataValue));
int nChunkRightXOff = nChunkXOff + nChunkXSize;
int nChunkBottomYOff = nChunkYOff + nChunkYSize;
@@ -441,7 +537,10 @@ GDALResampleChunk32R_AverageT( double dfXRatioDstToSrc,
+ pSrcScanlineShifted[nChunkXSize]
+ pSrcScanlineShifted[1+nChunkXSize];
- pDstScanline[iDstPixel] = static_cast<T>((nTotal + 2) / 4);
+ auto nVal = static_cast<T>((nTotal + 2) / 4);
+ if( bHasNoData && nVal == tNoDataValue )
+ nVal = tReplacementVal;
+ pDstScanline[iDstPixel] = nVal;
pSrcScanlineShifted += 2;
}
}
@@ -480,11 +579,19 @@ GDALResampleChunk32R_AverageT( double dfXRatioDstToSrc,
}
else if( eWrkDataType == GDT_Byte ||
eWrkDataType == GDT_UInt16)
- pDstScanline[iDstPixel] =
- static_cast<T>((dfTotal + nCount / 2) / nCount);
+ {
+ auto nVal = static_cast<T>((dfTotal + nCount / 2) / nCount);
+ if( bHasNoData && nVal == tNoDataValue )
+ nVal = tReplacementVal;
+ pDstScanline[iDstPixel] = nVal;
+ }
else
- pDstScanline[iDstPixel] =
- static_cast<T>(dfTotal / nCount);
+ {
+ auto nVal = static_cast<T>(dfTotal / nCount);
+ if( bHasNoData && nVal == tNoDataValue )
+ nVal = tReplacementVal;
+ pDstScanline[iDstPixel] = nVal;
+ }
}
}
}
@@ -1772,6 +1879,33 @@ GDALResampleChunk32R_ConvolutionT( double dfXRatioDstToSrc,
{
if( !bHasNoData )
fNoDataValue = 0.0f;
+ const float fReplacementVal = GetReplacementValueIfNoData(
+ papoDstBands[0]->GetRasterDataType(), bHasNoData, fNoDataValue);
+ // cppcheck-suppress unreadVariable
+ const int isIntegerDT = GDALDataTypeIsInteger(papoDstBands[0]->GetRasterDataType());
+ const auto nNodataValueInt64 = static_cast<GInt64>(fNoDataValue);
+
+ auto replaceValIfNodata =
+ [bHasNoData, isIntegerDT, nNodataValueInt64, fNoDataValue, fReplacementVal](float fVal)
+ {
+ if( bHasNoData )
+ {
+ if( isIntegerDT )
+ {
+ if( nNodataValueInt64 == static_cast<GInt64>(fVal) )
+ {
+ // Do not use the nodata value
+ return fReplacementVal;
+ }
+ }
+ else if( fNoDataValue == fVal )
+ {
+ // Do not use the nodata value
+ return fReplacementVal;
+ }
+ }
+ return fVal;
+ };
/* -------------------------------------------------------------------- */
/* Allocate work buffers. */
@@ -2098,6 +2232,14 @@ GDALResampleChunk32R_ConvolutionT( double dfXRatioDstToSrc,
GDALResampleConvolutionVertical_16cols(
padfHorizontalFilteredBand + j, nDstXSize, padfWeights,
nSrcLineCount, pafDstScanline + iFilteredPixelOff );
+ if( bHasNoData )
+ {
+ for( int k = 0; k < 16; k++ )
+ {
+ pafDstScanline[iFilteredPixelOff + k] =
+ replaceValIfNodata(pafDstScanline[iFilteredPixelOff + k]);
+ }
+ }
}
#else
for( ;
@@ -2107,16 +2249,24 @@ GDALResampleChunk32R_ConvolutionT( double dfXRatioDstToSrc,
GDALResampleConvolutionVertical_8cols(
padfHorizontalFilteredBand + j, nDstXSize, padfWeights,
nSrcLineCount, pafDstScanline + iFilteredPixelOff );
+ if( bHasNoData )
+ {
+ for( int k = 0; k < 8; k++ )
+ {
+ pafDstScanline[iFilteredPixelOff + k] =
+ replaceValIfNodata(pafDstScanline[iFilteredPixelOff + k]);
+ }
+ }
}
#endif
for( ; iFilteredPixelOff < nDstXSize; iFilteredPixelOff++, j++ )
{
- const double dfVal =
+ const float fVal = static_cast<float>(
GDALResampleConvolutionVertical(
padfHorizontalFilteredBand + j,
- nDstXSize, padfWeights, nSrcLineCount );
- pafDstScanline[iFilteredPixelOff] = static_cast<float>(dfVal);
+ nDstXSize, padfWeights, nSrcLineCount ));
+ pafDstScanline[iFilteredPixelOff] = replaceValIfNodata(fVal);
}
#else
for( ;
@@ -2128,9 +2278,10 @@ GDALResampleChunk32R_ConvolutionT( double dfXRatioDstToSrc,
GDALResampleConvolutionVertical_2cols(
padfHorizontalFilteredBand + j, nDstXSize, padfWeights,
nSrcLineCount, dfVal1, dfVal2 );
- pafDstScanline[iFilteredPixelOff] = static_cast<float>(dfVal1);
- pafDstScanline[iFilteredPixelOff+1] =
- static_cast<float>(dfVal2);
+ pafDstScanline[iFilteredPixelOff] = replaceValIfNodata(
+ static_cast<float>(dfVal1));
+ pafDstScanline[iFilteredPixelOff+1] = replaceValIfNodata(
+ static_cast<float>(dfVal2));
}
if( iFilteredPixelOff < nDstXSize )
{
@@ -2138,7 +2289,8 @@ GDALResampleChunk32R_ConvolutionT( double dfXRatioDstToSrc,
GDALResampleConvolutionVertical(
padfHorizontalFilteredBand + j,
nDstXSize, padfWeights, nSrcLineCount );
- pafDstScanline[iFilteredPixelOff] = static_cast<float>(dfVal);
+ pafDstScanline[iFilteredPixelOff] = replaceValIfNodata(
+ static_cast<float>(dfVal));
}
#endif
}
@@ -2195,8 +2347,8 @@ GDALResampleChunk32R_ConvolutionT( double dfXRatioDstToSrc,
}
if( dfWeightSum > 0.0 )
{
- pafDstScanline[iFilteredPixelOff] =
- static_cast<float>(dfVal / dfWeightSum);
+ pafDstScanline[iFilteredPixelOff] = replaceValIfNodata(
+ static_cast<float>(dfVal / dfWeightSum));
}
else
{
From 86c49036793495ff504eedd6d8bb5046181c2e8f Mon Sep 17 00:00:00 2001
From: Michael Entin
Date: Wed, 14 Nov 2018 07:49:57 -0800
Subject: [PATCH 054/488] CSV Documentation: describe producing CSV with
GeoJSON formatted geometries (#1094)
* CSV Documentation: describe using ogr2ogr to create CSV with GeoJSON geometry fields
* Fix accidental edit
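
A minimal sketch of the same conversion through the GDAL Python bindings (file and layer
names are illustrative; the SQLite dialect must be available in the GDAL build):

    from osgeo import gdal

    # Roughly equivalent to the ogr2ogr command shown in the documentation diff below:
    # serialize the geometry as a GeoJSON text column named "geom".
    gdal.VectorTranslate(
        'output.csv', 'input.shp',
        format='CSV',
        SQLStatement='select AsGeoJSON(geometry) AS geom, * from input',
        SQLDialect='sqlite')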
---
gdal/ogr/ogrsf_frmts/csv/drv_csv.html | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/gdal/ogr/ogrsf_frmts/csv/drv_csv.html b/gdal/ogr/ogrsf_frmts/csv/drv_csv.html
index ff6a9a888b7a..1e8607c0635b 100644
--- a/gdal/ogr/ogrsf_frmts/csv/drv_csv.html
+++ b/gdal/ogr/ogrsf_frmts/csv/drv_csv.html
@@ -290,7 +290,9 @@
Creation Issues
file is discarded. It is possible to export the geometry in its WKT representation by specifying
GEOMETRY=AS_WKT. It is also possible to export point geometries into their X,Y,Z components (different
columns in the csv file) by specifying GEOMETRY=AS_XYZ, GEOMETRY=AS_XY or GEOMETRY=AS_YX.
-The geometry column(s) will be prepended to the columns with the attributes values.
+The geometry column(s) will be prepended to the columns with the attributes values.
+It is also possible to export geometries in GeoJSON representation using SQLite SQL dialect query,
+see example below.
CREATE_CSVT=YES/NO (Starting with GDAL 1.7.0): Create the associated .csvt file (see above paragraph)
to describe the type of each column of the layer and its optional width and precision. Default value : NO
SEPARATOR=COMMA/SEMICOLON/TAB/SPACE (Starting with GDAL 1.7.0): Field separator character. Default value : COMMA
@@ -324,6 +326,9 @@
Examples
This example shows using ogr2ogr to transform a shapefile with point geometry into a .csv file with the X,Y,Z coordinates of the points as first columns in the .csv file
This example shows using ogr2ogr to transform a shapefile into a .csv file with geography field formatted using GeoJSON format.
+
ogr2ogr -f CSV -dialect sqlite -sql "select AsGeoJSON(geometry) AS geom, * from input" output.csv input.shp
+
Particular datasources
From 5a9d61933f5494f590ea1a1c746f4ffd3b693242 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Wed, 14 Nov 2018 17:49:45 +0100
Subject: [PATCH 055/488] CONTRIBUTING.md: add alternative for squashing all
commits [ci skip]
---
CONTRIBUTING.md | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 22d516b84436..076a35c1a505 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -83,7 +83,15 @@ git rebase origin/master
# At end of your work, make sure history is reasonable by folding non
# significant commits into a consistent set
-git rebase -i master (use fixup for example to merge several commits together)
+git rebase -i master (use 'fixup' for example to merge several commits together,
+and 'reword' to modify commit messages)
+
+# or alternatively, in case there is a big number of commits and marking
+# all them as 'fixup' is tedious
+git fetch origin
+git rebase origin/master
+git reset --soft origin/master
+git commit -a -m "Put here the synthetic commit message"
# push your branch
git push my_user_name my_new_feature_branch
From a83a6243ec59e14acdcc6c149b47f956ef6c89a1 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Thu, 15 Nov 2018 10:13:51 +0100
Subject: [PATCH 056/488] MVT doc: fix copy & paste error pointed by @velix [ci
skip]
---
gdal/ogr/ogrsf_frmts/mvt/drv_mvt.html | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/gdal/ogr/ogrsf_frmts/mvt/drv_mvt.html b/gdal/ogr/ogrsf_frmts/mvt/drv_mvt.html
index 66f117155053..3071c2b7bffd 100644
--- a/gdal/ogr/ogrsf_frmts/mvt/drv_mvt.html
+++ b/gdal/ogr/ogrsf_frmts/mvt/drv_mvt.html
@@ -194,7 +194,7 @@
Dataset creation options
extension of tiles. Defaults to pbf.
MINZOOM=integer: Minimum zoom level at which tiles are generated. Defaults
to 0.
-
MAXZOOM=integer: Minimum zoom level at which tiles are generated. Defaults
+
MAXZOOM=integer: Maximum zoom level at which tiles are generated. Defaults
to 5. Maximum supported value is 22
CONF=string: Layer configuration as a JSon serialized string.
ID_TYPE=AUTO/String/Integer. (OGR >= 2.3) Type of the 'id'
member of Feature objects.
+
WRITE_NON_FINITE_VALUES=YES/NO. (OGR >= 2.4) Whether to write NaN / Infinity values.
+Such values are not allowed in strict JSon mode, but some JSon parsers
+(libjson-c >= 0.12 for example) can understand them as they are allowed by ECMAScript.
+Defaults to NO
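
A minimal usage sketch of this layer creation option through the Python bindings
(the /vsimem path and field name are illustrative):

    from osgeo import ogr

    # Create a GeoJSON layer that is allowed to carry NaN / Infinity attribute values.
    drv = ogr.GetDriverByName('GeoJSON')
    ds = drv.CreateDataSource('/vsimem/nonfinite.json')
    lyr = ds.CreateLayer('test', options=['WRITE_NON_FINITE_VALUES=YES'])
    lyr.CreateField(ogr.FieldDefn('val', ogr.OFTReal))
    f = ogr.Feature(lyr.GetLayerDefn())
    f.SetField('val', float('nan'))
    lyr.CreateFeature(f)
    ds = None  # flush

Without the option, non-finite values are skipped with a warning, as implemented in
ogrgeojsonwriter.cpp below.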
VSI Virtual File System API support
diff --git a/gdal/ogr/ogrsf_frmts/geojson/libjson/json_tokener.c b/gdal/ogr/ogrsf_frmts/geojson/libjson/json_tokener.c
index 42c1a45cd749..51b4fa1e3978 100644
--- a/gdal/ogr/ogrsf_frmts/geojson/libjson/json_tokener.c
+++ b/gdal/ogr/ogrsf_frmts/geojson/libjson/json_tokener.c
@@ -13,6 +13,10 @@
* (http://www.opensource.org/licenses/mit-license.php)
*/
+// For uselocale
+#undef _XOPEN_SOURCE
+#define _XOPEN_SOURCE 700
+
#include "config.h"
#include "cpl_conv.h"
@@ -30,19 +34,18 @@
#include "arraylist.h"
#include "json_inttypes.h"
#include "json_object.h"
+#include "json_object_private.h"
#include "json_tokener.h"
#include "json_util.h"
-#ifdef HAVE_LOCALE_H
+#if defined(HAVE_LOCALE_H) || defined(HAVE_USELOCALE)
#include <locale.h>
#endif /* HAVE_LOCALE_H */
+#ifdef HAVE_XLOCALE_H
+#include <xlocale.h>
+#endif
-#if !HAVE_STRDUP && defined(_MSC_VER)
- /* MSC has the version as _strdup */
-# define strdup _strdup
-#elif !HAVE_STRDUP
-# error You do not have strdup on your system.
-#endif /* HAVE_STRDUP */
+#define jt_hexdigit(x) (((x) <= '9') ? (x) - '0' : ((x) & 7) + 9)
#if !HAVE_STRNCASECMP && defined(_MSC_VER)
/* MSC has the version as _strnicmp */
@@ -51,12 +54,24 @@
# error You do not have strncasecmp on your system.
#endif /* HAVE_STRNCASECMP */
-static const char* json_null_str = "null";
-static const char* json_true_str = "true";
-static const char* json_false_str = "false";
-
-// XXX after v0.10 this array will become static:
-const char* json_tokener_errors[] = {
+/* Use C99 NAN by default; if not available, nan("") should work too. */
+#ifndef NAN
+#define NAN nan("")
+#endif /* !NAN */
+
+static const char json_null_str[] = "null";
+static const int json_null_str_len = sizeof(json_null_str) - 1;
+static const char json_inf_str[] = "Infinity";
+static const char json_inf_str_lower[] = "infinity";
+static const unsigned int json_inf_str_len = sizeof(json_inf_str) - 1;
+static const char json_nan_str[] = "NaN";
+static const int json_nan_str_len = sizeof(json_nan_str) - 1;
+static const char json_true_str[] = "true";
+static const int json_true_str_len = sizeof(json_true_str) - 1;
+static const char json_false_str[] = "false";
+static const int json_false_str_len = sizeof(json_false_str) - 1;
+
+static const char* json_tokener_errors[] = {
"success",
"continue",
"nesting too deep",
@@ -71,17 +86,20 @@ const char* json_tokener_errors[] = {
"object value separator ',' expected",
"invalid string sequence",
"expected comment",
+ "buffer size overflow"
};
const char *json_tokener_error_desc(enum json_tokener_error jerr)
{
- int jerr_int = (int)jerr;
- if (jerr_int < 0 || jerr_int >= (int)(sizeof(json_tokener_errors) / sizeof(json_tokener_errors[0])))
- return "Unknown error, invalid json_tokener_error value passed to json_tokener_error_desc()";
+ int jerr_int = (int) jerr;
+ if (jerr_int < 0 ||
+ jerr_int >= (int)(sizeof(json_tokener_errors) / sizeof(json_tokener_errors[0])))
+ return "Unknown error, "
+ "invalid json_tokener_error value passed to json_tokener_error_desc()";
return json_tokener_errors[jerr];
}
-enum json_tokener_error json_tokener_get_error(json_tokener *tok)
+enum json_tokener_error json_tokener_get_error(struct json_tokener *tok)
{
return tok->err;
}
@@ -98,7 +116,8 @@ struct json_tokener* json_tokener_new_ex(int depth)
tok = (struct json_tokener*)calloc(1, sizeof(struct json_tokener));
if (!tok) return NULL;
- tok->stack = (struct json_tokener_srec *)calloc(depth, sizeof(struct json_tokener_srec));
+ tok->stack = (struct json_tokener_srec *) calloc(depth,
+ sizeof(struct json_tokener_srec));
if (!tok->stack) {
free(tok);
return NULL;
@@ -118,7 +137,7 @@ void json_tokener_free(struct json_tokener *tok)
{
json_tokener_reset(tok);
if (tok->pb) printbuf_free(tok->pb);
- if (tok->stack) free(tok->stack);
+ free(tok->stack);
free(tok);
}
@@ -152,7 +171,8 @@ struct json_object* json_tokener_parse(const char *str)
return obj;
}
-struct json_object* json_tokener_parse_verbose(const char *str, enum json_tokener_error *error)
+struct json_object* json_tokener_parse_verbose(const char *str,
+ enum json_tokener_error *error)
{
struct json_tokener* tok;
struct json_object* obj;
@@ -195,14 +215,17 @@ struct json_object* json_tokener_parse_verbose(const char *str, enum json_tokene
* Returns 1 on success, sets tok->err and returns 0 if no more chars.
* Implicit inputs: str, len vars
*/
-#define PEEK_CHAR(dest, tok) \
- (((tok)->char_offset == len) ? \
- (((tok)->depth == 0 && state == json_tokener_state_eatws && saved_state == json_tokener_state_finish) ? \
- (((tok)->err = json_tokener_success), 0) \
- : \
- (((tok)->err = json_tokener_continue), 0) \
- ) : \
- (((dest) = *str), 1) \
+#define PEEK_CHAR(dest, tok) \
+ (((tok)->char_offset == len) ? \
+ (((tok)->depth == 0 && \
+ state == json_tokener_state_eatws && \
+ saved_state == json_tokener_state_finish \
+ ) ? \
+ (((tok)->err = json_tokener_success), 0) \
+ : \
+ (((tok)->err = json_tokener_continue), 0) \
+ ) : \
+ (((dest) = *str), 1) \
)
/* ADVANCE_CHAR() macro:
@@ -222,17 +245,53 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
{
struct json_object *obj = NULL;
char c = '\1';
-#ifdef HAVE_SETLOCALE
- char *oldlocale=NULL, *tmplocale;
-
- tmplocale = setlocale(LC_NUMERIC, NULL);
- if (tmplocale) oldlocale = strdup(tmplocale);
- setlocale(LC_NUMERIC, "C");
+#ifdef HAVE_USELOCALE
+ locale_t oldlocale = uselocale(NULL);
+ locale_t newloc;
+#elif defined(HAVE_SETLOCALE)
+ char *oldlocale = NULL;
#endif
tok->char_offset = 0;
tok->err = json_tokener_success;
+ /* this interface is presently not 64-bit clean due to the int len argument
+ and the internal printbuf interface that takes 32-bit int len arguments
+ so the function limits the maximum string size to INT32_MAX (2GB).
+ If the function is called with len == -1 then strlen is called to check
+ the string length is less than INT32_MAX (2GB) */
+ if ((len < -1) || (len == -1 && strlen(str) > INT32_MAX)) {
+ tok->err = json_tokener_error_size;
+ return NULL;
+ }
+
+#ifdef HAVE_USELOCALE
+ {
+ locale_t duploc = duplocale(oldlocale);
+ newloc = newlocale(LC_NUMERIC, "C", duploc);
+ // XXX at least Debian 8.4 has a bug in newlocale where it doesn't
+ // change the decimal separator unless you set LC_TIME!
+ if (newloc)
+ {
+ duploc = newloc; // original duploc has been freed by newlocale()
+ newloc = newlocale(LC_TIME, "C", duploc);
+ }
+ if (newloc == NULL)
+ {
+ freelocale(duploc);
+ return NULL;
+ }
+ uselocale(newloc);
+ }
+#elif defined(HAVE_SETLOCALE)
+ {
+ char *tmplocale;
+ tmplocale = setlocale(LC_NUMERIC, NULL);
+ if (tmplocale) oldlocale = strdup(tmplocale);
+ setlocale(LC_NUMERIC, "C");
+ }
+#endif
+
while (PEEK_CHAR(c, tok)) {
redo_char:
@@ -240,11 +299,11 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
case json_tokener_state_eatws:
/* Advance until we change state */
- while (isspace((int)c)) {
+ while (isspace((unsigned char)c)) {
if ((!ADVANCE_CHAR(str, tok)) || (!PEEK_CHAR(c, tok)))
goto out;
}
- if(c == '/') {
+ if(c == '/' && !(tok->flags & JSON_TOKENER_STRICT)) {
printbuf_reset(tok->pb);
printbuf_memappend_fast(tok->pb, &c, 1);
state = json_tokener_state_comment_start;
@@ -260,24 +319,36 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
state = json_tokener_state_eatws;
saved_state = json_tokener_state_object_field_start;
current = json_object_new_object();
- if( current == NULL )
- goto out;
+ if(current == NULL)
+ goto out;
break;
case '[':
state = json_tokener_state_eatws;
saved_state = json_tokener_state_array;
current = json_object_new_array();
- if( current == NULL )
- goto out;
+ if(current == NULL)
+ goto out;
break;
+ case 'I':
+ case 'i':
+ state = json_tokener_state_inf;
+ printbuf_reset(tok->pb);
+ tok->st_pos = 0;
+ goto redo_char;
case 'N':
case 'n':
- state = json_tokener_state_null;
+ state = json_tokener_state_null; // or NaN
printbuf_reset(tok->pb);
tok->st_pos = 0;
goto redo_char;
- case '"':
case '\'':
+ if (tok->flags & JSON_TOKENER_STRICT) {
+ /* in STRICT mode only double-quote are allowed */
+ tok->err = json_tokener_error_parse_unexpected;
+ goto out;
+ }
+ /* FALLTHRU */
+ case '"':
state = json_tokener_state_string;
printbuf_reset(tok->pb);
tok->quote_char = c;
@@ -290,10 +361,7 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
printbuf_reset(tok->pb);
tok->st_pos = 0;
goto redo_char;
-#if defined(__GNUC__)
- case '0' ... '9':
-#else
- case '0':
+ case '0':
case '1':
case '2':
case '3':
@@ -303,7 +371,6 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
case '7':
case '8':
case '9':
-#endif
case '-':
state = json_tokener_state_number;
printbuf_reset(tok->pb);
@@ -322,21 +389,95 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
tok->depth--;
goto redo_char;
- case json_tokener_state_null:
- printbuf_memappend_fast(tok->pb, &c, 1);
- if(strncasecmp(json_null_str, tok->pb->buf,
- json_min(tok->st_pos+1, (int)strlen(json_null_str))) == 0) {
- if(tok->st_pos == (int)strlen(json_null_str)) {
- current = NULL;
- saved_state = json_tokener_state_finish;
- state = json_tokener_state_eatws;
- goto redo_char;
+ case json_tokener_state_inf: /* aka starts with 'i' (or 'I', or "-i", or "-I") */
+ {
+ /* If we were guaranteed to have len set, then we could (usually) handle
+ * the entire "Infinity" check in a single strncmp (strncasecmp), but
+ * since len might be -1 (i.e. "read until \0"), we need to check it
+ * a character at a time.
+ * Trying to handle it both ways would make this code considerably more
+ * complicated with likely little performance benefit.
+ */
+ int is_negative = 0;
+ const char *_json_inf_str = json_inf_str;
+ if (!(tok->flags & JSON_TOKENER_STRICT))
+ _json_inf_str = json_inf_str_lower;
+
+ /* Note: tok->st_pos must be 0 when state is set to json_tokener_state_inf */
+ while (tok->st_pos < (int)json_inf_str_len)
+ {
+ char inf_char = *str;
+ if (!(tok->flags & JSON_TOKENER_STRICT))
+ inf_char = tolower((int)*str);
+ if (inf_char != _json_inf_str[tok->st_pos])
+ {
+ tok->err = json_tokener_error_parse_unexpected;
+ goto out;
+ }
+ tok->st_pos++;
+ (void)ADVANCE_CHAR(str, tok);
+ if (!PEEK_CHAR(c, tok))
+ {
+ /* out of input chars, for now at least */
+ goto out;
+ }
}
- } else {
- tok->err = json_tokener_error_parse_null;
- goto out;
+ /* We checked the full length of "Infinity", so create the object.
+ * When handling -Infinity, the number parsing code will have dropped
+ * the "-" into tok->pb for us, so check it now.
+ */
+ if (printbuf_length(tok->pb) > 0 && *(tok->pb->buf) == '-')
+ {
+ is_negative = 1;
+ }
+ current = json_object_new_double(is_negative
+ ? -INFINITY : INFINITY);
+ if (current == NULL)
+ goto out;
+ saved_state = json_tokener_state_finish;
+ state = json_tokener_state_eatws;
+ goto redo_char;
+
+ }
+ break;
+ case json_tokener_state_null: /* aka starts with 'n' */
+ {
+ int size;
+ int size_nan;
+ printbuf_memappend_fast(tok->pb, &c, 1);
+ size = json_min(tok->st_pos+1, json_null_str_len);
+ size_nan = json_min(tok->st_pos+1, json_nan_str_len);
+ if((!(tok->flags & JSON_TOKENER_STRICT) &&
+ strncasecmp(json_null_str, tok->pb->buf, size) == 0)
+ || (strncmp(json_null_str, tok->pb->buf, size) == 0)
+ ) {
+ if (tok->st_pos == json_null_str_len) {
+ current = NULL;
+ saved_state = json_tokener_state_finish;
+ state = json_tokener_state_eatws;
+ goto redo_char;
+ }
+ }
+ else if ((!(tok->flags & JSON_TOKENER_STRICT) &&
+ strncasecmp(json_nan_str, tok->pb->buf, size_nan) == 0) ||
+ (strncmp(json_nan_str, tok->pb->buf, size_nan) == 0)
+ )
+ {
+ if (tok->st_pos == json_nan_str_len)
+ {
+ current = json_object_new_double(NAN);
+ if (current == NULL)
+ goto out;
+ saved_state = json_tokener_state_finish;
+ state = json_tokener_state_eatws;
+ goto redo_char;
+ }
+ } else {
+ tok->err = json_tokener_error_parse_null;
+ goto out;
+ }
+ tok->st_pos++;
}
- tok->st_pos++;
break;
case json_tokener_state_comment_start:
@@ -400,8 +541,8 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
if(c == tok->quote_char) {
printbuf_memappend_fast(tok->pb, case_start, str-case_start);
current = json_object_new_string_len(tok->pb->buf, tok->pb->bpos);
- if( current == NULL )
- goto out;
+ if(current == NULL)
+ goto out;
saved_state = json_tokener_state_finish;
state = json_tokener_state_eatws;
break;
@@ -456,21 +597,21 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
/* Handle a 4-byte sequence, or two sequences if a surrogate pair */
while(1) {
- if(strchr(json_hex_chars, c)) {
- tok->ucs_char += ((unsigned int)hexdigit(c) << ((3-tok->st_pos++)*4));
+ if (c && strchr(json_hex_chars, c)) {
+ tok->ucs_char += ((unsigned int)jt_hexdigit(c) << ((3-tok->st_pos++)*4));
if(tok->st_pos == 4) {
unsigned char unescaped_utf[4];
if (got_hi_surrogate) {
if (IS_LOW_SURROGATE(tok->ucs_char)) {
- /* Recalculate the ucs_char, then fall through to process normally */
+ /* Recalculate the ucs_char, then fall thru to process normally */
tok->ucs_char = DECODE_SURROGATE_PAIR(got_hi_surrogate, tok->ucs_char);
} else {
/* Hi surrogate was not followed by a low surrogate */
/* Replace the hi and process the rest normally */
printbuf_memappend_fast(tok->pb, (char*)utf8_replacement_char, 3);
}
- /*got_hi_surrogate = 0;*/
+ got_hi_surrogate = 0;
}
if (tok->ucs_char < 0x80) {
@@ -487,8 +628,8 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
*/
got_hi_surrogate = tok->ucs_char;
/* Not at end, and the next two chars should be "\u" */
- if ((tok->char_offset+1 != len) &&
- (tok->char_offset+2 != len) &&
+ if ((len == -1 || len > (tok->char_offset + 2)) &&
+ // str[0] != '0' && // implied by json_hex_chars, above.
(str[1] == '\\') &&
(str[2] == 'u'))
{
@@ -497,13 +638,15 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
* characters.
*/
if( !ADVANCE_CHAR(str, tok) || !ADVANCE_CHAR(str, tok) ) {
- printbuf_memappend_fast(tok->pb, (char*)utf8_replacement_char, 3);
- }
+ printbuf_memappend_fast(tok->pb,
+ (char*) utf8_replacement_char, 3);
+ }
/* Advance to the first char of the next sequence and
* continue processing with the next sequence.
*/
if (!ADVANCE_CHAR(str, tok) || !PEEK_CHAR(c, tok)) {
- printbuf_memappend_fast(tok->pb, (char*)utf8_replacement_char, 3);
+ printbuf_memappend_fast(tok->pb,
+ (char*) utf8_replacement_char, 3);
goto out;
}
tok->ucs_char = 0;
@@ -514,7 +657,8 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
* it. Put a replacement char in for the hi surrogate
* and pretend we finished.
*/
- printbuf_memappend_fast(tok->pb, (char*)utf8_replacement_char, 3);
+ printbuf_memappend_fast(tok->pb,
+ (char*) utf8_replacement_char, 3);
}
} else if (IS_LOW_SURROGATE(tok->ucs_char)) {
/* Got a low surrogate not preceded by a high */
@@ -551,32 +695,40 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
break;
case json_tokener_state_boolean:
- printbuf_memappend_fast(tok->pb, &c, 1);
- if(strncasecmp(json_true_str, tok->pb->buf,
- json_min(tok->st_pos+1, (int)strlen(json_true_str))) == 0) {
- if(tok->st_pos == (int)strlen(json_true_str)) {
- current = json_object_new_boolean(1);
- if( current == NULL )
- goto out;
- saved_state = json_tokener_state_finish;
- state = json_tokener_state_eatws;
- goto redo_char;
- }
- } else if(strncasecmp(json_false_str, tok->pb->buf,
- json_min(tok->st_pos+1, (int)strlen(json_false_str))) == 0) {
- if(tok->st_pos == (int)strlen(json_false_str)) {
- current = json_object_new_boolean(0);
- if( current == NULL )
- goto out;
- saved_state = json_tokener_state_finish;
- state = json_tokener_state_eatws;
- goto redo_char;
+ {
+ int size1, size2;
+ printbuf_memappend_fast(tok->pb, &c, 1);
+ size1 = json_min(tok->st_pos+1, json_true_str_len);
+ size2 = json_min(tok->st_pos+1, json_false_str_len);
+ if((!(tok->flags & JSON_TOKENER_STRICT) &&
+ strncasecmp(json_true_str, tok->pb->buf, size1) == 0)
+ || (strncmp(json_true_str, tok->pb->buf, size1) == 0)
+ ) {
+ if(tok->st_pos == json_true_str_len) {
+ current = json_object_new_boolean(1);
+ if(current == NULL)
+ goto out;
+ saved_state = json_tokener_state_finish;
+ state = json_tokener_state_eatws;
+ goto redo_char;
+ }
+ } else if((!(tok->flags & JSON_TOKENER_STRICT) &&
+ strncasecmp(json_false_str, tok->pb->buf, size2) == 0)
+ || (strncmp(json_false_str, tok->pb->buf, size2) == 0)) {
+ if(tok->st_pos == json_false_str_len) {
+ current = json_object_new_boolean(0);
+ if(current == NULL)
+ goto out;
+ saved_state = json_tokener_state_finish;
+ state = json_tokener_state_eatws;
+ goto redo_char;
+ }
+ } else {
+ tok->err = json_tokener_error_parse_boolean;
+ goto out;
}
- } else {
- tok->err = json_tokener_error_parse_boolean;
- goto out;
+ tok->st_pos++;
}
- tok->st_pos++;
break;
case json_tokener_state_number:
@@ -584,10 +736,45 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
/* Advance until we change state */
const char *case_start = str;
int case_len=0;
+ int is_exponent=0;
+ int negativesign_next_possible_location=1;
while(c && strchr(json_number_chars, c)) {
++case_len;
- if(c == '.' || c == 'e' || c == 'E')
+
+ /* non-digit characters checks */
+ /* note: since the main loop condition to get here was
+ an input starting with 0-9 or '-', we are
+ protected from input starting with '.' or
+ e/E. */
+ if (c == '.') {
+ if (tok->is_double != 0) {
+ /* '.' can only be found once, and out of the exponent part.
+ Thus, if the input is already flagged as double, it
+ is invalid. */
+ tok->err = json_tokener_error_parse_number;
+ goto out;
+ }
+ tok->is_double = 1;
+ }
+ if (c == 'e' || c == 'E') {
+ if (is_exponent != 0) {
+ /* only one exponent possible */
+ tok->err = json_tokener_error_parse_number;
+ goto out;
+ }
+ is_exponent = 1;
tok->is_double = 1;
+ /* the exponent part can begin with a negative sign */
+ negativesign_next_possible_location = case_len + 1;
+ }
+ if (c == '-' && case_len != negativesign_next_possible_location) {
+ /* If the negative sign is not where expected (ie
+ start of input or start of exponent part), the
+ input is invalid. */
+ tok->err = json_tokener_error_parse_number;
+ goto out;
+ }
+
if (!ADVANCE_CHAR(str, tok) || !PEEK_CHAR(c, tok)) {
printbuf_memappend_fast(tok->pb, case_start, case_len);
goto out;
@@ -595,18 +782,35 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
}
if (case_len>0)
printbuf_memappend_fast(tok->pb, case_start, case_len);
+
+ // Check for -Infinity
+ if (tok->pb->buf[0] == '-' && case_len <= 1 &&
+ (c == 'i' || c == 'I'))
+ {
+ state = json_tokener_state_inf;
+ tok->st_pos = 0;
+ goto redo_char;
+ }
}
{
int64_t num64;
double numd;
if (!tok->is_double && json_parse_int64(tok->pb->buf, &num64) == 0) {
- current = json_object_new_int64(num64);
- if( current == NULL )
- goto out;
- } else if(tok->is_double && json_parse_double(tok->pb->buf, &numd) == 0) {
+ if (num64 && tok->pb->buf[0]=='0' &&
+ (tok->flags & JSON_TOKENER_STRICT)) {
+ /* in strict mode, number must not start with 0 */
+ tok->err = json_tokener_error_parse_number;
+ goto out;
+ }
+ current = json_object_new_int64(num64);
+ if(current == NULL)
+ goto out;
+ }
+ else if(tok->is_double && json_parse_double(tok->pb->buf, &numd) == 0)
+ {
current = json_object_new_double(numd);
- if( current == NULL )
- goto out;
+ if(current == NULL)
+ goto out;
} else {
tok->err = json_tokener_error_parse_number;
goto out;
@@ -620,12 +824,12 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
case json_tokener_state_array_after_sep:
case json_tokener_state_array:
if(c == ']') {
- if (state == json_tokener_state_array_after_sep &&
- (tok->flags & JSON_TOKENER_STRICT))
- {
- tok->err = json_tokener_error_parse_unexpected;
- goto out;
- }
+ if (state == json_tokener_state_array_after_sep &&
+ (tok->flags & JSON_TOKENER_STRICT))
+ {
+ tok->err = json_tokener_error_parse_unexpected;
+ goto out;
+ }
saved_state = json_tokener_state_finish;
state = json_tokener_state_eatws;
} else {
@@ -642,9 +846,7 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
case json_tokener_state_array_add:
if( json_object_array_add(current, obj) != 0 )
- {
- goto out;
- }
+ goto out;
saved_state = json_tokener_state_array_sep;
state = json_tokener_state_eatws;
goto redo_char;
@@ -737,6 +939,7 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
goto redo_char;
case json_tokener_state_object_sep:
+ /* { */
if(c == '}') {
saved_state = json_tokener_state_finish;
state = json_tokener_state_eatws;
@@ -752,18 +955,28 @@ struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
}
if (!ADVANCE_CHAR(str, tok))
goto out;
- } /* while(POP_CHAR) */
+ } /* while(PEEK_CHAR) */
out:
+ if (c &&
+ (state == json_tokener_state_finish) &&
+ (tok->depth == 0) &&
+ (tok->flags & JSON_TOKENER_STRICT)) {
+ /* unexpected char after JSON data */
+ tok->err = json_tokener_error_parse_unexpected;
+ }
if (!c) { /* We hit an eof char (0) */
if(state != json_tokener_state_finish &&
saved_state != json_tokener_state_finish)
tok->err = json_tokener_error_parse_eof;
}
-#ifdef HAVE_SETLOCALE
+#ifdef HAVE_USELOCALE
+ uselocale(oldlocale);
+ freelocale(newloc);
+#elif defined(HAVE_SETLOCALE)
setlocale(LC_NUMERIC, oldlocale);
- if (oldlocale) free(oldlocale);
+ free(oldlocale);
#endif
if (tok->err == json_tokener_success)
diff --git a/gdal/ogr/ogrsf_frmts/geojson/libjson/json_tokener.h b/gdal/ogr/ogrsf_frmts/geojson/libjson/json_tokener.h
index 247ae01f0616..c3862162b45b 100644
--- a/gdal/ogr/ogrsf_frmts/geojson/libjson/json_tokener.h
+++ b/gdal/ogr/ogrsf_frmts/geojson/libjson/json_tokener.h
@@ -33,7 +33,8 @@ enum json_tokener_error {
json_tokener_error_parse_object_key_sep,
json_tokener_error_parse_object_value_sep,
json_tokener_error_parse_string,
- json_tokener_error_parse_comment
+ json_tokener_error_parse_comment,
+ json_tokener_error_size
};
enum json_tokener_state {
@@ -60,7 +61,8 @@ enum json_tokener_state {
json_tokener_state_object_value_add,
json_tokener_state_object_sep,
json_tokener_state_array_after_sep,
- json_tokener_state_object_field_start_after_sep
+ json_tokener_state_object_field_start_after_sep,
+ json_tokener_state_inf
};
struct json_tokener_srec
@@ -105,14 +107,6 @@ struct json_tokener
*/
const char *json_tokener_error_desc(enum json_tokener_error jerr);
-/**
- * @b XXX do not use json_tokener_errors directly.
- * After v0.10 this will be removed.
- *
- * See json_tokener_error_desc() instead.
- */
-extern const char* json_tokener_errors[];
-
/**
* Retrieve the error caused by the last call to json_tokener_parse_ex(),
* or json_tokener_success if there is no error.
@@ -142,26 +136,27 @@ extern void json_tokener_set_flags(struct json_tokener *tok, int flags);
* it can also be a string, number or boolean value.
*
* A partial JSON string can be parsed. If the parsing is incomplete,
- * NULL will be returned and json_tokener_get_error() will be return
+ * NULL will be returned and json_tokener_get_error() will return
* json_tokener_continue.
* json_tokener_parse_ex() can then be called with additional bytes in str
* to continue the parsing.
*
- * If json_tokener_parse_ex() returns NULL and the error anything other than
+ * If json_tokener_parse_ex() returns NULL and the error is anything other than
* json_tokener_continue, a fatal error has occurred and parsing must be
- * halted. Then tok object must not be re-used until json_tokener_reset() is
+ * halted. Then, the tok object must not be reused until json_tokener_reset() is
* called.
*
* When a valid JSON value is parsed, a non-NULL json_object will be
- * returned. Also, json_tokener_get_error() will return json_tokener_success.
- * Be sure to check the type with json_object_is_type() or
- * json_object_get_type() before using the object.
+ * returned, with a reference count of one which belongs to the caller. Also,
+ * json_tokener_get_error() will return json_tokener_success. Be sure to check
+ * the type with json_object_is_type() or json_object_get_type() before using
+ * the object.
*
- * @b XXX this should not use internal fields:
+ * @b XXX this shouldn't use internal fields:
* Trailing characters after the parsed value do not automatically cause an
* error. It is up to the caller to decide whether to treat this as an
* error or to handle the additional characters, perhaps by parsing another
- * JSON value starting from that point.
+ * json value starting from that point.
*
* Extra characters can be detected by comparing the tok->char_offset against
* the length of the last len parameter passed in.
@@ -170,6 +165,11 @@ extern void json_tokener_set_flags(struct json_tokener *tok, int flags);
* responsible for calling json_tokener_parse_ex with an appropriate str
* parameter starting with the extra characters.
*
+ * This interface is presently not 64-bit clean due to the int len argument
+ * so the function limits the maximum string size to INT32_MAX (2GB).
+ * If the function is called with len == -1 then strlen is called to check
+ * the string length is less than INT32_MAX (2GB)
+ *
* Example:
* @code
json_object *jobj = NULL;
diff --git a/gdal/ogr/ogrsf_frmts/geojson/libjson/symbol_renames.h b/gdal/ogr/ogrsf_frmts/geojson/libjson/symbol_renames.h
index 28d209a08b30..64fb27acfc52 100644
--- a/gdal/ogr/ogrsf_frmts/geojson/libjson/symbol_renames.h
+++ b/gdal/ogr/ogrsf_frmts/geojson/libjson/symbol_renames.h
@@ -83,7 +83,6 @@
#define json_false_str gdal_json_false_str
#define json_null_str gdal_json_null_str
#define json_true_str gdal_json_true_str
-#define json_tokener_errors gdal_json_tokener_errors
#define json_object_iter_begin gdal_json_object_iter_begin
#define json_object_iter_end gdal_json_object_iter_end
#define json_object_iter_equal gdal_json_object_iter_equal
diff --git a/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsondriver.cpp b/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsondriver.cpp
index c7323ca1b113..a57d8f77f48e 100644
--- a/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsondriver.cpp
+++ b/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsondriver.cpp
@@ -662,6 +662,7 @@ void RegisterOGRGeoJSON()
" String"
" Integer"
" "
+" "
"");
poDriver->SetMetadataItem( GDAL_DCAP_VIRTUALIO, "YES" );
diff --git a/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonreader.cpp b/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonreader.cpp
index 1ff9234711b8..def16422f9fb 100644
--- a/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonreader.cpp
+++ b/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonreader.cpp
@@ -712,6 +712,21 @@ void OGRGeoJSONReaderStreamingParser::Number(const char* pszValue, size_t nLen)
{
AppendObject(json_object_new_double(CPLAtof(pszValue)));
}
+ else if( nLen == strlen("Infinity") && EQUAL(pszValue, "Infinity") )
+ {
+ AppendObject(json_object_new_double(
+ std::numeric_limits<double>::infinity()));
+ }
+ else if( nLen == strlen("-Infinity") && EQUAL(pszValue, "-Infinity") )
+ {
+ AppendObject(json_object_new_double(
+ -std::numeric_limits<double>::infinity()));
+ }
+ else if( nLen == strlen("NaN") && EQUAL(pszValue, "NaN") )
+ {
+ AppendObject(json_object_new_double(
+ std::numeric_limits<double>::quiet_NaN()));
+ }
else
{
AppendObject(json_object_new_int64(CPLAtoGIntBig(pszValue)));
diff --git a/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwritelayer.cpp b/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwritelayer.cpp
index 8dcd788e8e1c..d758f83fbdd0 100644
--- a/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwritelayer.cpp
+++ b/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwritelayer.cpp
@@ -72,6 +72,8 @@ OGRGeoJSONWriteLayer::OGRGeoJSONWriteLayer( const char* pszName,
oWriteOptions_.SetRFC7946Settings();
}
oWriteOptions_.SetIDOptions(papszOptions);
+ oWriteOptions_.bAllowNonFiniteValues = CPLTestBool(
+ CSLFetchNameValueDef(papszOptions, "WRITE_NON_FINITE_VALUES", "FALSE"));
}
/************************************************************************/
diff --git a/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwriter.cpp b/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwriter.cpp
index 89e3d12f0bb8..156400805640 100644
--- a/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwriter.cpp
+++ b/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwriter.cpp
@@ -844,8 +844,23 @@ json_object* OGRGeoJSONWriteAttributes( OGRFeature* poFeature,
}
else if( OFTReal == eType )
{
+ const double val = poFeature->GetFieldAsDouble(nField);
+ if( !CPLIsFinite(val) )
+ {
+ if( !oOptions.bAllowNonFiniteValues )
+ {
+ static bool bHasWarned = false;
+ if( !bHasWarned )
+ {
+ bHasWarned = true;
+ CPLError(CE_Warning, CPLE_AppDefined,
+ "NaN of Infinity value found. Skipped");
+ }
+ continue;
+ }
+ }
poObjProp = json_object_new_double_with_significant_figures(
- poFeature->GetFieldAsDouble(nField),
+ val,
oOptions.nSignificantFigures );
}
else if( OFTString == eType )
diff --git a/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwriter.h b/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwriter.h
index 1a42d4e43b30..c899a7b7aa13 100644
--- a/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwriter.h
+++ b/gdal/ogr/ogrsf_frmts/geojson/ogrgeojsonwriter.h
@@ -81,6 +81,7 @@ class OGRGeoJSONWriteOptions
CPLString osIDField{};
bool bForceIDFieldType = false;
OGRFieldType eForcedIDFieldType = OFTString;
+ bool bAllowNonFiniteValues = false;
void SetRFC7946Settings();
void SetIDOptions(CSLConstList papszOptions);
diff --git a/gdal/port/cpl_json_streaming_parser.cpp b/gdal/port/cpl_json_streaming_parser.cpp
index e0a511447c6f..f2cc7356a67f 100644
--- a/gdal/port/cpl_json_streaming_parser.cpp
+++ b/gdal/port/cpl_json_streaming_parser.cpp
@@ -172,7 +172,8 @@ bool CPLJSonStreamingParser::EmitUnexpectedChar(char ch,
static bool IsValidNewToken(char ch)
{
return ch == '[' || ch == '{' || ch == '"' || ch == '-' ||
- ch == '.' || isdigit(ch) || ch == 't' || ch == 'f' || ch == 'n';
+ ch == '.' || isdigit(ch) || ch == 't' || ch == 'f' || ch == 'n' ||
+ ch == 'i' || ch == 'I' || ch == 'N';
}
/************************************************************************/
@@ -209,7 +210,8 @@ bool CPLJSonStreamingParser::StartNewToken(const char*& pStr, size_t& nLength)
m_aState.push_back(ARRAY);
AdvanceChar(pStr, nLength);
}
- else if( ch == '-' || ch == '.' || isdigit(ch) )
+ else if( ch == '-' || ch == '.' || isdigit(ch) ||
+ ch == 'i' || ch == 'I' || ch == 'N' )
{
m_aState.push_back(NUMBER);
}
@@ -223,7 +225,7 @@ bool CPLJSonStreamingParser::StartNewToken(const char*& pStr, size_t& nLength)
}
else if( ch == 'n' )
{
- m_aState.push_back(STATE_NULL);
+ m_aState.push_back(STATE_NULL); /* might be nan */
}
else
{
@@ -475,25 +477,68 @@ bool CPLJSonStreamingParser::Parse(const char* pStr, size_t nLength,
}
else
{
- return EmitUnexpectedChar(ch);
+ CPLString extendedToken(m_osToken + ch);
+ if( (STARTS_WITH_CI("Infinity", extendedToken) &&
+ m_osToken.size() + 1 <= strlen("Infinity")) ||
+ (STARTS_WITH_CI("-Infinity", extendedToken) &&
+ m_osToken.size() + 1 <= strlen("-Infinity")) ||
+ (STARTS_WITH_CI("NaN", extendedToken) &&
+ m_osToken.size() + 1 <= strlen("NaN")) )
+ {
+ m_osToken += ch;
+ }
+ else
+ {
+ return EmitUnexpectedChar(ch);
+ }
}
AdvanceChar(pStr, nLength);
}
+
+ if( nLength != 0 || bFinished )
+ {
+ const char firstCh = m_osToken[0];
+ if( firstCh == 'i' || firstCh == 'I' )
+ {
+ if( !EQUAL(m_osToken.c_str(), "Infinity") )
+ {
+ return EmitException("Invalid number");
+ }
+ }
+ else if( firstCh == '-' )
+ {
+ if( m_osToken[1] == 'i' || m_osToken[1] == 'I' )
+ {
+ if( !EQUAL(m_osToken.c_str(), "-Infinity") )
+ {
+ return EmitException("Invalid number");
+ }
+ }
+ }
+ else if( firstCh == 'n' || firstCh == 'N' )
+ {
+ if( m_osToken[1] == 'a' || m_osToken[1] == 'A' )
+ {
+ if( !EQUAL(m_osToken.c_str(), "NaN") )
+ {
+ return EmitException("Invalid number");
+ }
+ }
+ }
+
+ Number(m_osToken.c_str(), m_osToken.size());
+ m_osToken.clear();
+ m_aState.pop_back();
+ }
+
if( nLength == 0 )
{
if( bFinished )
{
- Number(m_osToken.c_str(), m_osToken.size());
- m_osToken.clear();
- m_aState.pop_back();
return CheckStackEmpty();
}
return true;
}
-
- Number(m_osToken.c_str(), m_osToken.size());
- m_osToken.clear();
- m_aState.pop_back();
}
else if( eCurState == STRING )
{
@@ -790,6 +835,12 @@ bool CPLJSonStreamingParser::Parse(const char* pStr, size_t nLength,
while(nLength)
{
char ch = *pStr;
+ if( eCurState == STATE_NULL && (ch == 'a' || ch == 'A') &&
+ m_osToken.size() == 1 )
+ {
+ m_aState.back() = NUMBER;
+ break;
+ }
if( isalpha(ch) )
{
m_osToken += ch;
@@ -826,6 +877,10 @@ bool CPLJSonStreamingParser::Parse(const char* pStr, size_t nLength,
}
AdvanceChar(pStr, nLength);
}
+ if( m_aState.back() == NUMBER )
+ {
+ continue;
+ }
if( nLength == 0 )
{
if( bFinished )
diff --git a/gdal/scripts/detect_suspicious_char_digit_zero.sh b/gdal/scripts/detect_suspicious_char_digit_zero.sh
index 10aba0dd6d2e..7fd9b96ce4bf 100755
--- a/gdal/scripts/detect_suspicious_char_digit_zero.sh
+++ b/gdal/scripts/detect_suspicious_char_digit_zero.sh
@@ -5,7 +5,7 @@ ret_code=0
echo "Checking for suspicious comparisons to '0'..."
# Detect comparisons where we'd likely want to check against nul terminating byte in the condition of a for/while loop
-if grep -r --include="*.c*" "!= '0'" alg gnm port ogr gcore frmts apps ; then
+if grep -r --include="*.c*" "!= '0'" alg gnm port ogr gcore frmts apps | grep -v libjson ; then
ret_code=1
fi
if grep -r --include="*.c*" "!='0'" alg gnm port ogr gcore frmts apps | grep -v libjson ; then
From 1f9a66b4f5f617ca082094fcc6e35439eaa611f0 Mon Sep 17 00:00:00 2001
From: Alan Thomas
Date: Sun, 25 Nov 2018 17:16:25 +1100
Subject: [PATCH 075/488] DXF: Correctly handle non-uniformly-weighted spline
HATCH boundaries (fixes #1011)
Also fix the processing of hatches which have a spline segment as the last
segment of the boundary. Previously the processor would fail to detect
the end of these hatches, and parts of the entity after the HATCH would
be eaten.
---
autotest/ogr/data/additional-entities.dxf | 158 ++++++++++++++++++++++
autotest/ogr/ogr_dxf.py | 9 +-
gdal/ogr/ogrsf_frmts/dxf/ogrdxf_hatch.cpp | 28 ++--
3 files changed, 183 insertions(+), 12 deletions(-)
diff --git a/autotest/ogr/data/additional-entities.dxf b/autotest/ogr/data/additional-entities.dxf
index dfbfe4d158cc..52acc01dc762 100755
--- a/autotest/ogr/data/additional-entities.dxf
+++ b/autotest/ogr/data/additional-entities.dxf
@@ -961,6 +961,164 @@ SOLID
421
16776960
470
+LINEAR
+ 0
+HATCH
+ 5
+EF077
+330
+EEEEE
+100
+AcDbEntity
+ 8
+0
+ 6
+Continuous
+ 62
+ 4
+100
+AcDbHatch
+ 10
+0.0
+ 20
+0.0
+ 30
+0.0
+210
+0.0
+220
+0.0
+230
+1.0
+ 2
+_SOLID
+ 70
+ 1
+ 71
+ 0
+ 91
+ 1
+ 92
+ 1
+ 93
+ 1
+ 72
+ 4
+ 94
+ 3
+ 73
+ 1
+ 74
+ 0
+ 95
+ 11
+ 96
+ 7
+ 40
+0.0
+ 40
+0.0
+ 40
+0.0
+ 40
+0.0
+ 40
+20.94600890510247
+ 40
+48.82577770828017
+ 40
+65.41802406807341
+ 40
+82.5482354761489
+ 40
+82.5482354761489
+ 40
+82.5482354761489
+ 40
+82.5482354761489
+ 10
+77.24099480934673
+ 20
+172.430072843974
+ 42
+1.210771479263352
+ 10
+66.10730220891221
+ 20
+168.4798309518848
+ 42
+1.0
+ 10
+52.05758392371472
+ 20
+148.2061680972096
+ 42
+1.0
+ 10
+94.42286298422361
+ 20
+145.1350796615255
+ 42
+0.65
+ 10
+94.11793457416115
+ 20
+174.100096163338
+ 42
+2.0
+ 10
+83.82412757509678
+ 20
+174.7657729360923
+ 42
+1.383146083653713
+ 10
+77.24099480934673
+ 20
+172.430072843974
+ 42
+1.210771479263352
+ 97
+ 0
+ 97
+ 0
+ 75
+ 1
+ 76
+ 1
+ 98
+ 1
+ 10
+0.0
+ 20
+0.0
+450
+ 0
+451
+ 0
+460
+0.0
+461
+0.0
+452
+ 0
+462
+0.0
+453
+ 2
+463
+0.0
+ 63
+ 5
+421
+ 255
+463
+1.0
+ 63
+ 2
+421
+ 16776960
+470
LINEAR
0
MLINE
diff --git a/autotest/ogr/ogr_dxf.py b/autotest/ogr/ogr_dxf.py
index 182c96080c69..853a05b8f80b 100644
--- a/autotest/ogr/ogr_dxf.py
+++ b/autotest/ogr/ogr_dxf.py
@@ -3777,7 +3777,14 @@ def ogr_dxf_52():
# HATCH with a spline boundary path (and OCS as well, just for fun)
f = lyr.GetNextFeature()
if ogrtest.check_feature_geometry(f, 'POLYGON Z ((47.6969600708475 60.0 15,47.6969600708475 0.0 15,46.1103652823066 -0.466958240734954 14.5010390223444,44.5309994192688 -0.919910449553494 14.0043514365868,42.9660914072371 -1.34485059453921 13.5122106346236,41.4228701717145 -1.72777264377568 13.0268900083519,39.9085646382042 -2.0546705653465 12.5506629496691,38.4304037322091 -2.31153832733525 12.0858028504722,36.9956163792324 -2.48436989782552 11.6345831026584,35.6114315047771 -2.55915924490089 11.1992770981251,34.2850780343463 -2.52190033664495 10.7821582287693,33.0237848934429 -2.3585871411413 10.3854998864882,31.8347810075701 -2.0552136264735 10.011575463179,30.725295302231 -1.59777376072516 9.66265835073903,29.7025567029285 -0.972261511979859 9.34102194106535,28.7737941351658 -0.164670848321179 9.04893962605519,27.9445456607789 0.835923776643351 8.78815304558283,27.2086691364137 2.01916842728349 8.55673058492536,26.5550905172208 3.36572402537053 8.35118961809371,25.9727183005027 4.85621968724478 8.1680420288596,25.450460983562 6.47128452924656 8.00379970099481,24.9772270637013 8.19154766771616 7.85497451827107,24.5419250382231 9.99763821899391 7.71807836446012,24.1334634044299 11.8701852994201 7.58962312333373,23.7407506596245 13.7898180253351 7.46612067866363,23.3526953011092 15.7371655130791 7.34408291422158,22.9582058261868 17.6928568789925 7.22002171377933,22.5461907321598 19.6375212394157 7.09044896110861,22.1055585163308 21.5517877106888 6.95187653998118,21.6252176760022 23.4162854091522 6.80081633416879,21.0940767084768 25.2116434511463 6.63378022744318,20.501044111057 26.9184909530113 6.44728010357611,19.8350283810455 28.5174570310876 6.23782784633932,19.0849380157448 29.9891708017154 6.00193533950455,18.2425220975445 31.3190096857923 5.73700778952582,17.3111586046656 32.5117898949509 5.44410752140743,16.2972009340528 33.5773013324584 5.12523258605305,15.2070024839932 34.5253339038266 4.7823810347885,14.046916652774 35.3656775145671 4.41755091893963,12.8232968386826 36.1081220701913 4.0327402898323,11.5424964400062 36.7624574762111 3.62994719879235,10.2108688550319 37.338473638138 3.21116969714562,8.834767482047 37.8459604614835 2.77840583621797,7.42054571933875 38.2947078517594 2.33365366733523,5.97455696519436 38.6945057144772 1.87891124182326,4.50315461790106 39.0551439551486 1.41617661100791,3.01269207574607 39.3864124792851 0.947447826215011,1.50952273701662 39.6981011923983 0.474722938770421,0 40 0,-0.0 40.0 -2.12999999999989e-15,47.6969600708475 60.0 15))') != 0:
- gdaltest.post_reason('Wrong geometry on HATCH')
+ gdaltest.post_reason('Wrong geometry on HATCH 1')
+ f.DumpReadable()
+ return 'fail'
+
+ # Another HATCH with a spline boundary path
+ f = lyr.GetNextFeature()
+ if ogrtest.check_feature_geometry(f, 'POLYGON ((77.2409948093467 172.430072843974,75.261065049518 171.643815908613,73.2981627172517 170.696359298076,71.3875263981659 169.604375316261,69.5631545040892 168.388782317518,67.8563613292361 167.073645833116,66.2946346031063 165.684923719356,64.9008927779373 164.249191641798,63.6931834376534 162.792474868402,62.6848095284188 161.339284018754,61.8848281190297 159.911914493846,61.298843085132 158.530030785262,60.9300095654357 157.21052494952,60.7801817312349 155.967617443343,60.8511622391671 154.813159397702,61.1453506194465 153.756315627339,61.664528568996 152.803726831033,62.4105189258065 151.964301284269,63.3843801494824 151.249807149788,64.5850174245011 150.674379748575,66.0076885629501 150.253830592488,67.6424701411086 150.004704104407,69.4728079995063 149.943055679116,71.4743351228068 150.082969908572,73.6141841756299 150.434899728198,75.8510314766708 151.003978292885,78.1360638882735 151.788519596984,80.41494916904 152.778958906121,82.6307223520159 153.957467932202,84.7273090430607 155.298400731333,86.6532441052495 156.769592674546,88.3650659486779 158.334376895018,89.8299059955281 159.954044613131,91.0272387483612 161.589690168686,91.957368361883 163.189390065386,92.6421956980388 164.701128484532,93.1110835351921 166.094608264522,93.3942229550992 167.356341648662,93.5189493216415 168.483994478151,93.5079031853763 169.481912548033,93.3782987206629 170.357905428604,93.1417233193846 171.121075154443,92.8040593119231 171.78040251468,92.3652285565357 172.343822183981,91.8186752047856 172.817679470437,91.1571081487304 173.20986823606,90.3766844995721 173.528608818345,89.4716123962251 173.777329886712,88.4346297478537 173.955199520934,87.2580461173156 174.057607166253,85.9350751682361 174.076420167619,84.4616595153474 174.000186427787,82.8389797200317 173.814465060529,81.0767853391384 173.502507153586,79.1975328516125 173.04655812613,77.2409948093467 172.430072843974))') != 0:
+ gdaltest.post_reason('Wrong geometry on HATCH 2')
f.DumpReadable()
return 'fail'
diff --git a/gdal/ogr/ogrsf_frmts/dxf/ogrdxf_hatch.cpp b/gdal/ogr/ogrsf_frmts/dxf/ogrdxf_hatch.cpp
index 6d25431185c5..ba53e4e3813e 100644
--- a/gdal/ogr/ogrsf_frmts/dxf/ogrdxf_hatch.cpp
+++ b/gdal/ogr/ogrsf_frmts/dxf/ogrdxf_hatch.cpp
@@ -476,6 +476,7 @@ OGRErr OGRDXFLayer::CollectBoundaryPath( OGRGeometryCollection *poGC,
}
std::vector<double> adfControlPoints( 1, 0.0 );
+ std::vector<double> adfWeights( 1, 0.0 );
if( nCode != 10 )
break;
@@ -485,25 +486,30 @@ OGRErr OGRDXFLayer::CollectBoundaryPath( OGRGeometryCollection *poGC,
adfControlPoints.push_back( CPLAtof(szLineBuf) );
if( (nCode = poDS->ReadValue(szLineBuf, sizeof(szLineBuf))) == 20 )
+ {
adfControlPoints.push_back( CPLAtof(szLineBuf) );
+ }
else
break;
adfControlPoints.push_back( 0.0 ); // Z coordinate
- nCode = poDS->ReadValue(szLineBuf, sizeof(szLineBuf));
- }
-
- std::vector<double> adfWeights( 1, 0.0 );
- // 42 (weights) are optional
- while( nCode == 42 )
- {
- adfWeights.push_back( CPLAtof(szLineBuf) );
- nCode = poDS->ReadValue(szLineBuf, sizeof(szLineBuf));
+ // 42 (weights) are optional
+ if( (nCode = poDS->ReadValue(szLineBuf, sizeof(szLineBuf))) == 42 )
+ {
+ adfWeights.push_back( CPLAtof(szLineBuf) );
+ nCode = poDS->ReadValue(szLineBuf, sizeof(szLineBuf));
+ }
}
- // Eat the rest of this section, if present
- while( nCode > 0 && nCode != 72 )
+ // Skip past the number of fit points
+ if( nCode != 97 )
+ break;
+
+ // Eat the rest of this section, if present, until the next
+ // boundary segment (72) or the conclusion of the boundary data (97)
+ nCode = poDS->ReadValue(szLineBuf, sizeof(szLineBuf));
+ while( nCode > 0 && nCode != 72 && nCode != 97 )
nCode = poDS->ReadValue(szLineBuf, sizeof(szLineBuf));
if( nCode > 0 )
poDS->UnreadValue();
From 0f65047e480bf148a10fbe20dd4c2baacc97e5de Mon Sep 17 00:00:00 2001
From: Markus Metz <33666869+metzm@users.noreply.github.com>
Date: Sun, 25 Nov 2018 21:44:07 +0100
Subject: [PATCH 076/488] NetCDF: fix west > east if west should be < east
(#1114)
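
The idea of the fix, sketched in Python (longitude values are illustrative): if the
longitude axis is expected to increase from west to east but the first value is greater
than the last, and there is exactly one decrease in the sequence, the axis wraps across
the antimeridian and the wrapped values can be shifted by -360 degrees.

    def unwrap_longitudes(lons):
        # Mirrors the check added to netcdfdataset.cpp below.
        if lons[0] < lons[-1]:
            return lons          # west is already left of east
        ndecreases = sum(1 for prev, cur in zip(lons, lons[1:]) if cur < prev)
        if ndecreases != 1:
            return lons          # not a simple longitude wrap
        return [x - 360 if x > lons[-1] else x for x in lons]

    print(unwrap_longitudes([170.0, 175.0, 180.0, -175.0, -170.0]))
    # -> [-190.0, -185.0, -180.0, -175.0, -170.0]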
---
autotest/gdrivers/data/nc_lonwrap.nc | Bin 0 -> 776 bytes
autotest/gdrivers/netcdf.py | 30 +++++++++++++++++++++++++++
gdal/frmts/netcdf/netcdfdataset.cpp | 27 ++++++++++++++++++++++++
3 files changed, 57 insertions(+)
create mode 100644 autotest/gdrivers/data/nc_lonwrap.nc
diff --git a/autotest/gdrivers/data/nc_lonwrap.nc b/autotest/gdrivers/data/nc_lonwrap.nc
new file mode 100644
index 0000000000000000000000000000000000000000..38c31115dd4d2ef248546ee6bffc92c141fb7e53
GIT binary patch
literal 776
zcmaJY*nrz64J(;5vm+uL9?ROCugg
zkM*Sj1qdOhdAh1YZ+z80V1i{ToG5kU=k5Ra=_?^^bK5b
zLp2gg%TUFLp{?L{HtSnEsFDgW_LrR@oe}%S7-MIO_mdfav2;lA$q$v(rizWJdcu!{
zh?(G~f1DNaIdAaJ?#Ik(Z3}D7`e*CZ(mE=P^c5rmN^8BNe881gH?YY+J6jRpGV917
zxZuIi4rlsFoJ#Cm#nM;8`-{)%$av1}zh~_j=1}|@nfoMr!Cm9-@HTVt3+^}Plk`x)
z9F3>J%jf;$VKe-gt_-Fd*?gA(0O1~-{nyE^do$Ss`M>{%z2kkp+=nq~JQLC!6Vh4*
UJ_5cbq`SR-de%Pfo={4E07uV>wg3PC
literal 0
HcmV?d00001
diff --git a/autotest/gdrivers/netcdf.py b/autotest/gdrivers/netcdf.py
index ce1d313be866..347f3bce23af 100755
--- a/autotest/gdrivers/netcdf.py
+++ b/autotest/gdrivers/netcdf.py
@@ -1451,6 +1451,35 @@ def netcdf_36():
return 'success'
+
+###############################################################################
+# test for correct geotransform with longitude wrap
+
+
+def netcdf_36_lonwrap():
+
+ if gdaltest.netcdf_drv is None:
+ return 'skip'
+
+ ifile = 'data/nc_lonwrap.nc'
+
+ ds = gdal.Open(ifile)
+ if ds is None:
+ gdaltest.post_reason('open failed')
+ return 'fail'
+
+ gt = ds.GetGeoTransform()
+ if gt is None:
+ gdaltest.post_reason('got no GeoTransform')
+ return 'fail'
+ gt_expected = (-2.25, 2.5, 0.0, 16.25, 0.0, -2.5)
+ if gt != gt_expected:
+ gdaltest.post_reason('got GeoTransform %s, expected %s' % (str(gt), str(gt_expected)))
+ return 'fail'
+
+ return 'success'
+
+
###############################################################################
# test for reading gaussian grid (bugs #4513 and #5118)
@@ -3365,6 +3394,7 @@ def netcdf_postcheck():
netcdf_34,
netcdf_35,
netcdf_36,
+ netcdf_36_lonwrap,
netcdf_37,
netcdf_38,
netcdf_39,
diff --git a/gdal/frmts/netcdf/netcdfdataset.cpp b/gdal/frmts/netcdf/netcdfdataset.cpp
index 581d68bb34ba..ac6955e8b3b9 100644
--- a/gdal/frmts/netcdf/netcdfdataset.cpp
+++ b/gdal/frmts/netcdf/netcdfdataset.cpp
@@ -3122,6 +3122,33 @@ void netCDFDataset::SetProjectionFromVar( int nVarId, bool bReadSRSOnly )
}
else
{
+ bool nWestIsLeft = (pdfXCoord[0] < pdfXCoord[xdim - 1]);
+
+ // fix longitudes if longitudes should increase from
+ // west to east, but west > east
+ if (!nWestIsLeft)
+ {
+ size_t ndecreases = 0;
+
+ // there is lon wrap if longitudes increase
+ // with one single decrease
+ for( size_t i = 1; i < xdim; i++ )
+ {
+ if (pdfXCoord[i] < pdfXCoord[i - 1])
+ ndecreases++;
+ }
+
+ if (ndecreases == 1)
+ {
+ CPLDebug("GDAL_netCDF", "longitude wrap detected");
+ for( size_t i = 0; i < xdim; i++ )
+ {
+ if (pdfXCoord[i] > pdfXCoord[xdim - 1])
+ pdfXCoord[i] -= 360;
+ }
+ }
+ }
+
nSpacingBegin = static_cast<int>(
poDS->rint((pdfXCoord[1] - pdfXCoord[0]) * 1000));
From bbdacf7309ca3abd4cc833701ebd3019108e8456 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Mon, 26 Nov 2018 11:37:03 +0100
Subject: [PATCH 077/488] WMTS: fix issue with inappropriate zoom level being
selected causing integer overflow in raster dimension computation
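
The overflow being guarded against, sketched with the figures of the test TileMatrixSet
added below (the level labelled 23 has MatrixWidth 8388608 and TileWidth 256):

    # Raster width implied by the deepest tile matrix exceeds INT_MAX,
    # so that zoom level must be skipped rather than selected.
    INT_MAX = 2**31 - 1
    raster_x_size = 8388608 * 256
    print(raster_x_size, raster_x_size > INT_MAX)   # 2147483648 True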
---
autotest/gdrivers/wmts.py | 278 ++++++++++++++++++++++++++++++++
gdal/frmts/wmts/wmtsdataset.cpp | 84 +++++-----
2 files changed, 316 insertions(+), 46 deletions(-)
diff --git a/autotest/gdrivers/wmts.py b/autotest/gdrivers/wmts.py
index b3ba03cbf061..c874230600e3 100755
--- a/autotest/gdrivers/wmts.py
+++ b/autotest/gdrivers/wmts.py
@@ -1811,6 +1811,283 @@ def wmts_invalid_global_to_tm_reprojection():
#
+def wmts_check_no_overflow_zoom_level():
+
+ if gdaltest.wmts_drv is None:
+ return 'skip'
+
+ inputXml = '/vsimem/wmts_check_no_overflow_zoom_level.xml'
+ gdal.FileFromMemBuffer(inputXml, """
+
+
+
+ foo
+
+
+ -179.99999000000003 -85.00000000000003
+ 179.99999000000003 85.0
+
+ foo
+
+ image/png
+
+ default
+
+
+
+
+ default
+ urn:ogc:def:crs:EPSG::3857
+
+ 0
+ 5.590822640285016E8
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 1
+ 1
+
+
+ 1
+ 2.7954113201425034E8
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 2
+ 2
+
+
+ 2
+ 1.3977056600712562E8
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 4
+ 4
+
+
+ 3
+ 6.988528300356235E7
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 8
+ 8
+
+
+ 4
+ 3.494264150178117E7
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 16
+ 16
+
+
+ 5
+ 1.7471320750890587E7
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 32
+ 32
+
+
+ 6
+ 8735660.375445293
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 64
+ 64
+
+
+ 7
+ 4367830.187722647
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 128
+ 128
+
+
+ 8
+ 2183915.0938617955
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 256
+ 256
+
+
+ 9
+ 1091957.5469304253
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 512
+ 512
+
+
+ 10
+ 545978.7734656851
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 1024
+ 1023
+
+
+ 11
+ 272989.38673237007
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 2048
+ 2045
+
+
+ 12
+ 136494.69336618503
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 4096
+ 4090
+
+
+ 13
+ 68247.34668309252
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 8192
+ 8179
+
+
+ 14
+ 34123.67334154626
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 16384
+ 16358
+
+
+ 15
+ 17061.836671245605
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 32768
+ 32715
+
+
+ 16
+ 8530.918335622802
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 65536
+ 65429
+
+
+ 17
+ 4265.459167338929
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 131072
+ 130858
+
+
+ 18
+ 2132.729584141936
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 262144
+ 261715
+
+
+ 19
+ 1066.3647915984968
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 524288
+ 523430
+
+
+ 20
+ 533.1823957992484
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 1048576
+ 1046859
+
+
+ 21
+ 266.5911978996242
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 2097152
+ 2093718
+
+
+ 22
+ 133.2955989498121
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 4194304
+ 4187435
+
+
+ 23
+ 66.64779947490605
+ -2.0037508342787E7 2.0037508342787E7
+ 256
+ 256
+ 8388608
+ 8374869
+
+
+
+""")
+
+ ds = gdal.Open(inputXml)
+ if ds.RasterXSize != 1073741766 or ds.RasterYSize != 1070224430:
+ gdaltest.post_reason('fail')
+ print(ds.RasterXSize, ds.RasterYSize)
+ return 'fail'
+ count_levels = 1 + ds.GetRasterBand(1).GetOverviewCount()
+ if count_levels != 23: # there are 24 in total, but we discard the one labelled 23
+ gdaltest.post_reason('fail')
+ print(count_levels)
+ return 'fail'
+ ds = None
+
+ gdal.Unlink(inputXml)
+
+ return 'success'
+
+
+
+###############################################################################
+#
+
+
def wmts_CleanCache():
hexstr = '012346789abcdef'
for i in range(len(hexstr)):
@@ -1877,6 +2154,7 @@ def wmts_cleanup():
wmts_23_rgb,
wmts_23_rgba,
wmts_invalid_global_to_tm_reprojection,
+ wmts_check_no_overflow_zoom_level,
wmts_cleanup]
if __name__ == '__main__':
diff --git a/gdal/frmts/wmts/wmtsdataset.cpp b/gdal/frmts/wmts/wmtsdataset.cpp
index 18b9186cfc55..803c121c59c3 100644
--- a/gdal/frmts/wmts/wmtsdataset.cpp
+++ b/gdal/frmts/wmts/wmtsdataset.cpp
@@ -1824,49 +1824,6 @@ GDALDataset* WMTSDataset::Open(GDALOpenInfo* poOpenInfo)
}
}
- // Establish raster dimension and extent
- int nMaxZoomLevel = (int)oTMS.aoTM.size()-1;
- while(nMaxZoomLevel >= 0)
- {
- const WMTSTileMatrix& oTM = oTMS.aoTM[nMaxZoomLevel];
- double dfRasterXSize = (sAOI.MaxX - sAOI.MinX) / oTM.dfPixelSize;
- double dfRasterYSize = (sAOI.MaxY - sAOI.MinY) / oTM.dfPixelSize;
- if( dfRasterXSize < INT_MAX && dfRasterYSize < INT_MAX )
- {
- if( nMaxZoomLevel != (int)oTMS.aoTM.size()-1 )
- {
- CPLDebug("WMTS", "Using zoom level %s instead of %s to avoid int overflow",
- oTMS.aoTM[nMaxZoomLevel].osIdentifier.c_str(),
- oTMS.aoTM.back().osIdentifier.c_str());
- }
-
- // Align AOI on pixel boundaries with respect to TopLeftCorner of
- // this tile matrix
- poDS->adfGT[0] = oTM.dfTLX + floor((sAOI.MinX - oTM.dfTLX) / oTM.dfPixelSize+1e-10) * oTM.dfPixelSize;
- poDS->adfGT[1] = oTM.dfPixelSize;
- poDS->adfGT[2] = 0.0;
- poDS->adfGT[3] = oTM.dfTLY + ceil((sAOI.MaxY - oTM.dfTLY) / oTM.dfPixelSize-1e-10) * oTM.dfPixelSize;
- poDS->adfGT[4] = 0.0;
- poDS->adfGT[5] = -oTM.dfPixelSize;
- poDS->nRasterXSize = int(0.5 + (sAOI.MaxX - poDS->adfGT[0]) / oTM.dfPixelSize);
- poDS->nRasterYSize = int(0.5 + (poDS->adfGT[3] - sAOI.MinY) / oTM.dfPixelSize);
- break;
- }
- nMaxZoomLevel --;
- }
- if( nMaxZoomLevel < 0 )
- {
- CPLError(CE_Failure, CPLE_AppDefined,
- "No zoom level in tile matrix set found");
- CPLDestroyXMLNode(psXML);
- delete poDS;
- return nullptr;
- }
- CPLDebug("WMTS", "Using tilematrix=%s (zoom level %d)",
- oTMS.aoTM[nMaxZoomLevel].osIdentifier.c_str(), nMaxZoomLevel);
- oTMS.aoTM.resize(1 + nMaxZoomLevel);
- poDS->oTMS = oTMS;
-
if( !osProjection.empty() )
{
OGRSpatialReference oSRS;
@@ -1986,9 +1943,30 @@ GDALDataset* WMTSDataset::Open(GDALOpenInfo* poOpenInfo)
poDS->osURLFeatureInfoTemplate = osURLFeatureInfoTemplate;
// Build all TMS datasets, wrapped in VRT datasets
- for(int i=nMaxZoomLevel;i>=0;i--)
+    for(int i=static_cast<int>(oTMS.aoTM.size()-1);i>=0;i--)
{
const WMTSTileMatrix& oTM = oTMS.aoTM[i];
+ double dfRasterXSize = (sAOI.MaxX - sAOI.MinX) / oTM.dfPixelSize;
+ double dfRasterYSize = (sAOI.MaxY - sAOI.MinY) / oTM.dfPixelSize;
+ if( dfRasterXSize > INT_MAX || dfRasterYSize > INT_MAX )
+ {
+ continue;
+ }
+
+ if( poDS->apoDatasets.empty() )
+ {
+ // Align AOI on pixel boundaries with respect to TopLeftCorner of
+ // this tile matrix
+ poDS->adfGT[0] = oTM.dfTLX + floor((sAOI.MinX - oTM.dfTLX) / oTM.dfPixelSize+1e-10) * oTM.dfPixelSize;
+ poDS->adfGT[1] = oTM.dfPixelSize;
+ poDS->adfGT[2] = 0.0;
+ poDS->adfGT[3] = oTM.dfTLY + ceil((sAOI.MaxY - oTM.dfTLY) / oTM.dfPixelSize-1e-10) * oTM.dfPixelSize;
+ poDS->adfGT[4] = 0.0;
+ poDS->adfGT[5] = -oTM.dfPixelSize;
+ poDS->nRasterXSize = int(0.5 + (sAOI.MaxX - poDS->adfGT[0]) / oTM.dfPixelSize);
+ poDS->nRasterYSize = int(0.5 + (poDS->adfGT[3] - sAOI.MinY) / oTM.dfPixelSize);
+ }
+
int nRasterXSize = int(0.5 + poDS->nRasterXSize / oTM.dfPixelSize * poDS->adfGT[1]);
int nRasterYSize = int(0.5 + poDS->nRasterYSize / oTM.dfPixelSize * poDS->adfGT[1]);
if( !poDS->apoDatasets.empty() &&
@@ -2014,8 +1992,22 @@ GDALDataset* WMTSDataset::Open(GDALOpenInfo* poOpenInfo)
dfLRX = dfULX + ceil((dfLRX - dfULX) / dfTileWidthUnits - 1e-10) * dfTileWidthUnits;
dfLRY = dfULY + floor((dfLRY - dfULY) / dfTileHeightUnits + 1e-10) * dfTileHeightUnits;
- int nSizeX = int(0.5+(dfLRX - dfULX) / oTM.dfPixelSize);
- int nSizeY = int(0.5+(dfULY - dfLRY) / oTM.dfPixelSize);
+ double dfSizeX = 0.5+(dfLRX - dfULX) / oTM.dfPixelSize;
+ double dfSizeY = 0.5+(dfULY - dfLRY) / oTM.dfPixelSize;
+ if( dfSizeX > INT_MAX || dfSizeY > INT_MAX )
+ {
+ continue;
+ }
+ if( poDS->apoDatasets.empty() )
+ {
+ CPLDebug("WMTS", "Using tilematrix=%s (zoom level %d)",
+ oTMS.aoTM[i].osIdentifier.c_str(), i);
+ oTMS.aoTM.resize(1 + i);
+ poDS->oTMS = oTMS;
+ }
+
+        int nSizeX = static_cast<int>(dfSizeX);
+        int nSizeY = static_cast<int>(dfSizeY);
double dfDateLineX = oTM.dfTLX + oTM.nMatrixWidth * dfTileWidthUnits;
int nSizeX1 = int(0.5+(dfDateLineX - dfULX) / oTM.dfPixelSize);
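Note on the change above: the two hunks move the INT_MAX guard from a one-shot scan over the tile matrices into the per-level dataset-building loop, so each candidate zoom level is validated as a double before anything is narrowed to int. With the 24-level tile matrix set of the new test, level 23 is the one whose tile-aligned extent no longer fits in a signed 32-bit range and is skipped, which is why the test expects a 1073741766 x 1070224430 raster and 23 retained levels. What follows is a minimal, self-contained sketch of that skip-the-overflowing-levels idea, not the driver's actual code; the TileMatrixSketch struct, the 4.0e7-unit extent and the pixel sizes are hypothetical stand-ins for WMTSTileMatrix, sAOI and oTM.dfPixelSize.

#include <climits>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for a WMTS tile matrix: identifier + pixel size in CRS units.
struct TileMatrixSketch
{
    const char *pszId;
    double      dfPixelSize;
};

int main()
{
    // Made-up area of interest, 4.0e7 CRS units on each side.
    const double dfMinX = 0.0, dfMaxX = 4.0e7;
    const double dfMinY = 0.0, dfMaxY = 4.0e7;

    // Coarsest to finest; the last level would need 4e9 pixels per axis.
    const std::vector<TileMatrixSketch> aoTM = {
        {"9", 20.0}, {"10", 10.0}, {"11", 0.01}
    };

    // Walk from the finest level down and keep the first one whose raster
    // dimensions, evaluated as doubles, still fit in a signed 32-bit int.
    for (int i = static_cast<int>(aoTM.size()) - 1; i >= 0; --i)
    {
        const double dfXSize = (dfMaxX - dfMinX) / aoTM[i].dfPixelSize;
        const double dfYSize = (dfMaxY - dfMinY) / aoTM[i].dfPixelSize;
        if (dfXSize > INT_MAX || dfYSize > INT_MAX)
            continue;   // this zoom level would overflow int, skip it

        printf("Finest usable tile matrix: %s (%d x %d px)\n",
               aoTM[i].pszId,
               static_cast<int>(0.5 + dfXSize),
               static_cast<int>(0.5 + dfYSize));
        break;
    }
    return 0;
}

The point mirrored from the patch is that the INT_MAX comparison happens on the double values; casting to int first would be an out-of-range floating-point conversion (undefined behaviour) before the level could ever be rejected.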
From f56afe87c7c487dd85eff46eec1d16a229693466 Mon Sep 17 00:00:00 2001
From: Even Rouault
Date: Mon, 26 Nov 2018 12:36:59 +0100
Subject: [PATCH 078/488] WMTS: fix potential off-by-one pixel when compositing
the underlying WMS/TMS source into the final raster
---
gdal/frmts/wmts/wmtsdataset.cpp | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/gdal/frmts/wmts/wmtsdataset.cpp b/gdal/frmts/wmts/wmtsdataset.cpp
index 803c121c59c3..642187d20441 100644
--- a/gdal/frmts/wmts/wmtsdataset.cpp
+++ b/gdal/frmts/wmts/wmtsdataset.cpp
@@ -35,6 +35,7 @@
#include "../vrt/gdal_vrt.h"
#include
+#include
#include