QGIS API Documentation 3.41.0-Master (d2aaa9c6e02)
Loading...
Searching...
No Matches
qgscopcupdate.cpp
Go to the documentation of this file.
1/***************************************************************************
2 qgscopcupdate.cpp
3 ---------------------
4 begin : January 2025
5 copyright : (C) 2025 by Martin Dobias
6 email : wonder dot sk at gmail dot com
7 ***************************************************************************
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License as published by *
11 * the Free Software Foundation; either version 2 of the License, or *
12 * (at your option) any later version. *
13 * *
14 ***************************************************************************/
15
16#include "qgscopcupdate.h"
17
18#include "qgslazdecoder.h"
19
20#include <fstream>
21#include <iostream>
22
23#include <lazperf/header.hpp>
24#include <lazperf/vlr.hpp>
25#include <lazperf/Extractor.hpp>
26#include <lazperf/filestream.hpp>
27#include <lazperf/readers.hpp>
28#include <lazperf/writers.hpp>
29
30
33{
36
42 uint64_t offset;
43
49 int32_t byteSize;
50
56 int32_t pointCount;
57};
58
59typedef QVector<HierarchyEntry> HierarchyEntries;
60
61
62HierarchyEntries getHierarchyPage( std::ifstream &file, uint64_t offset, uint64_t size )
63{
65 std::vector<char> buf( 32 );
66 int numEntries = static_cast<int>( size / 32 );
67 file.seekg( static_cast<int64_t>( offset ) );
68 while ( numEntries-- )
69 {
70 file.read( buf.data(), static_cast<long>( buf.size() ) );
71 lazperf::LeExtractor s( buf.data(), buf.size() );
72
74 int d, x, y, z;
75 s >> d >> x >> y >> z;
76 s >> e.offset >> e.byteSize >> e.pointCount;
77 e.key = QgsPointCloudNodeId( d, x, y, z );
78
79 page.push_back( e );
80 }
81 return page;
82}
83
84
/**
 * Writes a copy of the already-read input COPC file to \a outputFilename,
 * substituting the chunks given in \a updatedChunks, and patching the chunk
 * table, the EPT hierarchy eVLR and the header offsets to match the new
 * chunk positions/sizes.
 *
 * Assumes read() succeeded first (mFile, mHeader, mChunks, mOffsetToVoxel,
 * mHierarchyBlob, mCopcVlr, mEvlrHeaders/mEvlrData must be populated).
 *
 * NOTE(review): always returns true — stream write failures are not checked.
 */
bool QgsCopcUpdate::write( const QString &outputFilename, const QHash<QgsPointCloudNodeId, UpdatedChunk> &updatedChunks )
{
  std::ofstream m_f;
  m_f.open( QgsLazDecoder::toNativePath( outputFilename ), std::ios::out | std::ios::binary );

  // write header and all VLRs all the way to point offset
  // (then we patch what we need)
  mFile.seekg( 0 );
  std::vector<char> allHeaderData;
  allHeaderData.resize( mHeader.point_offset );
  mFile.read( allHeaderData.data(), static_cast<long>( allHeaderData.size() ) );
  m_f.write( allHeaderData.data(), static_cast<long>( allHeaderData.size() ) );

  m_f.write( "XXXXXXXX", 8 ); // placeholder for chunk table offset

  // first chunk starts right after the 8-byte chunk table offset slot
  uint64_t currentChunkOffset = mHeader.point_offset + 8;
  mFile.seekg( static_cast<long>( currentChunkOffset ) ); // this is where first chunk starts

  // now, let's write chunks:
  // - iterate through original chunk table, write out chunks
  // - if chunk is updated, use that instead
  // - keep updating hierarchy as we go
  // - keep updating chunk table as we go

  // maps voxel key -> absolute offset of its chunk in the OUTPUT file,
  // used below to rewrite hierarchy entries
  QHash<QgsPointCloudNodeId, uint64_t> voxelToNewOffset;

  int chIndex = 0;
  for ( lazperf::chunk ch : mChunks )
  {
    // NOTE: at this point ch.offset holds the chunk's byte SIZE (relative
    // form as decompressed from the chunk table), not an absolute offset —
    // see how currentChunkOffset is advanced by it below.
    Q_ASSERT( mOffsetToVoxel.contains( currentChunkOffset ) );
    QgsPointCloudNodeId n = mOffsetToVoxel[currentChunkOffset];

    uint64_t newOffset = m_f.tellp();
    voxelToNewOffset[n] = newOffset;

    // check whether the chunk is modified
    if ( updatedChunks.contains( n ) )
    {
      const UpdatedChunk &updatedChunk = updatedChunks[n];

      // use updated one and skip in the original file
      mFile.seekg( static_cast<long>( mFile.tellg() ) + static_cast<long>( ch.offset ) );

      m_f.write( updatedChunk.chunkData.constData(), updatedChunk.chunkData.size() );

      // update sizes
      // (offset field keeps the relative size here — it gets re-encoded
      // when the chunk table is compressed below)
      mChunks[chIndex].offset = updatedChunk.chunkData.size();
    }
    else
    {
      // use as is
      std::vector<char> originalChunkData;
      originalChunkData.resize( ch.offset );
      mFile.read( originalChunkData.data(), static_cast<long>( originalChunkData.size() ) );
      m_f.write( originalChunkData.data(), static_cast<long>( originalChunkData.size() ) );
    }

    currentChunkOffset += ch.offset;
    ++chIndex;
  }

  // write chunk table: size in bytes + point count of each chunk

  const uint64_t newChunkTableOffset = m_f.tellp();

  m_f.write( "\0\0\0\0", 4 ); // chunk table version
  m_f.write( reinterpret_cast<const char *>( &mChunkCount ), sizeof( mChunkCount ) );

  lazperf::OutFileStream outStream( m_f );
  lazperf::compress_chunk_table( outStream.cb(), mChunks, true );

  // update hierarchy

  // NOTE: one big assumption we're doing here is that existing hierarchy pages
  // are packed one after another, with no gaps. if that's not the case, things
  // will break apart

  // how far the hierarchy payload moves between input and output file;
  // + 60 — presumably the size of the lazperf eVLR header written before
  // the payload (TODO confirm against lazperf::evlr_header)
  const long hierPositionShift = static_cast<long>( m_f.tellp() ) + 60 - static_cast<long>( mHierarchyOffset );

  // rewrite hierarchy entries in place in the blob (relies on HierarchyEntry
  // matching the 32-byte on-disk record layout)
  HierarchyEntry *oldCopcHierarchyBlobEntries = reinterpret_cast<HierarchyEntry *>( mHierarchyBlob.data() );
  const int nEntries = static_cast<int>( mHierarchyBlob.size() / 32 );
  for ( int i = 0; i < nEntries; ++i )
  {
    HierarchyEntry &e = oldCopcHierarchyBlobEntries[i];
    if ( e.pointCount > 0 )
    {
      // update entry to new offset
      Q_ASSERT( voxelToNewOffset.contains( e.key ) );
      e.offset = voxelToNewOffset[e.key];

      if ( updatedChunks.contains( e.key ) )
      {
        uint64_t newByteSize = updatedChunks[e.key].chunkData.size();
        e.byteSize = static_cast<int>( newByteSize );
      }
    }
    else if ( e.pointCount < 0 )
    {
      // move hierarchy pages to new offset
      e.offset += hierPositionShift;
    }
    else // pointCount == 0
    {
      // nothing to do - byte size and offset should be zero
    }

  }

  // write hierarchy eVLR

  const uint64_t newEvlrOffset = m_f.tellp();

  lazperf::evlr_header outCopcHierEvlr;
  outCopcHierEvlr.reserved = 0;
  outCopcHierEvlr.user_id = "copc";
  outCopcHierEvlr.record_id = 1000;
  outCopcHierEvlr.data_length = mHierarchyBlob.size();
  outCopcHierEvlr.description = "EPT Hierarchy";

  outCopcHierEvlr.write( m_f );
  m_f.write( mHierarchyBlob.data(), static_cast<long>( mHierarchyBlob.size() ) );

  // write other eVLRs

  for ( size_t i = 0; i < mEvlrHeaders.size(); ++i )
  {
    lazperf::evlr_header evlrHeader = mEvlrHeaders[i];
    std::vector<char> evlrBody = mEvlrData[i];

    evlrHeader.write( m_f );
    m_f.write( evlrBody.data(), static_cast<long>( evlrBody.size() ) );
  }

  // patch header

  // NOTE(review): 235 appears to be the byte offset of the LAS 1.4 header
  // field "Start of First EVLR" — confirm against the LAS 1.4 spec
  m_f.seekp( 235 );
  m_f.write( reinterpret_cast<const char *>( &newEvlrOffset ), 8 );

  // NOTE(review): 469 presumably points at root_hier_offset inside the COPC
  // info VLR payload (LAS header + VLR header + field offset) — confirm
  const uint64_t newRootHierOffset = mCopcVlr.root_hier_offset + hierPositionShift;
  m_f.seekp( 469 );
  m_f.write( reinterpret_cast<const char *>( &newRootHierOffset ), 8 );

  // fill in the placeholder written at the start with the real chunk table offset
  m_f.seekp( mHeader.point_offset );
  m_f.write( reinterpret_cast<const char *>( &newChunkTableOffset ), 8 );

  return true;
}
232
233
234
235bool QgsCopcUpdate::read( const QString &inputFilename )
236{
237 mInputFilename = inputFilename;
238
239 mFile.open( QgsLazDecoder::toNativePath( inputFilename ), std::ios::binary | std::ios::in );
240 if ( mFile.fail() )
241 {
242 mErrorMessage = QStringLiteral( "Could not open file for reading: %1" ).arg( inputFilename );
243 return false;
244 }
245
246 if ( !readHeader() )
247 return false;
248
249 readChunkTable();
250 readHierarchy();
251
252 return true;
253}
254
255
256bool QgsCopcUpdate::readHeader()
257{
258 // read header and COPC VLR
259 mHeader = lazperf::header14::create( mFile );
260 if ( !mFile )
261 {
262 mErrorMessage = QStringLiteral( "Error reading COPC header" );
263 return false;
264 }
265
266 lazperf::vlr_header vh = lazperf::vlr_header::create( mFile );
267 mCopcVlr = lazperf::copc_info_vlr::create( mFile );
268
269 int baseCount = lazperf::baseCount( mHeader.point_format_id );
270 if ( baseCount == 0 )
271 {
272 mErrorMessage = QStringLiteral( "Bad point record format: %1" ).arg( mHeader.point_format_id );
273 return false;
274 }
275
276 return true;
277}
278
279
280void QgsCopcUpdate::readChunkTable()
281{
282 uint64_t chunkTableOffset;
283
284 mFile.seekg( mHeader.point_offset );
285 mFile.read( reinterpret_cast<char *>( &chunkTableOffset ), sizeof( chunkTableOffset ) );
286 mFile.seekg( static_cast<long>( chunkTableOffset ) + 4 ); // The first 4 bytes are the version, then the chunk count.
287 mFile.read( reinterpret_cast<char *>( &mChunkCount ), sizeof( mChunkCount ) );
288
289 //
290 // read chunk table
291 //
292
293 bool variable = true;
294
295 // TODO: not sure why, but after decompress_chunk_table() the input stream seems to be dead, so we create a temporary one
296 std::ifstream copcFileTmp;
297 copcFileTmp.open( QgsLazDecoder::toNativePath( mInputFilename ), std::ios::binary | std::ios::in );
298 copcFileTmp.seekg( mFile.tellg() );
299 lazperf::InFileStream copcInFileStream( copcFileTmp );
300
301 mChunks = lazperf::decompress_chunk_table( copcInFileStream.cb(), mChunkCount, variable );
302 std::vector<lazperf::chunk> chunksWithAbsoluteOffsets;
303 uint64_t nextChunkOffset = mHeader.point_offset + 8;
304 for ( lazperf::chunk ch : mChunks )
305 {
306 chunksWithAbsoluteOffsets.push_back( {nextChunkOffset, ch.count} );
307 nextChunkOffset += ch.offset;
308 }
309}
310
311
/**
 * Reads the COPC EPT hierarchy of the input file.
 *
 * Two passes over the data:
 *  1. walks all hierarchy pages starting from the root page (location taken
 *     from the COPC info VLR) and fills mOffsetToVoxel — mapping absolute
 *     chunk offset -> voxel key — for every non-empty node;
 *  2. scans all eVLRs: the "copc"/1000 one (the hierarchy itself) is stored
 *     raw in mHierarchyBlob (with its payload offset in mHierarchyOffset),
 *     all other eVLRs are preserved in mEvlrHeaders/mEvlrData so write()
 *     can copy them through.
 */
void QgsCopcUpdate::readHierarchy()
{
  // get all hierarchy pages

  // stack of page references still to visit; seeded with the root page,
  // pointCount == -1 marks "this entry refers to a page, not a chunk"
  HierarchyEntries childEntriesToProcess;
  childEntriesToProcess.push_back( HierarchyEntry
  {
    QgsPointCloudNodeId( 0, 0, 0, 0 ),
    mCopcVlr.root_hier_offset,
    static_cast<int32_t>( mCopcVlr.root_hier_size ),
    -1 } );

  while ( !childEntriesToProcess.empty() )
  {
    HierarchyEntry childEntry = childEntriesToProcess.back();
    childEntriesToProcess.pop_back();

    HierarchyEntries page = getHierarchyPage( mFile, childEntry.offset, childEntry.byteSize );

    for ( const HierarchyEntry &e : page )
    {
      if ( e.pointCount > 0 ) // it's a non-empty node
      {
        // chunk offsets must be unique — one voxel per chunk
        Q_ASSERT( !mOffsetToVoxel.contains( e.offset ) );
        mOffsetToVoxel[e.offset] = e.key;
      }
      else if ( e.pointCount < 0 ) // referring to a child page
      {
        childEntriesToProcess.push_back( e );
      }
    }
  }

  lazperf::evlr_header evlr1;
  mFile.seekg( static_cast<long>( mHeader.evlr_offset ) );

  mHierarchyOffset = 0; // where the hierarchy eVLR payload starts

  for ( uint32_t i = 0; i < mHeader.evlr_count; ++i )
  {
    evlr1.read( mFile );
    if ( evlr1.user_id == "copc" && evlr1.record_id == 1000 )
    {
      // the EPT hierarchy eVLR — keep the raw payload for in-place patching
      mHierarchyBlob.resize( evlr1.data_length );
      mHierarchyOffset = mFile.tellg();
      mFile.read( mHierarchyBlob.data(), static_cast<long>( evlr1.data_length ) );
    }
    else
    {
      // keep for later
      mEvlrHeaders.push_back( evlr1 );
      std::vector<char> evlrBlob;
      evlrBlob.resize( evlr1.data_length );
      mFile.read( evlrBlob.data(), static_cast<long>( evlrBlob.size() ) );
      mEvlrData.push_back( evlrBlob );
    }
  }

  // a valid COPC file always has the hierarchy eVLR
  Q_ASSERT( !mHierarchyBlob.empty() );
}
372
373
374bool QgsCopcUpdate::writeUpdatedFile( const QString &inputFilename,
375 const QString &outputFilename,
376 const QHash<QgsPointCloudNodeId, UpdatedChunk> &updatedChunks,
377 QString *errorMessage )
378{
379 QgsCopcUpdate copcUpdate;
380 if ( !copcUpdate.read( inputFilename ) )
381 {
382 if ( errorMessage )
383 *errorMessage = copcUpdate.errorMessage();
384 return false;
385 }
386
387 if ( !copcUpdate.write( outputFilename, updatedChunks ) )
388 {
389 if ( errorMessage )
390 *errorMessage = copcUpdate.errorMessage();
391 return false;
392 }
393
394 return true;
395}
This class takes an existing COPC file and a list of chunks that should be modified,...
QString errorMessage() const
Returns error message.
static bool writeUpdatedFile(const QString &inputFilename, const QString &outputFilename, const QHash< QgsPointCloudNodeId, UpdatedChunk > &updatedChunks, QString *errorMessage=nullptr)
Convenience function to do the whole process in one go: load a COPC file, then write a new COPC file ...
bool read(const QString &inputFilename)
Reads input COPC file and initializes all the members.
bool write(const QString &outputFilename, const QHash< QgsPointCloudNodeId, UpdatedChunk > &updatedChunks)
Writes a COPC file with updated chunks.
Represents an indexed point cloud node's position in the octree.
QVector< HierarchyEntry > HierarchyEntries
HierarchyEntries getHierarchyPage(std::ifstream &file, uint64_t offset, uint64_t size)
Keeps one entry of COPC hierarchy.
QgsPointCloudNodeId key
Key of the data to which this entry corresponds.
uint64_t offset
Absolute offset to the data chunk if the pointCount > 0.
int32_t pointCount
If > 0, represents the number of points in the data chunk.
int32_t byteSize
Size of the data chunk in bytes (compressed size) if the pointCount > 0.
Keeps information on how points of a single chunk have been modified.
QByteArray chunkData
Data of the chunk (compressed already with LAZ compressor)