Diffstat (limited to 'src/main.rs'):
-rw-r--r--  src/main.rs | 89
1 file changed, 5 insertions(+), 84 deletions(-)
diff --git a/src/main.rs b/src/main.rs
index 8d561b4..bb80311 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,20 +1,15 @@
#![feature(array_chunks)]
pub mod cache;
+pub mod client;
pub mod mesh;
-use anyhow::{Result, bail};
+use anyhow::Result;
use cache::Cache;
use clap::Parser;
+use client::GeClient;
use futures_util::{StreamExt, stream::FuturesUnordered};
use glam::{DMat4, Vec3};
-use log::{debug, error, info};
use mesh::{convert_mesh, decode_normal_table};
-use prost::{Message, bytes::Bytes};
-use proto::{BulkMetadata, NodeData, NodeMetadata, PlanetoidMetadata};
-use reqwest::{
- Client,
- header::{HeaderMap, HeaderName, HeaderValue},
-};
use std::{f32::consts::PI, path::PathBuf, pin::Pin, sync::Arc};
use tokio::sync::Semaphore;
use weareshared::{
@@ -35,6 +30,7 @@ struct Args {
#[clap(subcommand)]
action: Action,
}
+
#[derive(Parser)]
enum Action {
Cache { level: usize },
@@ -184,83 +180,8 @@ fn cache_all(
})
}
-struct GeClient {
- client: Client,
- cache: Cache,
- par_limit: Semaphore,
-}
-
-impl GeClient {
- pub async fn new(par_limit: usize, cache: Cache) -> Result<Self> {
- Ok(Self {
- par_limit: Semaphore::new(par_limit), cache,
- client: Client::builder().default_headers(HeaderMap::from_iter([
- (HeaderName::from_static("user-agent"), HeaderValue::from_static("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36")),
- (HeaderName::from_static("referer"), HeaderValue::from_static("https://earth.google.com/"))
- ])).build().unwrap()
- })
- }
- pub async fn download(&self, path: &str) -> Result<Bytes> {
- let _permit = self.par_limit.acquire().await?;
- if let Some(d) = self.cache.get(path)? {
- debug!("cached {path:?}");
- Ok(d.into())
- } else {
- info!("download {path:?}");
- let res = self
- .client
- .get(format!("https://kh.google.com/rt/earth/{path}"))
- .send()
- .await?;
- if !res.status().is_success() {
- error!("error response: {:?}", res.text().await?);
- bail!("error response")
- }
- let buf = res.bytes().await?;
- self.cache.insert(path, &buf)?;
- Ok(buf)
- }
- }
-
- pub async fn planetoid_metdata(&self) -> Result<PlanetoidMetadata> {
- let buf = self.download("PlanetoidMetadata").await?;
- Ok(PlanetoidMetadata::decode(buf)?)
- }
- pub async fn bulk_metadata(&self, path: &str, epoch: u32) -> Result<BulkMetadata> {
- let buf = self
- .download(&format!("BulkMetadata/pb=!1m2!1s{path}!2u{epoch}"))
- .await?;
- Ok(BulkMetadata::decode(buf)?)
- }
- pub async fn node_data(
- &self,
- abspath: &str,
- flags: Flags,
- bulk: &BulkMetadata,
- node: &NodeMetadata,
- ) -> Result<NodeData> {
- let texture_format = bulk.default_available_texture_formats();
- let imagery_epoch = node.imagery_epoch.unwrap_or(bulk.default_imagery_epoch());
- let node_epoch = node
- .epoch
- .unwrap_or(bulk.head_node_key.as_ref().unwrap().epoch.unwrap()); // ?
-
- let image_epoch_part = if flags.use_image_epoch {
- format!("!3u{imagery_epoch}")
- } else {
- String::new()
- };
- let url = format!(
- "NodeData/pb=!1m2!1s{abspath}!2u{node_epoch}!2e{texture_format}{image_epoch_part}!4b0"
- );
-
- let buf = self.download(&url).await?;
- Ok(NodeData::decode(buf)?)
- }
-}
-
#[derive(Debug, Clone, Copy)]
-struct Flags {
+pub struct Flags {
has_node: bool,
has_metadata: bool,
use_image_epoch: bool,
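
Note: the GeClient implementation removed above is presumably what the new client module (declared with "pub mod client;" and imported via "use client::GeClient;" in the first hunk) now provides, and Flags is made pub so the moved code can keep taking it as a parameter. Below is a minimal sketch of the surface src/client.rs would need to expose for main.rs to keep compiling; the file path, the import paths, and the elided method bodies are assumptions, not shown in this diff.

// src/client.rs -- assumed location; the diff only shows this code leaving main.rs.
// Method bodies are elided; in the removed code above they also use anyhow::bail,
// log::{debug, error, info}, prost::Message, and the reqwest header types.
use anyhow::Result;
use prost::bytes::Bytes;
use proto::{BulkMetadata, NodeData, NodeMetadata, PlanetoidMetadata};
use reqwest::Client;
use tokio::sync::Semaphore;

use crate::{Flags, cache::Cache};

pub struct GeClient {
    client: Client,
    cache: Cache,
    par_limit: Semaphore,
}

impl GeClient {
    // Builds the reqwest client with the browser-like User-Agent and Referer defaults shown above.
    pub async fn new(par_limit: usize, cache: Cache) -> Result<Self> {
        todo!()
    }

    // Cache-first fetch of https://kh.google.com/rt/earth/{path}, bounded by the semaphore.
    pub async fn download(&self, path: &str) -> Result<Bytes> {
        todo!()
    }

    pub async fn planetoid_metdata(&self) -> Result<PlanetoidMetadata> {
        todo!()
    }

    pub async fn bulk_metadata(&self, path: &str, epoch: u32) -> Result<BulkMetadata> {
        todo!()
    }

    pub async fn node_data(
        &self,
        abspath: &str,
        flags: Flags,
        bulk: &BulkMetadata,
        node: &NodeMetadata,
    ) -> Result<NodeData> {
        todo!()
    }
}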