Diffstat (limited to 'src')
-rw-r--r--  src/cache.rs    2
-rw-r--r--  src/client.rs  93
-rw-r--r--  src/main.rs    89
3 files changed, 99 insertions, 85 deletions
diff --git a/src/cache.rs b/src/cache.rs
index 114ab09..c0e160f 100644
--- a/src/cache.rs
+++ b/src/cache.rs
@@ -22,7 +22,7 @@ impl Cache {
Ok(Self::Directory(cachedir))
}
pub fn new_db(path: &Path) -> Result<Self> {
- let db = Database::open(path)?;
+ let db = Database::create(path)?;
{
let txn = db.begin_write()?;
txn.open_table(T_DOWNLOAD)?;
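
The cache.rs hunk above swaps Database::open for Database::create. Assuming the Database type is redb's, create reuses an existing file and creates one when it is missing, while open fails if the file does not exist, so a fresh cache path no longer errors on first use. A minimal sketch of that pattern, with a hypothetical T_DOWNLOAD definition standing in for the real one in cache.rs:

use anyhow::Result;
use redb::{Database, TableDefinition};
use std::path::Path;

// Hypothetical stand-in for the T_DOWNLOAD table defined in cache.rs.
const T_DOWNLOAD: TableDefinition<&str, &[u8]> = TableDefinition::new("download");

fn open_or_create(path: &Path) -> Result<Database> {
    // create() opens the database file if it exists and creates it otherwise;
    // open() would return an error for a missing file.
    let db = Database::create(path)?;
    let txn = db.begin_write()?;
    txn.open_table(T_DOWNLOAD)?; // ensure the table exists before any reads
    txn.commit()?;
    Ok(db)
}
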
diff --git a/src/client.rs b/src/client.rs
new file mode 100644
index 0000000..2699c26
--- /dev/null
+++ b/src/client.rs
@@ -0,0 +1,93 @@
+use anyhow::{Result, bail};
+use log::{debug, error, info};
+use prost::{Message, bytes::Bytes};
+use reqwest::{
+ Client,
+ header::{HeaderMap, HeaderName, HeaderValue},
+};
+use std::sync::atomic::{AtomicUsize, Ordering};
+use tokio::sync::Semaphore;
+
+use crate::{
+ Flags,
+ cache::Cache,
+ proto::{BulkMetadata, NodeData, NodeMetadata, PlanetoidMetadata},
+};
+
+pub struct GeClient {
+ counter: AtomicUsize,
+ client: Client,
+ cache: Cache,
+ par_limit: Semaphore,
+}
+
+impl GeClient {
+ pub async fn new(par_limit: usize, cache: Cache) -> Result<Self> {
+ Ok(Self {
+ counter: AtomicUsize::new(0),
+ par_limit: Semaphore::new(par_limit), cache,
+ client: Client::builder().default_headers(HeaderMap::from_iter([
+ (HeaderName::from_static("user-agent"), HeaderValue::from_static("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36")),
+ (HeaderName::from_static("referer"), HeaderValue::from_static("https://earth.google.com/"))
+ ])).build().unwrap()
+ })
+ }
+ pub async fn download(&self, path: &str) -> Result<Bytes> {
+ let _permit = self.par_limit.acquire().await?;
+ if let Some(d) = self.cache.get(path)? {
+ debug!("cached {path:?}");
+ Ok(d.into())
+ } else {
+ let n = self.counter.fetch_add(1, Ordering::Relaxed);
+ info!("download #{n} {path:?}");
+ let res = self
+ .client
+ .get(format!("https://kh.google.com/rt/earth/{path}"))
+ .send()
+ .await?;
+ if !res.status().is_success() {
+ error!("error response: {:?}", res.text().await?);
+ bail!("error response")
+ }
+ let buf = res.bytes().await?;
+ self.cache.insert(path, &buf)?;
+ Ok(buf)
+ }
+ }
+
+ pub async fn planetoid_metdata(&self) -> Result<PlanetoidMetadata> {
+ let buf = self.download("PlanetoidMetadata").await?;
+ Ok(PlanetoidMetadata::decode(buf)?)
+ }
+ pub async fn bulk_metadata(&self, path: &str, epoch: u32) -> Result<BulkMetadata> {
+ let buf = self
+ .download(&format!("BulkMetadata/pb=!1m2!1s{path}!2u{epoch}"))
+ .await?;
+ Ok(BulkMetadata::decode(buf)?)
+ }
+ pub async fn node_data(
+ &self,
+ abspath: &str,
+ flags: Flags,
+ bulk: &BulkMetadata,
+ node: &NodeMetadata,
+ ) -> Result<NodeData> {
+ let texture_format = bulk.default_available_texture_formats();
+ let imagery_epoch = node.imagery_epoch.unwrap_or(bulk.default_imagery_epoch());
+ let node_epoch = node
+ .epoch
+ .unwrap_or(bulk.head_node_key.as_ref().unwrap().epoch.unwrap()); // ?
+
+ let image_epoch_part = if flags.use_image_epoch {
+ format!("!3u{imagery_epoch}")
+ } else {
+ String::new()
+ };
+ let url = format!(
+ "NodeData/pb=!1m2!1s{abspath}!2u{node_epoch}!2e{texture_format}{image_epoch_part}!4b0"
+ );
+
+ let buf = self.download(&url).await?;
+ Ok(NodeData::decode(buf)?)
+ }
+}
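
The client module added above carries the same API that previously lived in main.rs (see the removal further down). A rough usage sketch built only from the signatures in this diff; the cache path and the concurrency limit of 8 are placeholder values, and the proto message fields are not inspected because the generated types are not part of this commit:

use anyhow::Result;
use std::path::Path;

use crate::{cache::Cache, client::GeClient};

async fn fetch_planetoid() -> Result<()> {
    // Cache::new_db and GeClient::new are the constructors shown in this diff.
    let cache = Cache::new_db(Path::new("cache.redb"))?;
    let client = GeClient::new(8, cache).await?;
    // Fetch the root planetoid metadata through the shared download path
    // (semaphore-limited, cached in redb).
    let planetoid = client.planetoid_metdata().await?;
    println!("{planetoid:?}");
    Ok(())
}
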
diff --git a/src/main.rs b/src/main.rs
index 8d561b4..bb80311 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,20 +1,15 @@
#![feature(array_chunks)]
pub mod cache;
+pub mod client;
pub mod mesh;
-use anyhow::{Result, bail};
+use anyhow::Result;
use cache::Cache;
use clap::Parser;
+use client::GeClient;
use futures_util::{StreamExt, stream::FuturesUnordered};
use glam::{DMat4, Vec3};
-use log::{debug, error, info};
use mesh::{convert_mesh, decode_normal_table};
-use prost::{Message, bytes::Bytes};
-use proto::{BulkMetadata, NodeData, NodeMetadata, PlanetoidMetadata};
-use reqwest::{
- Client,
- header::{HeaderMap, HeaderName, HeaderValue},
-};
use std::{f32::consts::PI, path::PathBuf, pin::Pin, sync::Arc};
use tokio::sync::Semaphore;
use weareshared::{
@@ -35,6 +30,7 @@ struct Args {
#[clap(subcommand)]
action: Action,
}
+
#[derive(Parser)]
enum Action {
Cache { level: usize },
@@ -184,83 +180,8 @@ fn cache_all(
})
}
-struct GeClient {
- client: Client,
- cache: Cache,
- par_limit: Semaphore,
-}
-
-impl GeClient {
- pub async fn new(par_limit: usize, cache: Cache) -> Result<Self> {
- Ok(Self {
- par_limit: Semaphore::new(par_limit),cache,
- client: Client::builder().default_headers(HeaderMap::from_iter([
- (HeaderName::from_static("user-agent"), HeaderValue::from_static("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36")),
- (HeaderName::from_static("referer"), HeaderValue::from_static("https://earth.google.com/"))
- ])).build().unwrap()
- })
- }
- pub async fn download(&self, path: &str) -> Result<Bytes> {
- let _permit = self.par_limit.acquire().await?;
- if let Some(d) = self.cache.get(path)? {
- debug!("cached {path:?}");
- Ok(d.into())
- } else {
- info!("download {path:?}");
- let res = self
- .client
- .get(format!("https://kh.google.com/rt/earth/{path}"))
- .send()
- .await?;
- if !res.status().is_success() {
- error!("error response: {:?}", res.text().await?);
- bail!("error response")
- }
- let buf = res.bytes().await?;
- self.cache.insert(path, &buf)?;
- Ok(buf)
- }
- }
-
- pub async fn planetoid_metdata(&self) -> Result<PlanetoidMetadata> {
- let buf = self.download("PlanetoidMetadata").await?;
- Ok(PlanetoidMetadata::decode(buf)?)
- }
- pub async fn bulk_metadata(&self, path: &str, epoch: u32) -> Result<BulkMetadata> {
- let buf = self
- .download(&format!("BulkMetadata/pb=!1m2!1s{path}!2u{epoch}"))
- .await?;
- Ok(BulkMetadata::decode(buf)?)
- }
- pub async fn node_data(
- &self,
- abspath: &str,
- flags: Flags,
- bulk: &BulkMetadata,
- node: &NodeMetadata,
- ) -> Result<NodeData> {
- let texture_format = bulk.default_available_texture_formats();
- let imagery_epoch = node.imagery_epoch.unwrap_or(bulk.default_imagery_epoch());
- let node_epoch = node
- .epoch
- .unwrap_or(bulk.head_node_key.as_ref().unwrap().epoch.unwrap()); // ?
-
- let image_epoch_part = if flags.use_image_epoch {
- format!("!3u{imagery_epoch}")
- } else {
- String::new()
- };
- let url = format!(
- "NodeData/pb=!1m2!1s{abspath}!2u{node_epoch}!2e{texture_format}{image_epoch_part}!4b0"
- );
-
- let buf = self.download(&url).await?;
- Ok(NodeData::decode(buf)?)
- }
-}
-
#[derive(Debug, Clone, Copy)]
-struct Flags {
+pub struct Flags {
has_node: bool,
has_metadata: bool,
use_image_epoch: bool,