diff --git a/src/app/mod.rs b/src/app/mod.rs
index ffeccfa..257a056 100644
--- a/src/app/mod.rs
+++ b/src/app/mod.rs
@@ -623,6 +623,9 @@ impl App {
         info!("App Init | Setting saved [Tab]...");
         app.tab = app.state.gupax.tab;
 
+        // Set saved prefer local node to runtime
+        app.p2pool_api.lock().unwrap().prefer_local_node = app.state.p2pool.prefer_local_node;
+
         // Set saved Hero mode to runtime.
         debug!("Setting runtime_mode & runtime_manual_amount");
         // apply hero if simple mode saved with checkbox true, will let default to auto otherwise
diff --git a/src/app/panels/bottom.rs b/src/app/panels/bottom.rs
index b7d5d40..d222652 100644
--- a/src/app/panels/bottom.rs
+++ b/src/app/panels/bottom.rs
@@ -254,6 +254,7 @@ impl crate::app::App {
                                 &self.state.p2pool,
                                 &self.state.gupax.absolute_p2pool_path,
                                 self.gather_backup_hosts(),
+                                false,
                             );
                         }
                         ProcessName::Xmrig => {
@@ -324,6 +325,7 @@ impl crate::app::App {
                                 &self.state.p2pool,
                                 &self.state.gupax.absolute_p2pool_path,
                                 self.gather_backup_hosts(),
+                                false,
                             ),
 
                             ProcessName::Xmrig => {
diff --git a/src/app/panels/middle/p2pool/mod.rs b/src/app/panels/middle/p2pool/mod.rs
index fcb922e..a92728c 100644
--- a/src/app/panels/middle/p2pool/mod.rs
+++ b/src/app/panels/middle/p2pool/mod.rs
@@ -43,8 +43,10 @@ impl P2pool {
     ) {
         //---------------------------------------------------------------------------------------------------- [Simple] Console
         // debug!("P2Pool Tab | Rendering [Console]");
+        let mut api_lock = api.lock().unwrap();
+        // Lock once for the whole render pass instead of re-locking per widget.
         egui::ScrollArea::vertical().show(ui, |ui| {
-            let text = &api.lock().unwrap().output;
+            let text = &api_lock.output;
             ui.group(|ui| {
                 console(ui, text);
                 if !self.simple {
@@ -74,7 +76,7 @@ impl P2pool {
             );
 
             if self.simple {
-                self.simple(ui, ping);
+                self.simple(ui, ping, &mut api_lock);
             } else {
                 self.advanced(ui, node_vec);
             }
diff --git a/src/app/panels/middle/p2pool/simple.rs b/src/app/panels/middle/p2pool/simple.rs
index 109b296..f6526ec 100644
--- a/src/app/panels/middle/p2pool/simple.rs
+++ b/src/app/panels/middle/p2pool/simple.rs
@@ -35,11 +35,14 @@ use egui::vec2;
 use crate::constants::*;
 use egui::{Color32, ComboBox, RichText, Ui};
 use log::*;
+
+use super::p2pool::PubP2poolApi;
+
 impl P2pool {
-    pub(super) fn simple(&mut self, ui: &mut Ui, ping: &Arc<Mutex<Ping>>) {
+    pub(super) fn simple(&mut self, ui: &mut Ui, ping: &Arc<Mutex<Ping>>, api: &mut PubP2poolApi) {
         ui.vertical_centered(|ui|{
             ui.add_space(SPACE);
-            ui.checkbox(&mut self.local_node, "Use a local node").on_hover_text("If checked (recommended), p2pool will automatically use the local node.\nCheck the Node tab to start a local node.\nIf unchecked, p2pool will attempt to use a remote node.");
+            ui.checkbox(&mut self.local_node, "Start with a local node").on_hover_text("If checked (recommended), p2pool will start trying to use the local node.\nCheck the Node tab to start a local node.\nIf unchecked, p2pool will attempt to use a remote node.");
         });
         ui.add_space(SPACE * 2.0);
         // if checked, use only local node
@@ -99,7 +102,6 @@ impl P2pool {
                         ui.style_mut().override_text_style = Some(egui::TextStyle::Button);
                         ui.horizontal(|ui| {
                             ui.style_mut().wrap_mode = Some(TextWrapMode::Extend);
-                            // ui.columns_const(|[col1, col2, col3, col4, col5]| {
                             let width = ((ui.available_width() / 5.0)
                                 - (ui.spacing().item_spacing.x * (4.0 / 5.0)))
                                 .max(20.0);
@@ -195,33 +197,43 @@ impl P2pool {
                         ui.group(|ui| {
                             ui.horizontal(|ui| {
                                 let width =
-                                    (((ui.available_width() - ui.spacing().item_spacing.x) / 3.0)
+                                    (((ui.available_width() - ui.spacing().item_spacing.x) / 4.0)
                                         - SPACE * 1.5)
                                         .max(ui.text_style_height(&TextStyle::Button) * 7.0);
                                 let size = vec2(
                                     width,
                                     height_txt_before_button(ui, &TextStyle::Button) * 2.0,
                                 );
-                                // [Auto-node]
                                 ui.add_sized(
                                     size,
                                     Checkbox::new(&mut self.auto_select, "Auto-select"),
                                 )
-                                // ui.checkbox(&mut self.auto_select, "Auto-select")
                                 .on_hover_text(P2POOL_AUTO_SELECT);
                                 ui.separator();
-                                // [Auto-node]
                                 ui.add_sized(size, Checkbox::new(&mut self.auto_ping, "Auto-ping"))
-                                    // ui.checkbox(&mut self.auto_ping, "Auto-ping")
                                     .on_hover_text(P2POOL_AUTO_NODE);
                                 ui.separator();
-                                // [Backup host]
                                 ui.add_sized(
                                     size,
                                     Checkbox::new(&mut self.backup_host, "Backup host"),
                                 )
-                                // ui.checkbox(&mut self.backup_host, "Backup host")
                                 .on_hover_text(P2POOL_BACKUP_HOST_SIMPLE);
+                                ui.separator();
+                                // Apply the preferred-local-node setting immediately while in simple mode.
+                                if ui
+                                    .add_sized(
+                                        size,
+                                        Checkbox::new(
+                                            &mut self.prefer_local_node,
+                                            "Auto-Switch to Local Node",
+                                        ),
+                                    )
+                                    .on_hover_text(P2POOL_AUTOSWITCH_LOCAL_NODE)
+                                    .clicked()
+                                {
+                                    api.prefer_local_node = self.prefer_local_node;
+                                    // The watcher thread reads this flag, so no save/restart is needed.
+                                }
                             })
                         });
                     });
diff --git a/src/disk/state.rs b/src/disk/state.rs
index 1fb7023..4b912b1 100644
--- a/src/disk/state.rs
+++ b/src/disk/state.rs
@@ -300,6 +300,7 @@ pub struct P2pool {
     pub rpc: String,
     pub zmq: String,
     pub selected_node: SelectedPoolNode,
+    pub prefer_local_node: bool,
 }
 
 // compatible for P2Pool and Xmrig/Proxy
@@ -618,6 +619,7 @@ impl Default for P2pool {
                 rpc: "18081".to_string(),
                 zmq_rig: "18083".to_string(),
             },
+            prefer_local_node: true,
         }
     }
 }
diff --git a/src/disk/tests.rs b/src/disk/tests.rs
index 5074ba8..87a3775 100644
--- a/src/disk/tests.rs
+++ b/src/disk/tests.rs
@@ -83,7 +83,8 @@ mod test {
 			ip = "192.168.1.123"
 			rpc = "18089"
 			zmq = "18083"
-            
+            prefer_local_node = true
+
             [p2pool.selected_node]
             index = 0
             name = "Local Monero Node"
diff --git a/src/helper/p2pool.rs b/src/helper/p2pool.rs
index 26d8f5f..9d10e9f 100644
--- a/src/helper/p2pool.rs
+++ b/src/helper/p2pool.rs
@@ -44,8 +44,10 @@ use crate::{
     macros::*,
     xmr::*,
 };
+use enclose::enc;
 use log::*;
 use serde::{Deserialize, Serialize};
+use std::mem;
 use std::path::Path;
 use std::{
     fmt::Write,
@@ -54,6 +56,7 @@ use std::{
     thread,
     time::*,
 };
+use tokio::time::sleep;
 impl Helper {
     #[cold]
     #[inline(never)]
@@ -192,6 +195,7 @@ impl Helper {
         state: &P2pool,
         path: &Path,
         backup_hosts: Option<Vec<PoolNode>>,
+        override_to_local_node: bool,
     ) {
         info!("P2Pool | Attempting to restart...");
         helper.lock().unwrap().p2pool.lock().unwrap().signal = ProcessSignal::Restart;
@@ -208,7 +212,7 @@ impl Helper {
             }
             // Ok, process is not alive, start the new one!
             info!("P2Pool | Old process seems dead, starting new one!");
-            Self::start_p2pool(&helper, &state, &path, backup_hosts);
+            Self::start_p2pool(&helper, &state, &path, backup_hosts, override_to_local_node);
         });
         info!("P2Pool | Restart ... OK");
     }
@@ -221,11 +225,17 @@ impl Helper {
         state: &P2pool,
         path: &Path,
         backup_hosts: Option<Vec<PoolNode>>,
+        override_to_local_node: bool,
     ) {
         helper.lock().unwrap().p2pool.lock().unwrap().state = ProcessState::Middle;
-
         let (args, api_path_local, api_path_network, api_path_pool, api_path_p2p) =
-            Self::build_p2pool_args_and_mutate_img(helper, state, path, backup_hosts);
+            Self::build_p2pool_args_and_mutate_img(
+                helper,
+                state,
+                path,
+                &backup_hosts,
+                override_to_local_node,
+            );
 
         // Print arguments & user settings to console
         crate::disk::print_dash(&format!(
@@ -239,6 +249,20 @@ impl Helper {
         let pub_api = Arc::clone(&helper.lock().unwrap().pub_api_p2pool);
         let gupax_p2pool_api = Arc::clone(&helper.lock().unwrap().gupax_p2pool_api);
         let path = path.to_path_buf();
+        // Thread that watches whether p2pool should be restarted to switch to the local node once it is synced.
+        // Starting the thread even when the option is disabled lets the change apply immediately if it is re-enabled, without asking the user to restart p2pool.
+        // Start this thread only if we don't already override to local node
+        if !override_to_local_node {
+            thread::spawn(enc!((helper, state, path, backup_hosts) move || {
+                Self::watch_switch_p2pool_to_local_node(
+                    &helper,
+                    &state,
+                    &path,
+                    backup_hosts,
+                );
+            }));
+        }
+
         thread::spawn(move || {
             Self::spawn_p2pool_watchdog(
                 process,
@@ -274,7 +298,8 @@ impl Helper {
         helper: &Arc<Mutex<Self>>,
         state: &P2pool,
         path: &Path,
-        backup_hosts: Option<Vec<PoolNode>>,
+        backup_hosts: &Option<Vec<PoolNode>>,
+        override_to_local_node: bool,
     ) -> (Vec<String>, PathBuf, PathBuf, PathBuf, PathBuf) {
         let mut args = Vec::with_capacity(500);
         let path = path.to_path_buf();
@@ -282,7 +307,7 @@ impl Helper {
         api_path.pop();
 
         // [Simple]
-        if state.simple && !state.local_node {
+        if state.simple && (!state.local_node && !override_to_local_node) {
             // Build the p2pool argument
             let (ip, rpc, zmq) = RemoteNode::get_ip_rpc_zmq(&state.node); // Get: (IP, RPC, ZMQ)
             args.push("--wallet".to_string());
@@ -323,7 +348,7 @@ impl Helper {
                 out_peers: "10".to_string(),
                 in_peers: "10".to_string(),
             };
-        } else if state.simple && state.local_node {
+        } else if state.simple && (state.local_node || override_to_local_node) {
             // use the local node
             // Build the p2pool argument
             args.push("--wallet".to_string());
@@ -576,9 +601,8 @@ impl Helper {
         }
         let start = process.lock().unwrap().start;
 
-        // Reset stats before loop
-        *pub_api.lock().unwrap() = PubP2poolApi::new();
-        *gui_api.lock().unwrap() = PubP2poolApi::new();
+        // Reset stats before the loop, preserving runtime-only action flags that do not need saving to state.
+        reset_data_p2pool(&pub_api, &gui_api);
 
         // 4. Loop as watchdog
         let mut first_loop = true;
@@ -611,6 +635,8 @@ impl Helper {
                 ) {
                     break;
                 }
+                // Switching to the local node (when preferred, alive, and not already in use) is handled by the watch_switch_p2pool_to_local_node thread.
+
                 // Check vector of user input
                 check_user_input(&process, &mut stdin);
                 // Check if logs need resetting
@@ -713,6 +739,43 @@ impl Helper {
         // 5. If loop broke, we must be done here.
         info!("P2Pool Watchdog | Watchdog thread exiting... Goodbye!");
     }
+    #[tokio::main]
+    #[allow(clippy::await_holding_lock)]
+    async fn watch_switch_p2pool_to_local_node(
+        helper: &Arc<Mutex<Helper>>,
+        state: &P2pool,
+        path_p2pool: &Path,
+        backup_hosts: Option<Vec<PoolNode>>,
+    ) {
+        // Do not try to restart immediately after the first start, or else the two starts would conflict.
+        sleep(Duration::from_secs(10)).await;
+
+        // Poll once per second.
+        loop {
+            let helper_lock = helper.lock().unwrap();
+            let node_process = helper_lock.node.lock().unwrap();
+            let process = helper_lock.p2pool.lock().unwrap();
+            let gui_api = helper_lock.gui_api_p2pool.lock().unwrap();
+            if gui_api.prefer_local_node
+                && state.simple
+                && !state.local_node
+                && node_process.state == ProcessState::Alive
+                && process.is_alive()
+            {
+                drop(gui_api);
+                drop(process);
+                drop(node_process);
+                drop(helper_lock);
+                Helper::restart_p2pool(helper, state, path_p2pool, backup_hosts, true);
+                break;
+            }
+            drop(gui_api);
+            drop(process);
+            drop(node_process);
+            drop(helper_lock);
+            sleep(Duration::from_secs(1)).await;
+        }
+    }
 }
 //---------------------------------------------------------------------------------------------------- [ImgP2pool]
 // A static "image" of data that P2Pool started with.
@@ -814,6 +877,7 @@ pub struct PubP2poolApi {
     // from local/p2p
     pub p2p_connected: u32,
     pub node_connected: bool,
+    pub prefer_local_node: bool,
 }
 
 impl Default for PubP2poolApi {
@@ -868,6 +932,7 @@ impl PubP2poolApi {
             p2p_connected: 0,
             synchronised: false,
             node_connected: false,
+            prefer_local_node: true,
         }
     }
 
@@ -887,6 +952,7 @@ impl PubP2poolApi {
             tick: std::mem::take(&mut gui_api.tick),
             sidechain_shares: std::mem::take(&mut gui_api.sidechain_shares),
             sidechain_ehr: std::mem::take(&mut gui_api.sidechain_ehr),
+            prefer_local_node: std::mem::take(&mut gui_api.prefer_local_node),
             ..pub_api.clone()
         };
     }
@@ -1121,10 +1187,13 @@ impl PubP2poolApi {
     }
     /// Check if all conditions are met to be alive or if something is wrong
     fn update_state(&self, process: &mut Process) {
-        if self.synchronised && self.node_connected && self.p2p_connected > 1 && self.height > 10 {
+        if process.state == ProcessState::Syncing
+            && self.synchronised
+            && self.node_connected
+            && self.p2p_connected > 1
+            && self.height > 10
+        {
             process.state = ProcessState::Alive;
-        } else {
-            process.state = ProcessState::Syncing;
         }
     }
 
@@ -1385,3 +1454,12 @@ impl PrivP2PoolP2PApi {
         }
     }
 }
+fn reset_data_p2pool(pub_api: &Arc<Mutex<PubP2poolApi>>, gui_api: &Arc<Mutex<PubP2poolApi>>) {
+    let current_pref = mem::take(&mut pub_api.lock().unwrap().prefer_local_node);
+    // Even on a restart, keep values set by the user without requiring them to click the save button.
+
+    *pub_api.lock().unwrap() = PubP2poolApi::new();
+    *gui_api.lock().unwrap() = PubP2poolApi::new();
+    // Restore the preserved preference so the reset does not clobber the user's choice.
+    pub_api.lock().unwrap().prefer_local_node = current_pref;
+}
diff --git a/src/inits.rs b/src/inits.rs
index 497d23a..20f7fc9 100644
--- a/src/inits.rs
+++ b/src/inits.rs
@@ -208,6 +208,7 @@ pub fn init_auto(app: &mut App) {
                 &app.state.p2pool,
                 &app.state.gupax.absolute_p2pool_path,
                 backup_hosts,
+                false,
             );
         }
     } else {
diff --git a/src/utils/constants.rs b/src/utils/constants.rs
index 1787e8b..7b26b92 100644
--- a/src/utils/constants.rs
+++ b/src/utils/constants.rs
@@ -384,6 +384,8 @@ pub const P2POOL_BACKUP_HOST_SIMPLE: &str = r#"Automatically switch to the other
 Note: you must ping the remote nodes or this feature will default to only using the currently selected node."#;
 pub const P2POOL_BACKUP_HOST_ADVANCED: &str =
     "Automatically switch to the other nodes in your list if the current one is down.";
+pub const P2POOL_AUTOSWITCH_LOCAL_NODE: &str =
+    "Automatically switch to the local node once it is ready to be used.";
 pub const P2POOL_SELECT_FASTEST: &str = "Select the fastest remote Monero node";
 pub const P2POOL_SELECT_RANDOM: &str = "Select a random remote Monero node";
 pub const P2POOL_SELECT_LAST: &str = "Select the previous remote Monero node";