"""
Module for config dialog classes used by MatSci panels

Copyright Schrodinger, LLC. All rights reserved.
"""
import sys
from schrodinger.job import queue
from schrodinger.application.desmond import fep_dialog
from schrodinger.application.desmond import gui as desmond_gui
from schrodinger.ui.qt import config_dialog
from schrodinger.ui.qt import swidgets
from schrodinger.ui.qt.appframework2 import af2
PER_STRUC_CPU_LABEL = 'processors per structure'
PER_STRUC_GPU_LABEL = 'GPUs per structure'


class PerStrucConfigDialog(desmond_gui.DesmondGuiConfigDialog):
    """
    Dialog for configuring jobs that can have CPUs/GPUs per input structure
    specified.
    """
    CPU_UNIT_LABEL = PER_STRUC_CPU_LABEL
    GPU_UNIT_LABEL = PER_STRUC_GPU_LABEL


class PerStrucSingleGpuConfigDialog(
        desmond_gui.SingleGpuDesmondGuiConfigDialog):
    """
    Class to configure jobs that can use a single GPU per input structure.
    """
    CPU_UNIT_LABEL = PER_STRUC_CPU_LABEL
    GPU_UNIT_LABEL = PER_STRUC_GPU_LABEL
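
# Example (illustrative sketch; the panel object is hypothetical): a panel
# whose backend uses one GPU per input structure could construct the
# single-GPU variant as its config dialog:
#
#     config_dlg = PerStrucSingleGpuConfigDialog(panel)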


class PerStructDesmondSubhostConfigDialog(fep_dialog.FEPConfigDialog):
    """
    Subclass of fep_dialog.FEPConfigDialog, customized for panels that run
    multiple Desmond subjobs.
    """
    MAX_SUBJOBS_LABEL_TEXT = "Maximum simultaneous subjobs:"

    def __init__(self,
                 *arg,
                 gpu_num=None,
                 cpu_num=None,
                 sim_jobnum=None,
                 sim_job_sb=None,
                 has_subjobs_func=None,
                 **kwargs):
        """
        See parent class for additional documentation string.
        :type gpu_num: int
        :param gpu_num: Fix the gpu processor number per subjob, if provided.
        :type cpu_num: int
        :param cpu_num: Fix the cpu processor number per subjob, if provided.
        :type sim_jobnum: int
        :param sim_jobnum: The default simultaneous subjob number.
        :type sim_job_sb: str
        :param sim_job_sb: the attribute name of the parent to define the number
            of simultaneous subjob.
        :type: callable
        :param has_subjobs_func: Function that takes no arguments and returns a
            boolean indicating whether subjobs will be run. If not supplied,
            validation will always assume subjobs are run.
        """
        self.sim_job_sb = sim_job_sb  # Should be set before super init
        super().__init__(*arg, **kwargs)
        self.has_subjobs_func = has_subjobs_func
        gpu_sb = self.num_cpus_sw.widget(fep_dialog.GPU_LAYOUT)
        cpu_sb = self.num_cpus_sw.widget(fep_dialog.CPU_LAYOUT)
        for p_num_sb, p_num_value in zip([gpu_sb, cpu_sb], [gpu_num, cpu_num]):
            if p_num_value:
                p_num_sb.setValue(p_num_value)
                p_num_sb.setEnabled(False)
        subjob_host = self.currentHost(self.subhost_menu)
        if not subjob_host:
            # Disable the subhost menu if no subhost is available
            self.subhost_menu.setEnabled(False)
        if sim_jobnum:
            # Default simultaneous job number is the number of independent runs
            self.maxjobs_ef.setText(str(sim_jobnum))

    def updateMaxjobsDefault(self):
        """
        Override the parent class method to set the upper limit on the maximum
        number of simultaneous subjobs.
        """
        host = self.currentHost()
        if host is None:
            return
        max_sim_jobs = host.processors
        if self.sim_job_sb and hasattr(self.parent, self.sim_job_sb):
            md_num = getattr(self.parent, self.sim_job_sb).value()
            max_sim_jobs = min([md_num, max_sim_jobs])
        # For example, bolt_gpu_short reports (10000, 8) and localhost may
        # report (8, 2) as host.queue; the processor counts (10000 and 8) are
        # usually large compared to the total number of GPUs on a single host.
        if self.maxjobs_ef.text() and int(
                self.maxjobs_ef.text()) > max_sim_jobs:
            self.maxjobs_ef.setText(str(max_sim_jobs))
        non_negative_int_val = swidgets.SNonNegativeIntValidator(
            top=max_sim_jobs)
        self.maxjobs_ef.setValidator(non_negative_int_val)

    def validateSubHost(self):
        """
        Override the parent method to validate the GPU subhost.
        """
        if self.has_subjobs_func is not None and not self.has_subjobs_func():
            # No subjobs are being run (MATSCI-9978)
            return True
        subjob_host = self.currentHost(self.subhost_menu)
        if not subjob_host:
            self.warning(
                'No GPU host available. This workflow is only supported '
                'on GPUs.')
            return False
        # Temporary workaround for PANEL-15456 as 'pdxgpu-base' fails gpu jobs
        if 'pdxgpu-base' in subjob_host.label():
            self.warning(
                f"{subjob_host.label()} doesn't allow GPU jobs. Please "
                "choose another GPU host.")
            return False
        job_host = self.currentHost(self.host_menu)
        is_local = str(job_host).startswith(queue.LOCALHOST_ENTRY_NAME)
        is_sublocal = str(subjob_host).startswith(queue.LOCALHOST_ENTRY_NAME)
        if is_local != is_sublocal:
            self.warning(
                "Within a job hierarchy, all jobs must be submitted to "
                "the same job server")
            return False
        return True

    def validate(self):
        """
        Override the parent method to allow CPU hosts for master jobs.
        """
        if not self.validateSubjobs():
            return False
        if (not self.validateSubHost() and
                self.parent.start_mode != af2.ONLY_WRITE):
            return False
        return config_dialog.ConfigDialog.validate(self)
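
# Example (illustrative sketch; the panel, spin box attribute, and callback
# below are hypothetical): a panel that runs several Desmond subjobs might
# construct the dialog with a single fixed GPU per subjob and let a spin box
# on the panel cap the number of simultaneous subjobs:
#
#     dialog = PerStructDesmondSubhostConfigDialog(
#         panel,
#         gpu_num=1,
#         sim_jobnum=num_independent_runs,
#         sim_job_sb='max_md_jobs_sb',
#         has_subjobs_func=panel.hasSubjobs)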


class ThreadOnlyConfigDialog(af2.ConfigDialog):
    """
    Config dialog that only exposes the number of threads and locks the number
    of simultaneous subjobs to one.
    """

    def _setupOpenMPWidgets(self):
        """
        See parent class for documentation.
        """
        super()._setupOpenMPWidgets()
        self.open_mp_ui.mp_cpus_rb.setVisible(False)
        self.open_mp_ui.mp_cpus_grouping.setVisible(False)
        self.open_mp_ui.mp_open_mp_rb.setChecked(True)
        self.open_mp_ui.mp_open_mp_rb.setVisible(False)
        self.open_mp_ui.mp_max_subjobs_sb.setValue(1)
        self.open_mp_ui.mp_max_subjobs_sb.setEnabled(False)
        self.updateOpenMPInfo()


class DesmondSubhostConfigDialog(af2.ConfigDialog):
    """
    Custom job config dialog that launches using the Launch API and supports a
    GPU subhost.
    """
    DRIVER_HOST, GPU_SUBHOST = 'Driver/GPU host', 'GPU subhost'

    def __init__(self, *args, multiple_gpus=False, **kwargs):
        """
        Initialize the dialog and set the host products.

        :param bool multiple_gpus: Whether to allow multiple GPUs
        """
        if multiple_gpus:
            products = {
                self.DRIVER_HOST:
                    config_dialog.GpuHostProductMode.SingleCpuMultipleGpu,
                self.GPU_SUBHOST:
                    config_dialog.GpuHostProductMode.MultipleOnlyGpu
            }
        else:
            products = {
                self.DRIVER_HOST: config_dialog.GpuHostProductMode.Single,
                self.GPU_SUBHOST: config_dialog.GpuHostProductMode.SingleOnlyGpu
            }
        kwargs['host_products'] = [self.DRIVER_HOST, self.GPU_SUBHOST]
        kwargs['gpu_host_products'] = products
        kwargs['cpus'] = multiple_gpus
        super().__init__(*args, **kwargs)
        self.multiple_gpus = multiple_gpus
        self.host_prods[self.DRIVER_HOST].host_menu.currentIndexChanged.connect(
            self._onDriverHostChanged)
        self.host_prods[self.GPU_SUBHOST].host_menu.setToolTip(
            'Only used/enabled when %s is a CPU host.' % self.DRIVER_HOST)
        self._onDriverHostChanged()

    def _onDriverHostChanged(self):
        """ React to the driver host change. """
        use_subhost = self.useSubhost()
        self.host_prods[self.GPU_SUBHOST].host_menu.setEnabled(use_subhost)
        if self.host_prods[self.GPU_SUBHOST].cpus_sb:
            cpus_sb_enable = use_subhost and self.multiple_gpus
            self.host_prods[self.GPU_SUBHOST].cpus_sb.setEnabled(cpus_sb_enable)

    def useSubhost(self):
        """
        Check whether the subhost is needed.

        :rtype: bool
        :return: Whether the subhost is needed. If the driver host is a GPU
            host, the subhost is not needed.
        """
        host = self.host_prods[self.DRIVER_HOST].host_menu.currentData()
        return host.hostType() != host.GPUTYPE

    def getLicHost(self):
        """
        Get the license host.

        :rtype: str or None
        :return: If the driver host is a GPU host, all jobs will run there and
            a license must be requested for it. Otherwise return None.
        """
        if not self.useSubhost():
            # Subhost is not used, license is needed
            host = self.host_prods[self.DRIVER_HOST].host_menu.currentData()
            return '%s:1' % host.name

    def getNumGPUS(self):
        """
        Get the number of GPUs. Only meaningful when multiple GPUs are allowed.

        :rtype: None or int
        :return: The number of GPUs requested, or None if multiple GPUs were
            not requested
        """
        if not self.multiple_gpus:
            return
        host = self.GPU_SUBHOST if self.useSubhost() else self.DRIVER_HOST
        return self.host_prods[host].cpus_sb.value()
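
# Example (illustrative sketch; the panel object and launch-time handling are
# hypothetical): a panel could build the dialog with multi-GPU support and
# query the selections when assembling the launch command:
#
#     dialog = DesmondSubhostConfigDialog(panel, multiple_gpus=True)
#     lic_host = dialog.getLicHost()  # e.g. 'gpu_host:1', or None when the
#                                     # GPU subhost handles the GPU work
#     num_gpus = dialog.getNumGPUS()  # None unless multiple_gpus was requested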